| hip_filename (string, 5–84 chars) | hip_content (string, 79–9.69M chars) | cuda_filename (string, 4–83 chars) | cuda_content (string, 19–9.69M chars) |
|---|---|---|---|
ed56a3284ad05c29413656d888cd25d24a08dc0f.hip | // !!! This is a file automatically generated by hipify!!!
////////////////////////////////////////////////////////////////////////////////
// BSD 3-Clause License
//
// Copyright (c) 2021, NVIDIA Corporation
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/////////////////////////////////////////////////////////////////////////////////
#include "matx.h"
#include "matx_conv.h"
#include <cassert>
#include <cstdio>
#include <cuda/std/ccomplex>
using namespace matx;
int main([[maybe_unused]] int argc, [[maybe_unused]] char **argv)
{
MATX_ENTER_HANDLER();
typedef cuda::std::complex<float> complex;
uint32_t iterations = 10;
index_t numSamples = 16384000;
constexpr index_t filterLen = 10;
index_t batches = 100;
float time_ms;
std::cout << "Iterations: " << iterations << std::endl;
std::cout << "NumSamples: " << numSamples << std::endl;
std::cout << "Batches: " << batches << std::endl;
hipStream_t stream;
hipStreamCreate(&stream);
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
using InType = float;
using OutType = float;
using FilterType = float;
// Create data objects
tensor_t<InType, 2> inView({batches, numSamples});
tensor_t<InType, 2> outView({batches, numSamples + filterLen - 1});
// tensor_t<InType, 1> solData({numSamples});
tensor_t<FilterType, 1> filterView({filterLen});
// initialize input data
for (index_t b = 0; b < batches; b++) {
for (index_t i = 0; i < inView.Size(1); i++) {
inView(b,
i) = {static_cast<float>(static_cast<double>(i & 32) / 16.0 - 1)};
// solData.Data()[i] = {0.0};
}
}
// // Init solution
// for (int i = 0; i < (int)numSamples; i++) {
// for (int i_nr = 0; i_nr < (int)filterLen; i_nr++) {
// if ((i - i_nr) >= 0) {
// solData.Data()[i] += coeffs.Data()[i_nr] * inData.Data()[i - i_nr];
// }
// }
// }
// Init Filters
filterView.SetVals({2, 1});
inView.PrefetchDevice(stream);
filterView.PrefetchDevice(stream);
// Measure convolution runtime
hipStreamSynchronize(stream);
hipEventRecord(start, stream);
for (uint32_t i = 0; i < iterations; i++) {
conv1d(outView, inView, filterView, matxConvCorrMode_t::MATX_C_MODE_FULL,
stream);
}
hipEventRecord(stop, stream);
hipStreamSynchronize(stream);
hipEventElapsedTime(&time_ms, start, stop);
time_ms /= static_cast<float>(iterations);
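// The GB/s figure below counts one read and one write per input sample (the factor of 2), over the averaged per-iteration time.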
printf("Convolution kernel time = %.2fus (%.2fGB/s), %.2f billion/s\n",
time_ms * 1e3,
static_cast<double>(batches * inView.Size(1) * sizeof(InType) * 2) /
1e9 / (time_ms / 1e3),
static_cast<double>(batches * inView.Size(1)) / 1e9 / (time_ms / 1e3));
// 2D convolution of a 4x4 filter with a 10,000x10,000 input signal
// constexpr int filter_dim_2d = 4;
// tensor_t<InType, 2> filter2DData(
// {filter_dim_2d, filter_dim_2d});
// auto filter2DView = filter2DData.View();
// tensor_t<InType, 2> in2DData(
// {(uint32_t)10e3, (uint32_t)10e3});
// auto in2DView = in2DData.View();
// tensor_t<OutType, 2> out2DData(
// {(uint32_t) 10e3 + filter_dim_2d - 1, (uint32_t) 10e3 + filter_dim_2d -
// 1});
// auto out2DView = out2DData.View();
// filter2DData.PrefetchDevice(stream);
// in2DData.PrefetchDevice(stream);
// out2DData.PrefetchDevice(stream);
// Measure 2D convolution runtime
hipEventRecord(start, stream);
// for (auto i = 0; i < iterations; i++)
// {
// if (matxDirectConv2DM(out2DView, in2DView, filter2DView, stream) !=
// matxSuccess) {
// printf("Error running convolution\n");
// }
// }
hipEventRecord(stop, stream);
hipStreamSynchronize(stream);
hipEventElapsedTime(&time_ms, start, stop);
time_ms /= static_cast<float>(iterations);
printf("2D Convolution kernel time = %.2fus (%.2fGB/s), %.2f billion/s\n",
time_ms * 1e3,
static_cast<double>(inView.Size(0) * inView.Size(1) * sizeof(InType) *
2) /
1e9 / (time_ms / 1e3),
static_cast<double>(inView.Size(0) * inView.Size(1)) / 1e9 /
(time_ms / 1e3));
hipEventDestroy(start);
hipEventDestroy(stop);
hipStreamDestroy(stream);
matxPrintMemoryStatistics();
CUDA_CHECK_LAST_ERROR();
MATX_EXIT_HANDLER();
}
| ed56a3284ad05c29413656d888cd25d24a08dc0f.cu | ////////////////////////////////////////////////////////////////////////////////
// BSD 3-Clause License
//
// Copyright (c) 2021, NVIDIA Corporation
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/////////////////////////////////////////////////////////////////////////////////
#include "matx.h"
#include "matx_conv.h"
#include <cassert>
#include <cstdio>
#include <cuda/std/ccomplex>
using namespace matx;
int main([[maybe_unused]] int argc, [[maybe_unused]] char **argv)
{
MATX_ENTER_HANDLER();
typedef cuda::std::complex<float> complex;
uint32_t iterations = 10;
index_t numSamples = 16384000;
constexpr index_t filterLen = 10;
index_t batches = 100;
float time_ms;
std::cout << "Iterations: " << iterations << std::endl;
std::cout << "NumSamples: " << numSamples << std::endl;
std::cout << "Batches: " << batches << std::endl;
cudaStream_t stream;
cudaStreamCreate(&stream);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
using InType = float;
using OutType = float;
using FilterType = float;
// Create data objects
tensor_t<InType, 2> inView({batches, numSamples});
tensor_t<InType, 2> outView({batches, numSamples + filterLen - 1});
// tensor_t<InType, 1> solData({numSamples});
tensor_t<FilterType, 1> filterView({filterLen});
// initialize input data
for (index_t b = 0; b < batches; b++) {
for (index_t i = 0; i < inView.Size(1); i++) {
inView(b,
i) = {static_cast<float>(static_cast<double>(i & 32) / 16.0 - 1)};
// solData.Data()[i] = {0.0};
}
}
// // Init solution
// for (int i = 0; i < (int)numSamples; i++) {
// for (int i_nr = 0; i_nr < (int)filterLen; i_nr++) {
// if ((i - i_nr) >= 0) {
// solData.Data()[i] += coeffs.Data()[i_nr] * inData.Data()[i - i_nr];
// }
// }
// }
// Init Filters
filterView.SetVals({2, 1});
inView.PrefetchDevice(stream);
filterView.PrefetchDevice(stream);
// Measure convolution runtime
cudaStreamSynchronize(stream);
cudaEventRecord(start, stream);
for (uint32_t i = 0; i < iterations; i++) {
conv1d(outView, inView, filterView, matxConvCorrMode_t::MATX_C_MODE_FULL,
stream);
}
cudaEventRecord(stop, stream);
cudaStreamSynchronize(stream);
cudaEventElapsedTime(&time_ms, start, stop);
time_ms /= static_cast<float>(iterations);
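// The GB/s figure below counts one read and one write per input sample (the factor of 2), over the averaged per-iteration time.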
printf("Convolution kernel time = %.2fus (%.2fGB/s), %.2f billion/s\n",
time_ms * 1e3,
static_cast<double>(batches * inView.Size(1) * sizeof(InType) * 2) /
1e9 / (time_ms / 1e3),
static_cast<double>(batches * inView.Size(1)) / 1e9 / (time_ms / 1e3));
// 2D convolution of a 4x4 filter with a 10,000x10,000 input signal
// constexpr int filter_dim_2d = 4;
// tensor_t<InType, 2> filter2DData(
// {filter_dim_2d, filter_dim_2d});
// auto filter2DView = filter2DData.View();
// tensor_t<InType, 2> in2DData(
// {(uint32_t)10e3, (uint32_t)10e3});
// auto in2DView = in2DData.View();
// tensor_t<OutType, 2> out2DData(
// {(uint32_t) 10e3 + filter_dim_2d - 1, (uint32_t) 10e3 + filter_dim_2d -
// 1});
// auto out2DView = out2DData.View();
// filter2DData.PrefetchDevice(stream);
// in2DData.PrefetchDevice(stream);
// out2DData.PrefetchDevice(stream);
// Measure 2D convolution runtime
cudaEventRecord(start, stream);
// for (auto i = 0; i < iterations; i++)
// {
// if (matxDirectConv2DM(out2DView, in2DView, filter2DView, stream) !=
// matxSuccess) {
// printf("Error running convolution\n");
// }
// }
cudaEventRecord(stop, stream);
cudaStreamSynchronize(stream);
cudaEventElapsedTime(&time_ms, start, stop);
time_ms /= static_cast<float>(iterations);
printf("2D Convolution kernel time = %.2fus (%.2fGB/s), %.2f billion/s\n",
time_ms * 1e3,
static_cast<double>(inView.Size(0) * inView.Size(1) * sizeof(InType) *
2) /
1e9 / (time_ms / 1e3),
static_cast<double>(inView.Size(0) * inView.Size(1)) / 1e9 /
(time_ms / 1e3));
cudaEventDestroy(start);
cudaEventDestroy(stop);
cudaStreamDestroy(stream);
matxPrintMemoryStatistics();
CUDA_CHECK_LAST_ERROR();
MATX_EXIT_HANDLER();
}
|
577cb2c1a3964a2ad221c41e09a656e55b4f8c53.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/native/FunctionOfAMatrixUtils.h>
#include <ATen/Dispatch.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/hip/detail/OffsetCalculator.cuh>
#include <ATen/hip/Atomic.cuh>
#include <ATen/hip/HIPContext.h>
namespace at { namespace native {
namespace {
template <int n_threads, int n_elems_per_thread, typename func_t>
C10_LAUNCH_BOUNDS_2(n_threads, n_elems_per_thread)
__global__ void _elemwise_kernel(int total_n_elems, func_t f) {
constexpr int total_work_block = n_threads * n_elems_per_thread;
int idx = total_work_block * blockIdx.x + threadIdx.x;
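// Each thread covers up to n_elems_per_thread elements of its block's tile, advancing by n_threads per step.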
#pragma unroll
for (int i = 0; i < n_elems_per_thread; ++i) {
if (idx < total_n_elems) {
f(idx);
idx += n_threads;
}
}
}
template <int n_threads, int n_elems_per_thread, typename func_t>
void _lauch_kernel(int total_n_elems, const func_t& f) {
TORCH_INTERNAL_ASSERT(
total_n_elems >= 0 && total_n_elems <= std::numeric_limits<int32_t>::max()
);
dim3 block(n_threads);
constexpr int total_work_block = n_threads * n_elems_per_thread;
dim3 grid((total_n_elems + total_work_block - 1) / total_work_block);
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
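// hipLaunchKernelGGL(kernel, grid, block, sharedMemBytes, stream, args...) is hipify's replacement for CUDA's kernel<<<grid, block, sharedMemBytes, stream>>>(args...) launch syntax.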
hipLaunchKernelGGL(( _elemwise_kernel<n_threads, n_elems_per_thread, func_t>)
, dim3(grid), dim3(block), 0, stream, total_n_elems, f);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
template <typename scalar_t>
void _compute_linear_combination_internal_kernel(
TensorIterator& iter,
int32_t in_stride,
int32_t coeff_stride,
int32_t num_summations
) {
if (iter.numel() == 0) {
return;
}
if (!iter.can_use_32bit_indexing()) {
for (auto& sub_iter : iter.with_32bit_indexing()) {
_compute_linear_combination_internal_kernel<scalar_t>(
sub_iter, in_stride, coeff_stride, num_summations
);
}
return;
}
auto offset_calc = make_offset_calculator<3>(iter);
char* __restrict__ out_ptr = reinterpret_cast<char*>(iter.data_ptr(0));
char* __restrict__ in_ptr = reinterpret_cast<char*>(iter.data_ptr(1));
char* __restrict__ coeff_ptr = reinterpret_cast<char*>(iter.data_ptr(2));
auto loop = [=]C10_DEVICE(int idx) {
auto offsets = offset_calc.get(idx);
auto* __restrict__ out_data = reinterpret_cast<scalar_t*>(
out_ptr + offsets[0]
);
auto* __restrict__ in_data = reinterpret_cast<scalar_t*>(
in_ptr + offsets[1]
);
using primitive_t = typename scalar_value_type<scalar_t>::type;
auto* __restrict__ coeff_data = reinterpret_cast<primitive_t*>(
coeff_ptr + offsets[2]
);
// perform summation
for (int32_t i = 0; i < num_summations; ++i) {
*out_data += in_data[i * in_stride] * coeff_data[i * coeff_stride];
}
};
_lauch_kernel<num_threads(), thread_work_size()>(iter.numel(), loop);
}
void _compute_linear_combination_cuda_kernel(
TensorIterator& iter,
int64_t in_stride,
int64_t coeff_stride,
int64_t num_summations
) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16,
iter.dtype(),
"_compute_linear_combination_cuda", [&] () {
_compute_linear_combination_internal_kernel<scalar_t>(
iter, in_stride, coeff_stride, num_summations
);
}
);
}
}
REGISTER_DISPATCH(_compute_linear_combination_stub, &_compute_linear_combination_cuda_kernel);
}} // namespace at::native
| 577cb2c1a3964a2ad221c41e09a656e55b4f8c53.cu | #define TORCH_ASSERT_NO_OPERATORS
#include <ATen/native/FunctionOfAMatrixUtils.h>
#include <ATen/Dispatch.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/cuda/detail/OffsetCalculator.cuh>
#include <ATen/cuda/Atomic.cuh>
#include <ATen/cuda/CUDAContext.h>
namespace at { namespace native {
namespace {
template <int n_threads, int n_elems_per_thread, typename func_t>
C10_LAUNCH_BOUNDS_2(n_threads, n_elems_per_thread)
__global__ void _elemwise_kernel(int total_n_elems, func_t f) {
constexpr int total_work_block = n_threads * n_elems_per_thread;
int idx = total_work_block * blockIdx.x + threadIdx.x;
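// Each thread covers up to n_elems_per_thread elements of its block's tile, advancing by n_threads per step.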
#pragma unroll
for (int i = 0; i < n_elems_per_thread; ++i) {
if (idx < total_n_elems) {
f(idx);
idx += n_threads;
}
}
}
template <int n_threads, int n_elems_per_thread, typename func_t>
void _lauch_kernel(int total_n_elems, const func_t& f) {
TORCH_INTERNAL_ASSERT(
total_n_elems >= 0 && total_n_elems <= std::numeric_limits<int32_t>::max()
);
dim3 block(n_threads);
constexpr int total_work_block = n_threads * n_elems_per_thread;
dim3 grid((total_n_elems + total_work_block - 1) / total_work_block);
auto stream = at::cuda::getCurrentCUDAStream();
_elemwise_kernel<n_threads, n_elems_per_thread, func_t>
<<<grid, block, 0, stream>>>(total_n_elems, f);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
template <typename scalar_t>
void _compute_linear_combination_internal_kernel(
TensorIterator& iter,
int32_t in_stride,
int32_t coeff_stride,
int32_t num_summations
) {
if (iter.numel() == 0) {
return;
}
if (!iter.can_use_32bit_indexing()) {
for (auto& sub_iter : iter.with_32bit_indexing()) {
_compute_linear_combination_internal_kernel<scalar_t>(
sub_iter, in_stride, coeff_stride, num_summations
);
}
return;
}
auto offset_calc = make_offset_calculator<3>(iter);
char* __restrict__ out_ptr = reinterpret_cast<char*>(iter.data_ptr(0));
char* __restrict__ in_ptr = reinterpret_cast<char*>(iter.data_ptr(1));
char* __restrict__ coeff_ptr = reinterpret_cast<char*>(iter.data_ptr(2));
auto loop = [=]C10_DEVICE(int idx) {
auto offsets = offset_calc.get(idx);
auto* __restrict__ out_data = reinterpret_cast<scalar_t*>(
out_ptr + offsets[0]
);
auto* __restrict__ in_data = reinterpret_cast<scalar_t*>(
in_ptr + offsets[1]
);
using primitive_t = typename scalar_value_type<scalar_t>::type;
auto* __restrict__ coeff_data = reinterpret_cast<primitive_t*>(
coeff_ptr + offsets[2]
);
// perform summation
for (int32_t i = 0; i < num_summations; ++i) {
*out_data += in_data[i * in_stride] * coeff_data[i * coeff_stride];
}
};
_lauch_kernel<num_threads(), thread_work_size()>(iter.numel(), loop);
}
void _compute_linear_combination_cuda_kernel(
TensorIterator& iter,
int64_t in_stride,
int64_t coeff_stride,
int64_t num_summations
) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16,
iter.dtype(),
"_compute_linear_combination_cuda", [&] () {
_compute_linear_combination_internal_kernel<scalar_t>(
iter, in_stride, coeff_stride, num_summations
);
}
);
}
}
REGISTER_DISPATCH(_compute_linear_combination_stub, &_compute_linear_combination_cuda_kernel);
}} // namespace at::native
|
61bbc881307f94adc5f9a931d03a809657ae991e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cuda_alg.h"
#include "mst.h"
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
#include <ctime>
#include <algorithm>
#include <memory>
#include <vector>
using namespace std;
__global__
static void d_zerout(int* a, int n)
{
int i = blockIdx.x*1024 + threadIdx.x;
if(i < n) {
a[i] = 0;
}
}
__global__
static void d_mark(int* in, int* out, int n)
{
int i = blockIdx.x*1024 + threadIdx.x;
if(i < n) {
out[in[i]] = 1;
}
}
__global__
static void d_set_parent(int* ei, int* ev, int n, int* res)
{
int i = blockIdx.x*1024 + threadIdx.x;
if(i < n) {
int j = ei[i];
res[i] = (j != -1 ? ev[j] : i);
}
}
__global__
static void d_rem_cycles_and_sum(int* ei, int* par, int* ew, int n, int* sb)
{
int i = blockIdx.x*1024 + threadIdx.x;
int w = 0;
if(i < n) {
int j = par[i];
int k = par[j];
if(i != k || i < j) {
w = ew[ei[i]];
ei[i] = j;
} else {
ei[i] = i;
}
}
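// Block-wide tree reduction in shared memory: sum the edge weights picked by this block's threads into sb[blockIdx.x].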
__shared__ int t[1024];
i = threadIdx.x;
t[i] = w;
for(int p = 512; p >= 1; p /= 2) {
__syncthreads();
if(i < p) {
t[i] += t[i+p];
}
}
if(i == 0) {
sb[blockIdx.x] = t[0];
}
}
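// Pointer jumping: each vertex hops to its grandparent (sb[i] = sa[sa[i]]); find_reprs repeats this until every tree has collapsed onto its root, in O(log n) passes.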
__global__
static void d_go_up(int* sa, int* sb, int n, bool* chg)
{
__shared__ bool temp;
int i = blockIdx.x*1024 + threadIdx.x;
if(threadIdx.x == 0) {
temp = false;
}
__syncthreads();
if(i < n) {
int j = sa[i];
int k = sa[j];
sb[i] = k;
if(j != k) {
temp = true;
}
}
__syncthreads();
if(threadIdx.x == 0 && temp) {
*chg = true;
}
}
__global__
static void d_norm_reprs(int* repr, int* t, int n)
{
int i = blockIdx.x*1024 + threadIdx.x;
if(i < n) {
repr[i] = t[repr[i]]-1;
}
}
__global__
static void d_norm_edges(int* eu, int* repr, int* ev, int* ew, uint64_t* res, int m)
{
int i = blockIdx.x*1024 + threadIdx.x;
if(i < m) {
int u = repr[eu[i]];
int v = repr[ev[i]];
if(u != v) {
res[i] = ((uint64_t)u << 40) | ((uint64_t)v << 16) | ew[i];
} else {
res[i] = 0;
}
}
}
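// Edge encoding: u in bits 63..40, v in bits 39..16, 16-bit weight in bits 15..0 of a packed uint64_t; sorting the packed values groups parallel edges by (u, v) with the lightest first, and self-loops are encoded as 0.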
#define EU(e) ((int)(e>>40))
#define EV(e) ((int)(e>>16) & ((1<<24)-1))
#define EW(e) ((int)(e & ((1<<16)-1)))
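// Keep only the first (lightest) edge of each sorted (u, v) group; zero entries are discarded self-loops, and since they sort to the front, e[i-1] stays in bounds for every nonzero entry (each contraction produces at least one self-loop).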
__global__
static void d_mark_lightest_edges(uint64_t* e, int* f, int m)
{
int i = blockIdx.x*1024 + threadIdx.x;
if(i < m) {
uint64_t ec = e[i];
if(ec) {
uint64_t ep = e[i-1];
int u1 = EU(ep);
int v1 = EV(ep);
int u2 = EU(ec);
int v2 = EV(ec);
f[i] = (u1 != u2 || v1 != v2);
} else f[i] = 0;
}
}
__global__
static void d_make_new_edges(uint64_t* e, int* s, int* eu, int* ev, int* ew, int m)
{
int i = blockIdx.x*1024 + threadIdx.x;
if(i < m) {
int j = s[i];
if(j != (i ? s[i-1] : 0)) {
eu[j-1] = EU(e[i]);
ev[j-1] = EV(e[i]);
ew[j-1] = EW(e[i]);
}
}
}
__global__
static void d_update_vert_mapping(int* vm, int* trans, int n)
{
int i = blockIdx.x*1024 + threadIdx.x;
if(i < n) vm[i] = trans[vm[i]];
}
uint64_t rem_cycles_and_sum(int* ei, int* par, int* ew, int n)
{
int nb = CEIL(n, 1024);
int* d_sb;
int* h_sb;
cudaErrChk(hipMalloc(&d_sb, 4*nb));
h_sb = new int[nb];
d_rem_cycles_and_sum<<<CB(n)>>>(ei, par, ew, n, d_sb);
cudaErrChk(hipMemcpy(h_sb, d_sb, nb*sizeof(int), hipMemcpyDeviceToHost));
uint64_t ret = 0;
for(int i = 0; i < nb; ++i)
ret += h_sb[i];
cudaErrChk(hipFree(d_sb));
delete[] h_sb;
return ret;
}
void find_reprs(int** ta, int** tb, int n)
{
bool h_chg;
bool* d_chg;
cudaErrChk(hipMalloc(&d_chg, 1));
do {
h_chg = false;
cudaErrChk(hipMemcpy(d_chg, &h_chg, 1, hipMemcpyHostToDevice));
d_go_up<<<CB(n)>>>(*ta, *tb, n, d_chg);
cudaErrChk(hipMemcpy(&h_chg, d_chg, 1, hipMemcpyDeviceToHost));
swap(*ta, *tb);
} while(h_chg);
cudaErrChk(hipFree(d_chg));
}
// returns new n
static int norm_reprs(int* repr, int* t, int n)
{
int nn;
d_zerout<<<CB(n)>>>(t, n);
d_mark<<<CB(n)>>>(repr, t, n);
scan(t, t, n);
cudaErrChk(hipMemcpy(&nn, t+n-1, 4, hipMemcpyDeviceToHost));
d_norm_reprs<<<CB(n)>>>(repr, t, n);
return nn;
}
uint64_t vi_mst(int& n, int m, int* eu, int* ev, int* ew, int* vm /*= NULL*/, int rn /*= 0*/)
{
int* ta;
int* tb;
int* tc;
uint64_t* te;
cudaErrChk(hipMalloc(&ta, 4*n));
cudaErrChk(hipMalloc(&tb, 4*n));
cudaErrChk(hipMalloc(&tc, 4*m));
cudaErrChk(hipMalloc(&te, 8*m));
uint64_t ret = 0;
while(m) {
segreduce(eu, ew, n, m, ta);
d_set_parent<<<CB(n)>>>(ta, ev, n, tb);
ret += rem_cycles_and_sum(ta, tb, ew, n);
find_reprs(&ta, &tb, n);
n = norm_reprs(ta, tb, n);
if(vm) d_update_vert_mapping<<<CB(rn)>>>(vm, ta, rn);
d_norm_edges<<<CB(m)>>>(eu, ta, ev, ew, te, m);
thrust::device_ptr<uint64_t> thrust_ptr(te);
thrust::sort(thrust_ptr, thrust_ptr+m);
d_mark_lightest_edges<<<CB(m)>>>(te, tc, m);
scan(tc, tc, m);
d_make_new_edges<<<CB(m)>>>(te, tc, eu, ev, ew, m);
cudaErrChk(hipMemcpy(&m, tc+m-1, 4, hipMemcpyDeviceToHost));
}
cudaErrChk(hipFree(ta));
cudaErrChk(hipFree(tb));
cudaErrChk(hipFree(tc));
cudaErrChk(hipFree(te));
return ret;
}
uint64_t vi_mst(int n, const vector<edge>& edges, int& time_ms)
{
int m = 2*edges.size();
vector<vector<int>> av(n);
vector<vector<int>> aw(n);
for(auto e : edges) {
av[e.u].push_back(e.v);
aw[e.u].push_back(e.w);
av[e.v].push_back(e.u);
aw[e.v].push_back(e.w);
}
int* h_eu = new int[m];
int* h_ev = new int[m];
int* h_ew = new int[m];
for(int i = 0, b = 0; i < n; ++i) {
vector<int> tmp(av[i].size(), i);
copy(tmp.begin(), tmp.end(), h_eu+b);
copy(av[i].begin(), av[i].end(), h_ev+b);
copy(aw[i].begin(), aw[i].end(), h_ew+b);
b += av[i].size();
}
int* d_eu;
int* d_ev;
int* d_ew;
cudaErrChk(hipMalloc(&d_eu, m*sizeof(int)));
cudaErrChk(hipMalloc(&d_ev, m*sizeof(int)));
cudaErrChk(hipMalloc(&d_ew, m*sizeof(int)));
cudaErrChk(hipMemcpy(d_eu, h_eu, m*sizeof(int), hipMemcpyHostToDevice));
cudaErrChk(hipMemcpy(d_ev, h_ev, m*sizeof(int), hipMemcpyHostToDevice));
cudaErrChk(hipMemcpy(d_ew, h_ew, m*sizeof(int), hipMemcpyHostToDevice));
delete[] h_eu;
delete[] h_ev;
delete[] h_ew;
auto start = clock();
uint64_t ret = vi_mst(n, m, d_eu, d_ev, d_ew);
time_ms = (int)(1000.0 * (clock() - start) / CLOCKS_PER_SEC);
cudaErrChk(hipFree(d_eu));
cudaErrChk(hipFree(d_ev));
cudaErrChk(hipFree(d_ew));
return ret;
}
| 61bbc881307f94adc5f9a931d03a809657ae991e.cu | #include "cuda.h"
#include "cuda_alg.h"
#include "mst.h"
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
#include <ctime>
#include <algorithm>
#include <memory>
#include <vector>
using namespace std;
__global__
static void d_zerout(int* a, int n)
{
int i = blockIdx.x*1024 + threadIdx.x;
if(i < n) {
a[i] = 0;
}
}
__global__
static void d_mark(int* in, int* out, int n)
{
int i = blockIdx.x*1024 + threadIdx.x;
if(i < n) {
out[in[i]] = 1;
}
}
__global__
static void d_set_parent(int* ei, int* ev, int n, int* res)
{
int i = blockIdx.x*1024 + threadIdx.x;
if(i < n) {
int j = ei[i];
res[i] = (j != -1 ? ev[j] : i);
}
}
__global__
static void d_rem_cycles_and_sum(int* ei, int* par, int* ew, int n, int* sb)
{
int i = blockIdx.x*1024 + threadIdx.x;
int w = 0;
if(i < n) {
int j = par[i];
int k = par[j];
if(i != k || i < j) {
w = ew[ei[i]];
ei[i] = j;
} else {
ei[i] = i;
}
}
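// Block-wide tree reduction in shared memory: sum the edge weights picked by this block's threads into sb[blockIdx.x].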
__shared__ int t[1024];
i = threadIdx.x;
t[i] = w;
for(int p = 512; p >= 1; p /= 2) {
__syncthreads();
if(i < p) {
t[i] += t[i+p];
}
}
if(i == 0) {
sb[blockIdx.x] = t[0];
}
}
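// Pointer jumping: each vertex hops to its grandparent (sb[i] = sa[sa[i]]); find_reprs repeats this until every tree has collapsed onto its root, in O(log n) passes.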
__global__
static void d_go_up(int* sa, int* sb, int n, bool* chg)
{
__shared__ bool temp;
int i = blockIdx.x*1024 + threadIdx.x;
if(threadIdx.x == 0) {
temp = false;
}
__syncthreads();
if(i < n) {
int j = sa[i];
int k = sa[j];
sb[i] = k;
if(j != k) {
temp = true;
}
}
__syncthreads();
if(threadIdx.x == 0 && temp) {
*chg = true;
}
}
__global__
static void d_norm_reprs(int* repr, int* t, int n)
{
int i = blockIdx.x*1024 + threadIdx.x;
if(i < n) {
repr[i] = t[repr[i]]-1;
}
}
__global__
static void d_norm_edges(int* eu, int* repr, int* ev, int* ew, uint64_t* res, int m)
{
int i = blockIdx.x*1024 + threadIdx.x;
if(i < m) {
int u = repr[eu[i]];
int v = repr[ev[i]];
if(u != v) {
res[i] = ((uint64_t)u << 40) | ((uint64_t)v << 16) | ew[i];
} else {
res[i] = 0;
}
}
}
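// Edge encoding: u in bits 63..40, v in bits 39..16, 16-bit weight in bits 15..0 of a packed uint64_t; sorting the packed values groups parallel edges by (u, v) with the lightest first, and self-loops are encoded as 0.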
#define EU(e) ((int)(e>>40))
#define EV(e) ((int)(e>>16) & ((1<<24)-1))
#define EW(e) ((int)(e & ((1<<16)-1)))
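// Keep only the first (lightest) edge of each sorted (u, v) group; zero entries are discarded self-loops, and since they sort to the front, e[i-1] stays in bounds for every nonzero entry (each contraction produces at least one self-loop).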
__global__
static void d_mark_lightest_edges(uint64_t* e, int* f, int m)
{
int i = blockIdx.x*1024 + threadIdx.x;
if(i < m) {
uint64_t ec = e[i];
if(ec) {
uint64_t ep = e[i-1];
int u1 = EU(ep);
int v1 = EV(ep);
int u2 = EU(ec);
int v2 = EV(ec);
f[i] = (u1 != u2 || v1 != v2);
} else f[i] = 0;
}
}
__global__
static void d_make_new_edges(uint64_t* e, int* s, int* eu, int* ev, int* ew, int m)
{
int i = blockIdx.x*1024 + threadIdx.x;
if(i < m) {
int j = s[i];
if(j != (i ? s[i-1] : 0)) {
eu[j-1] = EU(e[i]);
ev[j-1] = EV(e[i]);
ew[j-1] = EW(e[i]);
}
}
}
__global__
static void d_update_vert_mapping(int* vm, int* trans, int n)
{
int i = blockIdx.x*1024 + threadIdx.x;
if(i < n) vm[i] = trans[vm[i]];
}
uint64_t rem_cycles_and_sum(int* ei, int* par, int* ew, int n)
{
int nb = CEIL(n, 1024);
int* d_sb;
int* h_sb;
cudaErrChk(cudaMalloc(&d_sb, 4*nb));
h_sb = new int[nb];
d_rem_cycles_and_sum<<<CB(n)>>>(ei, par, ew, n, d_sb);
cudaErrChk(cudaMemcpy(h_sb, d_sb, nb*sizeof(int), cudaMemcpyDeviceToHost));
uint64_t ret = 0;
for(int i = 0; i < nb; ++i)
ret += h_sb[i];
cudaErrChk(cudaFree(d_sb));
delete[] h_sb;
return ret;
}
void find_reprs(int** ta, int** tb, int n)
{
bool h_chg;
bool* d_chg;
cudaErrChk(cudaMalloc(&d_chg, 1));
do {
h_chg = false;
cudaErrChk(cudaMemcpy(d_chg, &h_chg, 1, cudaMemcpyHostToDevice));
d_go_up<<<CB(n)>>>(*ta, *tb, n, d_chg);
cudaErrChk(cudaMemcpy(&h_chg, d_chg, 1, cudaMemcpyDeviceToHost));
swap(*ta, *tb);
} while(h_chg);
cudaErrChk(cudaFree(d_chg));
}
// returns new n
static int norm_reprs(int* repr, int* t, int n)
{
int nn;
d_zerout<<<CB(n)>>>(t, n);
d_mark<<<CB(n)>>>(repr, t, n);
scan(t, t, n);
cudaErrChk(cudaMemcpy(&nn, t+n-1, 4, cudaMemcpyDeviceToHost));
d_norm_reprs<<<CB(n)>>>(repr, t, n);
return nn;
}
uint64_t vi_mst(int& n, int m, int* eu, int* ev, int* ew, int* vm /*= NULL*/, int rn /*= 0*/)
{
int* ta;
int* tb;
int* tc;
uint64_t* te;
cudaErrChk(cudaMalloc(&ta, 4*n));
cudaErrChk(cudaMalloc(&tb, 4*n));
cudaErrChk(cudaMalloc(&tc, 4*m));
cudaErrChk(cudaMalloc(&te, 8*m));
uint64_t ret = 0;
while(m) {
segreduce(eu, ew, n, m, ta);
d_set_parent<<<CB(n)>>>(ta, ev, n, tb);
ret += rem_cycles_and_sum(ta, tb, ew, n);
find_reprs(&ta, &tb, n);
n = norm_reprs(ta, tb, n);
if(vm) d_update_vert_mapping<<<CB(rn)>>>(vm, ta, rn);
d_norm_edges<<<CB(m)>>>(eu, ta, ev, ew, te, m);
thrust::device_ptr<uint64_t> thrust_ptr(te);
thrust::sort(thrust_ptr, thrust_ptr+m);
d_mark_lightest_edges<<<CB(m)>>>(te, tc, m);
scan(tc, tc, m);
d_make_new_edges<<<CB(m)>>>(te, tc, eu, ev, ew, m);
cudaErrChk(cudaMemcpy(&m, tc+m-1, 4, cudaMemcpyDeviceToHost));
}
cudaErrChk(cudaFree(ta));
cudaErrChk(cudaFree(tb));
cudaErrChk(cudaFree(tc));
cudaErrChk(cudaFree(te));
return ret;
}
uint64_t vi_mst(int n, const vector<edge>& edges, int& time_ms)
{
int m = 2*edges.size();
vector<vector<int>> av(n);
vector<vector<int>> aw(n);
for(auto e : edges) {
av[e.u].push_back(e.v);
aw[e.u].push_back(e.w);
av[e.v].push_back(e.u);
aw[e.v].push_back(e.w);
}
int* h_eu = new int[m];
int* h_ev = new int[m];
int* h_ew = new int[m];
for(int i = 0, b = 0; i < n; ++i) {
vector<int> tmp(av[i].size(), i);
copy(tmp.begin(), tmp.end(), h_eu+b);
copy(av[i].begin(), av[i].end(), h_ev+b);
copy(aw[i].begin(), aw[i].end(), h_ew+b);
b += av[i].size();
}
int* d_eu;
int* d_ev;
int* d_ew;
cudaErrChk(cudaMalloc(&d_eu, m*sizeof(int)));
cudaErrChk(cudaMalloc(&d_ev, m*sizeof(int)));
cudaErrChk(cudaMalloc(&d_ew, m*sizeof(int)));
cudaErrChk(cudaMemcpy(d_eu, h_eu, m*sizeof(int), cudaMemcpyHostToDevice));
cudaErrChk(cudaMemcpy(d_ev, h_ev, m*sizeof(int), cudaMemcpyHostToDevice));
cudaErrChk(cudaMemcpy(d_ew, h_ew, m*sizeof(int), cudaMemcpyHostToDevice));
delete[] h_eu;
delete[] h_ev;
delete[] h_ew;
auto start = clock();
uint64_t ret = vi_mst(n, m, d_eu, d_ev, d_ew);
time_ms = (int)(1000.0 * (clock() - start) / CLOCKS_PER_SEC);
cudaErrChk(cudaFree(d_eu));
cudaErrChk(cudaFree(d_ev));
cudaErrChk(cudaFree(d_ew));
return ret;
}
|
e011d396c88c5c274680f65607a5040fb4c65277.hip | // !!! This is a file automatically generated by hipify!!!
#include <vector>
#include "caffe/layers/base_data_layer.hpp"
namespace caffe {
template<typename Dtype>
void BasePrefetchingDataLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
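// Batches circulate between two queues: a background thread fills buffers taken from prefetch_free_ and pushes them onto prefetch_full_; the forward pass pops a ready batch here and recycles it at the end.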
Batch<Dtype>* batch = prefetch_full_.pop("Data layer prefetch queue empty");
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_ROCM
// Reshape to loaded data.
top[0]->ReshapeLike(batch->data_);
// Copy the data
caffe_copy(batch->data_.count(), batch->data_.gpu_data(),
top[0]->mutable_gpu_data());
if (this->output_labels_) {
// Reshape to loaded labels.
top[1]->ReshapeLike(batch->label_);
// Copy the labels.
caffe_copy(batch->label_.count(), batch->label_.gpu_data(),
top[1]->mutable_gpu_data());
}
// Ensure the copy is synchronous wrt the host, so that the next batch isn't
// copied in the meantime.
CUDA_CHECK(hipStreamSynchronize(hipStreamDefault));
#endif // USE_ROCM
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_->id());
// Reshape to loaded data.
top[0]->ReshapeLike(batch->data_);
// Copy the data
greentea_copy<Dtype>(batch->data_.count(),
(cl_mem) (batch->data_.gpu_data()), 0,
(cl_mem) (top[0]->mutable_gpu_data()), 0, &ctx);
if (this->output_labels_) {
// Reshape to loaded labels.
top[1]->ReshapeLike(batch->label_);
// Copy the labels.
greentea_copy<Dtype>(batch->label_.count(),
(cl_mem) (batch->label_.gpu_data()), 0,
(cl_mem) (top[1]->mutable_gpu_data()), 0, &ctx);
}
#endif // USE_GREENTEA
}
prefetch_free_.push(batch);
}
INSTANTIATE_LAYER_GPU_FORWARD(BasePrefetchingDataLayer);
} // namespace caffe
| e011d396c88c5c274680f65607a5040fb4c65277.cu | #include <vector>
#include "caffe/layers/base_data_layer.hpp"
namespace caffe {
template<typename Dtype>
void BasePrefetchingDataLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
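// Batches circulate between two queues: a background thread fills buffers taken from prefetch_free_ and pushes them onto prefetch_full_; the forward pass pops a ready batch here and recycles it at the end.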
Batch<Dtype>* batch = prefetch_full_.pop("Data layer prefetch queue empty");
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
// Reshape to loaded data.
top[0]->ReshapeLike(batch->data_);
// Copy the data
caffe_copy(batch->data_.count(), batch->data_.gpu_data(),
top[0]->mutable_gpu_data());
if (this->output_labels_) {
// Reshape to loaded labels.
top[1]->ReshapeLike(batch->label_);
// Copy the labels.
caffe_copy(batch->label_.count(), batch->label_.gpu_data(),
top[1]->mutable_gpu_data());
}
// Ensure the copy is synchronous wrt the host, so that the next batch isn't
// copied in the meantime.
CUDA_CHECK(cudaStreamSynchronize(cudaStreamDefault));
#endif // USE_CUDA
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_->id());
// Reshape to loaded data.
top[0]->ReshapeLike(batch->data_);
// Copy the data
greentea_copy<Dtype>(batch->data_.count(),
(cl_mem) (batch->data_.gpu_data()), 0,
(cl_mem) (top[0]->mutable_gpu_data()), 0, &ctx);
if (this->output_labels_) {
// Reshape to loaded labels.
top[1]->ReshapeLike(batch->label_);
// Copy the labels.
greentea_copy<Dtype>(batch->label_.count(),
(cl_mem) (batch->label_.gpu_data()), 0,
(cl_mem) (top[1]->mutable_gpu_data()), 0, &ctx);
}
#endif // USE_GREENTEA
}
prefetch_free_.push(batch);
}
INSTANTIATE_LAYER_GPU_FORWARD(BasePrefetchingDataLayer);
} // namespace caffe
|
17e1a8572717564626d9f800da19d3fecd972b8b.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
int main(int argc, char **argv) {
printf("%s Starting...\n\n", argv[0]);
// Get the device count
int deviceCount = 0;
//@TODO@ : Complete here
// getCount function
hipGetDeviceCount(&deviceCount);
// Loop over devices
for (int dev = 0; dev < deviceCount; ++dev) {
// Set the current device
hipSetDevice(dev);
// Fill the data structure with device properties
struct hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, dev);
//@TODO@ : Complete here
// Display some properties
printf("\nDevice %d: \"%s\"\n", dev, deviceProp.name);
//@TODO@ : Complete here
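// Printed below, in order: total global memory (bytes), multiprocessor count, clock rate (kHz), max threads per block, max threads per multiprocessor, max grid dimension x, max block dimension x.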
printf("\nDevice %d: \"%ld\"\n", dev, deviceProp.totalGlobalMem);
printf("\nDevice %d: \"%d\"\n", dev, deviceProp.multiProcessorCount);
printf("\nDevice %d: \"%d\"\n", dev, deviceProp.clockRate);
printf("\nDevice %d: \"%d\"\n", dev, deviceProp.maxThreadsPerBlock);
printf("\nDevice %d: \"%d\"\n", dev, deviceProp.maxThreadsPerMultiProcessor);
printf("\nDevice %d: \"%d\"\n", dev, deviceProp.maxGridSize[0]);
printf("\nDevice %d: \"%d\"\n", dev, deviceProp.maxThreadsDim[0]);
}
// finish
exit(EXIT_SUCCESS);
}
| 17e1a8572717564626d9f800da19d3fecd972b8b.cu | #include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>
int main(int argc, char **argv) {
printf("%s Starting...\n\n", argv[0]);
// Get the device count
int deviceCount = 0;
//@TODO@ : Complete here
// getCount function
cudaGetDeviceCount(&deviceCount);
// Loop over devices
for (int dev = 0; dev < deviceCount; ++dev) {
// Set the current device
cudaSetDevice(dev);
// Fill the data structure with device properties
struct cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, dev);
//@TODO@ : Complete here
// Display some properties
printf("\nDevice %d: \"%s\"\n", dev, deviceProp.name);
//@TODO@ : Complete here
printf("\nDevice %d: \"%ld\"\n", dev, deviceProp.totalGlobalMem);
printf("\nDevice %d: \"%d\"\n", dev, deviceProp.multiProcessorCount);
printf("\nDevice %d: \"%d\"\n", dev, deviceProp.clockRate);
printf("\nDevice %d: \"%d\"\n", dev, deviceProp.maxThreadsPerBlock);
printf("\nDevice %d: \"%d\"\n", dev, deviceProp.maxThreadsPerMultiProcessor);
printf("\nDevice %d: \"%d\"\n", dev, deviceProp.maxGridSize[0]);
printf("\nDevice %d: \"%d\"\n", dev, deviceProp.maxThreadsDim[0]);
}
// finish
exit(EXIT_SUCCESS);
}
|
1fdfd52e97085e9df0613c11fa40aa0cc663c923.hip | // !!! This is a file automatically generated by hipify!!!
#include "util.h"
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#define THREAD_PER_BLOCK 32
#define CUDA_CHK(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
using namespace std;
__global__ void transpose_kernel_global(float* d_img_in, float* d_img_out, int width, int height) {
int pixel_x, pixel_y,threadId_original,threadId_trans; // Declare variables
pixel_x = blockIdx.x * blockDim.x + threadIdx.x; // imgx index, analogous to the CPU transpose
pixel_y = blockIdx.y * blockDim.y + threadIdx.y; // imgy index, analogous to the CPU transpose
threadId_original = pixel_y*width+pixel_x; // Access index into the original image
threadId_trans = (pixel_x*height+pixel_y); // Access index into the transposed image
if (threadId_original < width * height && threadId_trans < width * height)
d_img_out[threadId_trans] = d_img_in[threadId_original];
}
__global__ void transpose_kernel_shared(float* d_img_in, float* d_img_out, int width, int height) {
extern __shared__ float tile[]; // Declare the tile array in shared memory
// STEP 1: Read the original image by rows and copy it into the tile, coalesced by rows
int original_pixel_x, original_pixel_y,threadId_original,threadId_tile_row;
original_pixel_x = blockIdx.x * blockDim.x + threadIdx.x;
original_pixel_y = blockIdx.y * blockDim.y + threadIdx.y;
threadId_original = original_pixel_y * width + original_pixel_x ; // Access index into the original image
threadId_tile_row = threadIdx.y * blockDim.x + threadIdx.x ; // blockDim.x is the tile width
tile[threadId_tile_row] = d_img_in[threadId_original];
__syncthreads(); // Make sure all the data has been copied into the tile; otherwise some threads could find stale values
// From here on the data is guaranteed to be in shared memory
// STEP 2: Access the tile by columns and compute that index.
int threadId_tile_col;
threadId_tile_col = threadIdx.x * blockDim.y + threadIdx.y; // blockDim.y is the tile height
// STEP 3: Write into the rows of the output image, coalesced
int transpose_pixel_x,transpose_pixel_y,threadId_trans;
transpose_pixel_x = blockIdx.y * blockDim.y + threadIdx.x ; // Accessed by columns
transpose_pixel_y = blockIdx.x * blockDim.x + threadIdx.y ;
threadId_trans = transpose_pixel_x + transpose_pixel_y * height ;
if (threadId_trans < width * height)
d_img_out[threadId_trans] = tile[threadId_tile_col];
}
__global__ void transpose_kernel_shared_noBankConflicts(float* d_img_in, float* d_img_out, int width, int height) {
__shared__ float tile_b[34][33]; // Declare the tile_b array in shared memory; the odd row pitch (33 floats) spreads transposed accesses across shared-memory banks
// STEP 1: Read the original image by rows and copy it into tile_b, coalesced by rows
int original_pixel_x, original_pixel_y,threadId_original;
original_pixel_x = blockIdx.x * blockDim.x + threadIdx.x;
original_pixel_y = blockIdx.y * blockDim.y + threadIdx.y;
threadId_original = original_pixel_y * width + original_pixel_x ; // Access index into the original image
// int threadId_tile_b_row = threadIdx.y * blockDim.x + threadIdx.x ; // blockDim.x is the width of tile_b
tile_b[threadIdx.x][threadIdx.y]= d_img_in[threadId_original];
__syncthreads();
// STEP 2: Access tile_b by columns and compute that index.
// int threadId_tile_b_col;
// threadId_tile_b_col = threadIdx.x * blockDim.y + threadIdx.y; // blockDim.y is the height of tile_b
// STEP 3: Write into the rows of the output image, coalesced
int transpose_pixel_x,transpose_pixel_y,threadId_trans;
transpose_pixel_x = blockIdx.y * blockDim.y + threadIdx.x ; // Accessed by columns
transpose_pixel_y = blockIdx.x * blockDim.x + threadIdx.y ;
threadId_trans = transpose_pixel_x + transpose_pixel_y * height ;
if (threadId_trans < width * height)
d_img_out[threadId_trans] = tile_b[threadIdx.y][threadIdx.x];
}
void transpose_gpu(float * img_in, int width, int height, float * img_out, int threadPerBlockx, int threadPerBlocky) {
float *d_img_in, *d_img_out;
int nbx;
int nby;
unsigned int size_img, tile_size ;
// Determine how many blocks to use from the image size in pixels and the block dimensions passed in by the user.
width % threadPerBlockx == 0 ? nbx = width / threadPerBlockx : nbx = width / threadPerBlockx + 1;
height % threadPerBlocky == 0 ? nby = height / threadPerBlocky : nby = height / threadPerBlocky + 1;
// Determine the image size in bytes
size_img = width * height * sizeof(float);
// Allocate GPU memory
CUDA_CHK(hipMalloc((void**)&d_img_in, size_img));
CUDA_CHK(hipMalloc((void**)&d_img_out, size_img));
// Copy the image to the GPU
CUDA_CHK(hipMemcpy(d_img_in, img_in, size_img, hipMemcpyHostToDevice));
CUDA_CHK(hipMemcpy(d_img_out, img_out, size_img, hipMemcpyHostToDevice));
// Configure the grid and launch the kernels
dim3 grid(nbx,nby);
dim3 block(threadPerBlockx,threadPerBlocky);
// Define the shared memory size in bytes:
tile_size = threadPerBlockx * threadPerBlocky * sizeof(float);
// Using global memory
hipLaunchKernelGGL(( transpose_kernel_global) , dim3(grid), dim3(block) , 0, 0, d_img_in, d_img_out, width, height);
// Using shared memory to transpose in small tiles
hipLaunchKernelGGL(( transpose_kernel_shared) , dim3(grid), dim3(block), tile_size , 0, d_img_in, d_img_out, width, height);
// Using shared memory while avoiding bank conflicts (a tile pitch that is not a multiple of the block size)
hipLaunchKernelGGL(( transpose_kernel_shared_noBankConflicts) , dim3(grid), dim3(block) , 0, 0, d_img_in, d_img_out, width, height);
// Catch any errors from the kernel launches
CUDA_CHK(hipGetLastError());
// Force the kernels to run to completion so any execution errors surface here
CUDA_CHK(hipDeviceSynchronize());
// Transfer the result back to CPU RAM:
CUDA_CHK(hipMemcpy(img_out, d_img_out, size_img, hipMemcpyDeviceToHost));
// Free GPU global memory:
hipFree(d_img_in);
hipFree(d_img_out);
}
| 1fdfd52e97085e9df0613c11fa40aa0cc663c923.cu | #include "util.h"
#include "cuda.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#define THREAD_PER_BLOCK 32
#define CUDA_CHK(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
using namespace std;
__global__ void transpose_kernel_global(float* d_img_in, float* d_img_out, int width, int height) {
int pixel_x, pixel_y,threadId_original,threadId_trans; // Declare variables
pixel_x = blockIdx.x * blockDim.x + threadIdx.x; // imgx index, analogous to the CPU transpose
pixel_y = blockIdx.y * blockDim.y + threadIdx.y; // imgy index, analogous to the CPU transpose
threadId_original = pixel_y*width+pixel_x; // Access index into the original image
threadId_trans = (pixel_x*height+pixel_y); // Access index into the transposed image
if (threadId_original < width * height && threadId_trans < width * height)
d_img_out[threadId_trans] = d_img_in[threadId_original];
}
__global__ void transpose_kernel_shared(float* d_img_in, float* d_img_out, int width, int height) {
extern __shared__ float tile[]; // Declare the tile array in shared memory
// STEP 1: Read the original image by rows and copy it into the tile, coalesced by rows
int original_pixel_x, original_pixel_y,threadId_original,threadId_tile_row;
original_pixel_x = blockIdx.x * blockDim.x + threadIdx.x;
original_pixel_y = blockIdx.y * blockDim.y + threadIdx.y;
threadId_original = original_pixel_y * width + original_pixel_x ; // Access index into the original image
threadId_tile_row = threadIdx.y * blockDim.x + threadIdx.x ; // blockDim.x is the tile width
tile[threadId_tile_row] = d_img_in[threadId_original];
__syncthreads(); // Make sure all the data has been copied into the tile; otherwise some threads could find stale values
// From here on the data is guaranteed to be in shared memory
// STEP 2: Access the tile by columns and compute that index.
int threadId_tile_col;
threadId_tile_col = threadIdx.x * blockDim.y + threadIdx.y; // blockDim.y is the tile height
// STEP 3: Write into the rows of the output image, coalesced
int transpose_pixel_x,transpose_pixel_y,threadId_trans;
transpose_pixel_x = blockIdx.y * blockDim.y + threadIdx.x ; // Accessed by columns
transpose_pixel_y = blockIdx.x * blockDim.x + threadIdx.y ;
threadId_trans = transpose_pixel_x + transpose_pixel_y * height ;
if (threadId_trans < width * height)
d_img_out[threadId_trans] = tile[threadId_tile_col];
}
__global__ void transpose_kernel_shared_noBankConflicts(float* d_img_in, float* d_img_out, int width, int height) {
__shared__ float tile_b[34][33]; // Declare the tile_b array in shared memory; the odd row pitch (33 floats) spreads transposed accesses across shared-memory banks
// STEP 1: Read the original image by rows and copy it into tile_b, coalesced by rows
int original_pixel_x, original_pixel_y,threadId_original;
original_pixel_x = blockIdx.x * blockDim.x + threadIdx.x;
original_pixel_y = blockIdx.y * blockDim.y + threadIdx.y;
threadId_original = original_pixel_y * width + original_pixel_x ; // Access index into the original image
// int threadId_tile_b_row = threadIdx.y * blockDim.x + threadIdx.x ; // blockDim.x is the width of tile_b
tile_b[threadIdx.x][threadIdx.y]= d_img_in[threadId_original];
__syncthreads();
// STEP 2: Access tile_b by columns and compute that index.
// int threadId_tile_b_col;
// threadId_tile_b_col = threadIdx.x * blockDim.y + threadIdx.y; // blockDim.y is the height of tile_b
// STEP 3: Write into the rows of the output image, coalesced
int transpose_pixel_x,transpose_pixel_y,threadId_trans;
transpose_pixel_x = blockIdx.y * blockDim.y + threadIdx.x ; // Accessed by columns
transpose_pixel_y = blockIdx.x * blockDim.x + threadIdx.y ;
threadId_trans = transpose_pixel_x + transpose_pixel_y * height ;
if (threadId_trans < width * height)
d_img_out[threadId_trans] = tile_b[threadIdx.y][threadIdx.x];
}
void transpose_gpu(float * img_in, int width, int height, float * img_out, int threadPerBlockx, int threadPerBlocky) {
float *d_img_in, *d_img_out;
int nbx;
int nby;
unsigned int size_img, tile_size ;
// Determine how many blocks to use from the image size in pixels and the block dimensions passed in by the user.
width % threadPerBlockx == 0 ? nbx = width / threadPerBlockx : nbx = width / threadPerBlockx + 1;
height % threadPerBlocky == 0 ? nby = height / threadPerBlocky : nby = height / threadPerBlocky + 1;
// Determine the image size in bytes
size_img = width * height * sizeof(float);
// Allocate GPU memory
CUDA_CHK(cudaMalloc((void**)&d_img_in, size_img));
CUDA_CHK(cudaMalloc((void**)&d_img_out, size_img));
// Copy the image to the GPU
CUDA_CHK(cudaMemcpy(d_img_in, img_in, size_img, cudaMemcpyHostToDevice));
CUDA_CHK(cudaMemcpy(d_img_out, img_out, size_img, cudaMemcpyHostToDevice));
// Configure the grid and launch the kernels
dim3 grid(nbx,nby);
dim3 block(threadPerBlockx,threadPerBlocky);
// Define the shared memory size in bytes:
tile_size = threadPerBlockx * threadPerBlocky * sizeof(float);
// Using global memory
transpose_kernel_global <<< grid, block >>> (d_img_in, d_img_out, width, height);
// Using shared memory to transpose in small tiles
transpose_kernel_shared <<< grid, block, tile_size >>> (d_img_in, d_img_out, width, height);
// Using shared memory while avoiding bank conflicts (a tile pitch that is not a multiple of the block size)
transpose_kernel_shared_noBankConflicts <<< grid, block >>> (d_img_in, d_img_out, width, height);
// Catch any errors from the kernel launches
CUDA_CHK(cudaGetLastError());
// Force the kernels to run to completion so any execution errors surface here
CUDA_CHK(cudaDeviceSynchronize());
// Transfer the result back to CPU RAM:
CUDA_CHK(cudaMemcpy(img_out, d_img_out, size_img, cudaMemcpyDeviceToHost));
// Free GPU global memory:
cudaFree(d_img_in);
cudaFree(d_img_out);
}
|
2fc2f7d679291d926f57c46cba7f888335ec0cfc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <hiprand/hiprand.h>
#include <rocblas.h>
#include <iomanip>
#include <vector>
#include <cstdlib>
#include <hip/hip_fp16.h>
#define MAX(x, y) ((x>y) ? x : y)
// Define some error checking macros.
#define cudaErrCheck(stat) { cudaErrCheck_((stat), __FILE__, __LINE__); }
void cudaErrCheck_(hipError_t stat, const char *file, int line) {
if (stat != hipSuccess) {
fprintf(stderr, "CUDA Error: %s %s %d\n", hipGetErrorString(stat), file, line);
}
}
#define cublasErrCheck(stat) { cublasErrCheck_((stat), __FILE__, __LINE__); }
void cublasErrCheck_(hipblasStatus_t stat, const char *file, int line) {
if (stat != HIPBLAS_STATUS_SUCCESS) {
fprintf(stderr, "cuBLAS Error: %d %s %d\n", stat, file, line);
}
}
#define curandErrCheck(stat) { curandErrCheck_((stat), __FILE__, __LINE__); }
void curandErrCheck_(hiprandStatus_t stat, const char *file, int line) {
if (stat != HIPRAND_STATUS_SUCCESS) {
fprintf(stderr, "cuRand Error: %d %s %d\n", stat, file, line);
}
}
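// A GEMM of size (m, n, k) performs 2*m*n*k floating-point operations (one multiply and one add per accumulated term).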
double cal_tflops(int m, int n, int k, double msec)
{
double flops = 2. * m * n * k;
double tflops = (1E-12*flops) / (1E-3*msec);
return tflops;
}
__global__ void assignFloatValue (float *out, int n, float value) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < n) {
out[idx] = value;
}
}
__global__ void assignHalfValue (half *out, int n, float value) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < n) {
out[idx] = __float2half(value);
//if(idx == 0)printf("Assign half precision value to out====%f\n", __half2float(out[idx]));
}
}
__global__ void assignHalftoFloatValue (half *in, int n, float *out) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < n) {
out[idx] = __half2float(in[idx]);
//if(idx==0)printf("Cast half to float ====%f\n", out[idx]);
}
}
void correctnessCheck(int m, int n, int k, float *host, float value){
for (int i = 0; i < m * n; i++) {
float val = host[i];
if ( val != k * value * value) {
std::cout << "ERROR value = " << val<< ", correct value="<< k * value * value << std::endl;
}
}
}
__global__ void halfCorrectnessCheck(half *in, int n, int k, float value){
int idx = blockDim.x * blockIdx.x + threadIdx.x;
half valueh = __float2half(value);
float kf = k;
half kh = __float2half(kf);
half v = __hmul(kh,valueh);
v = __hmul(v,valueh);
if (idx < n) {
if( !__heq(in[idx], v)) printf("ERROR value = %f, correct value = %f", __half2float(in[idx]),__half2float(v) );
}
}
void printTime(float cublasTime, int m, int n, int k, float &s_max_tflops, int &s_max_m_n, int &s_max_k ){
float tflops = cal_tflops(m, n, k, cublasTime);
if (tflops > s_max_tflops){
s_max_tflops = tflops;
s_max_m_n = m;
s_max_k = k;
}
std::cout << std::setw(7) << m << ",";
std::cout << std::setw(7) << n << ",";
std::cout << std::setw(7) << k << ",";
std::cout << std::setw(15) << std::setprecision(4) << cublasTime << ",";
std::cout << std::setw(15) << std::setprecision(4) << tflops << "," << std::endl;
}
void calFP16Tensor(int m, int n, int k, float &s_max_tflops, int &s_max_m_n, int &s_max_k, int numRepeats){
half *a_fp16;
half *b_fp16;
half *c_cublas;
float *c_cublas_float;
float *c_host_cublas;
const float value = 0.1f;
hipblasHandle_t cublasHandle;
hipEvent_t startcublas;
hipEvent_t stopcublas;
cudaErrCheck(hipEventCreate(&startcublas));
cudaErrCheck(hipEventCreate(&stopcublas));
cublasErrCheck(hipblasCreate(&cublasHandle));
// Use tensor cores
cublasErrCheck(cublasSetMathMode(cublasHandle, CUBLAS_TENSOR_OP_MATH));
cudaErrCheck(hipMalloc((void**)&a_fp16, m * k * sizeof(half)));
cudaErrCheck(hipMalloc((void**)&b_fp16, k * n * sizeof(half)));
cudaErrCheck(hipMalloc((void**)&c_cublas, m * n * sizeof(half)));
cudaErrCheck(hipMalloc((void**)&c_cublas_float, m * n * sizeof(float)));
c_host_cublas = (float*)malloc(m * n * sizeof(float));
// Fill the fp16 operands directly on the device with a constant value (no random generation needed for this check).
hipLaunchKernelGGL(( assignHalfValue) , dim3((m * k + 255) / 256), dim3(256) , 0, 0, a_fp16, m*k, value);
hipLaunchKernelGGL(( assignHalfValue) , dim3((k * n + 255) / 256), dim3(256) , 0, 0, b_fp16, k*n, value);
hipLaunchKernelGGL(( assignHalfValue) , dim3((k * n + 255) / 256), dim3(256) , 0, 0, c_cublas, m*n, 0.0f);
// alpha and beta MUST be the same type as compute type
half alpha = __float2half(1.0f);
half beta = __float2half(0.0f);
// Now using cuBLAS
cudaErrCheck(hipEventRecord(startcublas));
for (int iteration = 0; iteration < numRepeats; ++iteration) {
hipblasGemmEx(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_T,
m, n, k,
&alpha,
a_fp16, HIP_R_16F, m,
b_fp16, HIP_R_16F, n,
&beta,
c_cublas, HIP_R_16F, m,
HIP_R_16F, CUBLAS_GEMM_DFALT_TENSOR_OP);
}
cudaErrCheck(hipEventRecord(stopcublas));
cudaErrCheck(hipEventSynchronize(stopcublas));
// Correctness check method1
// half<->float conversion is lossy; for example, 0.1 becomes 0.099976 after the round trip
/* assignHalftoFloatValue <<< (k * n + 255) / 256, 256 >>> (c_cublas, m*n, c_cublas_float);
cudaErrCheck(hipMemcpy(c_host_cublas, c_cublas_float, m * n * sizeof(float), hipMemcpyDeviceToHost));
correctnessCheck(m, n, k, c_host_cublas, value);
*/
// Correctness check method 2
hipLaunchKernelGGL(( halfCorrectnessCheck) , dim3((k * n + 255) / 256), dim3(256) , 0, 0, c_cublas, m*n, k, value);
// Check time
float cublasTime;
cudaErrCheck(hipEventElapsedTime(&cublasTime, startcublas, stopcublas));
cublasTime /= numRepeats;
printTime(cublasTime, m, n, k, s_max_tflops, s_max_m_n, s_max_k);
cudaErrCheck(hipEventDestroy(startcublas));
cudaErrCheck(hipEventDestroy(stopcublas));
cudaErrCheck(hipFree(a_fp16));
cudaErrCheck(hipFree(b_fp16));
cudaErrCheck(hipFree(c_cublas));
free(c_host_cublas);
}
void calFP16Accu32Tensor(int m, int n, int k, float &s_max_tflops, int &s_max_m_n, int &s_max_k, int numRepeats){
half *a_fp16;
half *b_fp16;
float *c_cublas;
float *c_host_cublas;
const float value = 1.0f;
hipblasHandle_t cublasHandle;
hipEvent_t startcublas;
hipEvent_t stopcublas;
cudaErrCheck(hipEventCreate(&startcublas));
cudaErrCheck(hipEventCreate(&stopcublas));
cublasErrCheck(hipblasCreate(&cublasHandle));
// Use tensor cores
cublasErrCheck(cublasSetMathMode(cublasHandle, CUBLAS_TENSOR_OP_MATH));
cudaErrCheck(hipMalloc((void**)&a_fp16, m * k * sizeof(half)));
cudaErrCheck(hipMalloc((void**)&b_fp16, k * n * sizeof(half)));
cudaErrCheck(hipMalloc((void**)&c_cublas, m * n * sizeof(float)));
c_host_cublas = (float*)malloc(m * n * sizeof(float));
// Fill the fp16 operands directly on the device with a constant value (no random generation needed for this check).
hipLaunchKernelGGL(( assignHalfValue) , dim3((m * k + 255) / 256), dim3(256) , 0, 0, a_fp16, m*k, value);
hipLaunchKernelGGL(( assignHalfValue) , dim3((k * n + 255) / 256), dim3(256) , 0, 0, b_fp16, k*n, value);
hipLaunchKernelGGL(( assignFloatValue) , dim3((k * n + 255) / 256), dim3(256) , 0, 0, c_cublas, m*n, 0.0f);
float alpha = 1.0f;
float beta = 0.0f;
// Warm-up not really needed
// Now using cuBLAS
cudaErrCheck(hipEventRecord(startcublas));
for (int iteration = 0; iteration < numRepeats; ++iteration) {
cublasErrCheck(hipblasGemmEx(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_T,
m, n, k,
&alpha,
a_fp16, HIP_R_16F, m,
b_fp16, HIP_R_16F, n,
&beta,
c_cublas, HIP_R_32F, m,
HIP_R_32F, CUBLAS_GEMM_DFALT_TENSOR_OP));
}
cudaErrCheck(hipEventRecord(stopcublas));
cudaErrCheck(hipEventSynchronize(stopcublas));
// Correctness check
cudaErrCheck(hipMemcpy(c_host_cublas, c_cublas, m * n * sizeof(float), hipMemcpyDeviceToHost));
correctnessCheck(m, n, k, c_host_cublas, value);
// Check time
float cublasTime;
cudaErrCheck(hipEventElapsedTime(&cublasTime, startcublas, stopcublas));
cublasTime /= numRepeats;
printTime(cublasTime, m, n, k, s_max_tflops, s_max_m_n, s_max_k);
cudaErrCheck(hipEventDestroy(startcublas));
cudaErrCheck(hipEventDestroy(stopcublas));
cudaErrCheck(hipFree(a_fp16));
cudaErrCheck(hipFree(b_fp16));
cudaErrCheck(hipFree(c_cublas));
free(c_host_cublas);
}
void calFP32CUDA(int m, int n, int k, float &s_max_tflops, int &s_max_m_n, int &s_max_k, int numRepeats){
float *a_fp32;
float *b_fp32;
float *c_cublas;
float *c_host_cublas;
const float value = 1.0f;
hipblasHandle_t cublasHandle;
hipEvent_t startcublas;
hipEvent_t stopcublas;
cudaErrCheck(hipEventCreate(&startcublas));
cudaErrCheck(hipEventCreate(&stopcublas));
cublasErrCheck(hipblasCreate(&cublasHandle));
// No tensor cores
cublasErrCheck(cublasSetMathMode(cublasHandle, CUBLAS_DEFAULT_MATH));
cudaErrCheck(hipMalloc((void**)&a_fp32, m * k * sizeof(float)));
cudaErrCheck(hipMalloc((void**)&b_fp32, k * n * sizeof(float)));
cudaErrCheck(hipMalloc((void**)&c_cublas, m * n * sizeof(float)));
c_host_cublas = (float*)malloc(m * n * sizeof(float));
// Fill the fp32 operands directly on the device with a constant value (no random generation needed for this check).
hipLaunchKernelGGL(( assignFloatValue) , dim3((m * k + 255) / 256), dim3(256) , 0, 0, a_fp32, m*k, value);
hipLaunchKernelGGL(( assignFloatValue) , dim3((k * n + 255) / 256), dim3(256) , 0, 0, b_fp32, k*n, value);
hipLaunchKernelGGL(( assignFloatValue) , dim3((k * n + 255) / 256), dim3(256) , 0, 0, c_cublas, m*n, 0.0f);
float alpha = 1.0f;
float beta = 0.0f;
cudaErrCheck(hipEventRecord(startcublas));
for (int iteration = 0; iteration < numRepeats; ++iteration) {
hipblasSgemm(cublasHandle,
HIPBLAS_OP_N,
HIPBLAS_OP_T,
m,
n,
k,
&alpha,
a_fp32, m,
b_fp32, n,
&beta,
c_cublas, m);
}
cudaErrCheck(hipEventRecord(stopcublas));
cudaErrCheck(hipEventSynchronize(stopcublas));
// Correctness check
cudaErrCheck(hipMemcpy(c_host_cublas, c_cublas, m * n * sizeof(float), hipMemcpyDeviceToHost));
correctnessCheck(m, n, k, c_host_cublas, value);
// Check time
float cublasTime = 0.0f;
cudaErrCheck(hipEventElapsedTime(&cublasTime, startcublas, stopcublas));
cublasTime /= numRepeats;
printTime(cublasTime, m, n, k, s_max_tflops, s_max_m_n, s_max_k);
cudaErrCheck(hipEventDestroy(startcublas));
cudaErrCheck(hipEventDestroy(stopcublas));
cudaErrCheck(hipFree(a_fp32));
cudaErrCheck(hipFree(b_fp32));
cudaErrCheck(hipFree(c_cublas));
free(c_host_cublas);
}
void calFP16CUDA(int m, int n, int k, float &s_max_tflops, int &s_max_m_n, int &s_max_k, int numRepeats){
half *a_fp16;
half *b_fp16;
half *c_cublas;
float *c_host_cublas;
const float value = 1.0f;
hipblasHandle_t cublasHandle;
hipEvent_t startcublas;
hipEvent_t stopcublas;
cudaErrCheck(hipEventCreate(&startcublas));
cudaErrCheck(hipEventCreate(&stopcublas));
cublasErrCheck(hipblasCreate(&cublasHandle));
// No tensor cores
cublasErrCheck(cublasSetMathMode(cublasHandle, CUBLAS_DEFAULT_MATH));
cudaErrCheck(hipMalloc((void**)&a_fp16, m * k * sizeof(half)));
cudaErrCheck(hipMalloc((void**)&b_fp16, k * n * sizeof(half)));
cudaErrCheck(hipMalloc((void**)&c_cublas, m * n * sizeof(half)));
c_host_cublas = (float*)malloc(m * n * sizeof(float));
// Fill device buffers with constant values (no random init: hiprand lacks fp16 support)
hipLaunchKernelGGL(( assignHalfValue) , dim3((m * k + 255) / 256), dim3(256) , 0, 0, a_fp16, m*k, value);
hipLaunchKernelGGL(( assignHalfValue) , dim3((k * n + 255) / 256), dim3(256) , 0, 0, b_fp16, k*n, value);
hipLaunchKernelGGL(( assignHalfValue) , dim3((m * n + 255) / 256), dim3(256) , 0, 0, c_cublas, m*n, 0.0f);
half alpha = __float2half(1.0f);
half beta = __float2half(0.0f);
// Now using cuBLAS
cudaErrCheck(hipEventRecord(startcublas));
for (int iteration = 0; iteration < numRepeats; ++iteration) {
hipblasHgemm(cublasHandle,
HIPBLAS_OP_N,
HIPBLAS_OP_T,
m,
n,
k,
&alpha,
a_fp16, m,
b_fp16, n,
&beta,
c_cublas, m);
}
cudaErrCheck(hipEventRecord(stopcublas));
cudaErrCheck(hipEventSynchronize(stopcublas));
// TODO: Correctness check
//cudaErrCheck(hipMemcpy(c_host_cublas, c_cublas, m * n * sizeof(float), hipMemcpyDeviceToHost));
//correctnessCheck(m, n, k, c_host_cublas, value);
// Check time
float cublasTime;
cudaErrCheck(hipEventElapsedTime(&cublasTime, startcublas, stopcublas));
cublasTime /= numRepeats;
printTime(cublasTime, m, n, k, s_max_tflops, s_max_m_n, s_max_k);
cudaErrCheck(hipEventDestroy(startcublas));
cudaErrCheck(hipEventDestroy(stopcublas));
cudaErrCheck(hipFree(a_fp16));
cudaErrCheck(hipFree(b_fp16));
cudaErrCheck(hipFree(c_cublas));
free(c_host_cublas);
}
int main(int argc, char* argv[]) {
int m,n,k;
std::string precision="NULL";
bool perf = true;
if (argc < 3) {
std::cout << "Usage: " << argv[0] << " <precision> <performance|pressure>" << std::endl;
return EXIT_FAILURE;
}
// precision = INT8_TENSOR
// precision = FP16_TENSOR
// precision = FP16_32_TENSOR
// precision = FP32_CUDA
// precision = FP16_CUDA
if (argc == 3) {
precision = argv[1];
std::string tmp = argv[2];
if (tmp == "performance") perf= true;
else if (tmp == "pressure") perf = false;
else {
std::cout << "Invalid parameters!"<<std::endl;
return EXIT_FAILURE;
}
}
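// Example invocations (the binary name here is illustrative only):
//   ./cublas_bench FP16_TENSOR performance  -> sweep m=n and k, 10 repeats per size
//   ./cublas_bench FP32_CUDA pressure       -> fixed sizes {512,1024,5120,10240}, 2000 repeats each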
float s_max_tflops = 0;
int s_max_m_n = 0;
int s_max_k = 0;
int numRepeats;
/* // deprecated this INT8 test as it will achieve the best perf. Please refer to cublasLt
if (precision == "INT8_TENSOR" || precision == "NULL") {
std::cout << "[TensorCore INT8(INT32 accumulation) Time and TOPS Result]" << std::endl;
std::cout << std::setw(7) << "m" << std::setw(7) << "n" << std::setw(7) << "k";
std::cout << std::setw(15) << "Time (msec)" << std::setw(15) << "TOPS";
std::cout << std::endl;
// for tensorcore test TODO: to verify the int8 with int8 accumulation
for(m=1024, n = 1024; m <= 25600; m+=1024, n+=1024) {
for(k=1024; k <= 20480; k+=1024) {
int8_t *a_;
int8_t *b_;
int *c_cublas;
int *c_host_cublas;
//const int value = 1;
hipblasHandle_t cublasHandle;
hipEvent_t startcublas;
hipEvent_t stopcublas;
cudaErrCheck(hipEventCreate(&startcublas));
cudaErrCheck(hipEventCreate(&stopcublas));
cublasErrCheck(hipblasCreate(&cublasHandle));
// Use tensor cores
cublasErrCheck(cublasSetMathMode(cublasHandle, CUBLAS_TENSOR_OP_MATH));
cudaErrCheck(hipMalloc((void**)&a_, m * k * sizeof(int8_t)));
cudaErrCheck(hipMalloc((void**)&b_, k * m * sizeof(int8_t)));
cudaErrCheck(hipMalloc((void**)&c_cublas, m * n * sizeof(int)));
c_host_cublas = (int*)malloc(m * n * sizeof(int));
//TODO hiprand doesn't currently support fp16 so we generate in fp32 and convert to fp16.
//assignHalfValue <<< (m * k + 255) / 256, 256 >>> (a_fp16, m*k, value);
//assignHalfValue <<< (k * n + 255) / 256, 256 >>> (b_fp16, k*n, value);
//assignHalfValue <<< (k * n + 255) / 256, 256 >>> (c_cublas, m*n, 0.0f);
int alpha = 1;
int beta = 0;
int numRepeats = 1;
// Warp up not really needed here as many params will be tested
// Now using cuBLAS
cudaErrCheck(hipEventRecord(startcublas));
for (int iteration = 0; iteration < numRepeats; ++iteration) {
cublasErrCheck(hipblasGemmEx(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_T,
m, n, k,
&alpha,
a_, HIP_R_8I, m,
b_, HIP_R_8I, n,
&beta,
c_cublas, HIP_R_32I, m,
HIP_R_32I, CUBLAS_GEMM_DFALT_TENSOR_OP));
}
cudaErrCheck(hipEventRecord(stopcublas));
cudaErrCheck(hipEventSynchronize(stopcublas));
// TODO: Correctness check
//cudaErrCheck(hipMemcpy(c_host_cublas, c_cublas, m * n * sizeof(float), hipMemcpyDeviceToHost));
//correctnessCheck(m, n, k, c_host_cublas, value);
// Check time
float cublasTime;
cudaErrCheck(hipEventElapsedTime(&cublasTime, startcublas, stopcublas));
cublasTime /= numRepeats;
printTime(cublasTime, m, n, k, s_max_tflops, s_max_m_n, s_max_k);
cudaErrCheck(hipEventDestroy(startcublas));
cudaErrCheck(hipEventDestroy(stopcublas));
cudaErrCheck(hipFree(a_));
cudaErrCheck(hipFree(b_));
cudaErrCheck(hipFree(c_cublas));
free(c_host_cublas);
}}
std::cout << "[Peak TFLOPS]=" << s_max_tflops << ", m=n="<< s_max_m_n << ", k="<<s_max_k<< std::endl;
cudaErrCheck(hipDeviceReset());
}
*/
//======= for tensorcore test
// for perf test
if (precision == "FP16_TENSOR" && perf == true) {
std::cout << "[TensorCore FP16(FP16 accumulation) Time and TFLOPS Result]" << std::endl;
std::cout << std::setw(7) << "m" << std::setw(7) << "n" << std::setw(7) << "k";
std::cout << std::setw(15) << "Time (msec)" << std::setw(15) << "TFLOPS";
std::cout << std::endl;
s_max_tflops = 0;
s_max_m_n = 0;
s_max_k = 0;
numRepeats = 10;
for(m=1024, n = 1024; m <= 21504; m+=4096, n+=4096) {
for(k=1024; k <= 20480; k+=4096) {
// m=n=k=1024;
calFP16Tensor( m, n, k,s_max_tflops, s_max_m_n, s_max_k, numRepeats);
}}
std::cout << "[Peak TFLOPS]=" << s_max_tflops << ", m=n="<< s_max_m_n << ", k="<<s_max_k<< std::endl;
cudaErrCheck(hipDeviceReset());
}
// for pressure test
if (precision == "FP16_TENSOR" && perf == false) {
std::cout << "[TensorCore FP16(FP16 accumulation) Time and TFLOPS Result]" << std::endl;
std::cout << std::setw(7) << "m" << std::setw(7) << "n" << std::setw(7) << "k";
std::cout << std::setw(15) << "Time (msec)" << std::setw(15) << "TFLOPS";
std::cout << std::endl;
s_max_tflops = 0;
s_max_m_n = 0;
s_max_k = 0;
numRepeats = 2000;
std::vector<int> mnk={512, 1024, 5120, 10240};
for(int i=0; i<mnk.size(); i++) calFP16Tensor( mnk[i], mnk[i], mnk[i], s_max_tflops, s_max_m_n, s_max_k, numRepeats);
cudaErrCheck(hipDeviceReset());
}
// for perf test
if (precision == "FP16_32_TENSOR" && perf == true) {
std::cout << "[TensorCore FP16(FP32 accumulation) Time and TFLOPS Result]" << std::endl;
std::cout << std::setw(7) << "m" << std::setw(7) << "n" << std::setw(7) << "k";
std::cout << std::setw(15) << "Time (msec)" << std::setw(15) << "TFLOPS";
std::cout << std::endl;
s_max_tflops = 0;
numRepeats = 10;
for(m=1024, n = 1024; m <= 21504; m+=4096, n+=4096) {
for(k=1024; k <= 20480; k+=4096) {
// m=n=k=1024;
calFP16Accu32Tensor( m, n, k, s_max_tflops, s_max_m_n, s_max_k, numRepeats);
}}
std::cout << "[Peak TFLOPS]=" << s_max_tflops << ", m=n="<< s_max_m_n << ", k="<<s_max_k<< std::endl;
cudaErrCheck(hipDeviceReset());
}
// for pressure test
if (precision == "FP16_32_TENSOR" && perf == false) {
std::cout << "[TensorCore FP16(FP32 accumulation) Time and TFLOPS Result]" << std::endl;
std::cout << std::setw(7) << "m" << std::setw(7) << "n" << std::setw(7) << "k";
std::cout << std::setw(15) << "Time (msec)" << std::setw(15) << "TFLOPS";
std::cout << std::endl;
s_max_tflops = 0;
numRepeats = 2000;
std::vector<int> mnk={512, 1024, 5120, 10240};
for(int i=0; i<mnk.size(); i++) calFP16Accu32Tensor( mnk[i], mnk[i], mnk[i], s_max_tflops, s_max_m_n, s_max_k, numRepeats);
cudaErrCheck(hipDeviceReset());
}
//======= for cudacore test
if (precision == "FP32_CUDA" && perf == true) {
std::cout << "[CUDA core FP32 Time and TFLOPS Result]" << std::endl;
std::cout << std::setw(7) << "m" << std::setw(7) << "n" << std::setw(7) << "k";
std::cout << std::setw(15) << "Time (msec)" << std::setw(15) << "TFLOPS";
std::cout << std::endl;
s_max_tflops = 0;
numRepeats = 10;
for(m=1024, n = 1024; m <= 21504; m+=4096, n+=4096) {
for(k=1024; k <= 20480; k+=4096) {
calFP32CUDA( m, n, k,s_max_tflops, s_max_m_n, s_max_k, numRepeats);
}}
std::cout << "[Peak TFLOPS]=" << s_max_tflops << ", m=n="<< s_max_m_n << ", k="<<s_max_k<< std::endl;
cudaErrCheck(hipDeviceReset());
}
// for pressure test
if (precision == "FP32_CUDA" && perf == false) {
std::cout << "[CUDA core FP32 Time and TFLOPS Result]" << std::endl;
std::cout << std::setw(7) << "m" << std::setw(7) << "n" << std::setw(7) << "k";
std::cout << std::setw(15) << "Time (msec)" << std::setw(15) << "TFLOPS";
std::cout << std::endl;
s_max_tflops = 0;
numRepeats = 2000;
std::vector<int> mnk={512, 1024, 5120, 10240};
for(int i=0; i<mnk.size(); i++) calFP32CUDA( mnk[i], mnk[i], mnk[i], s_max_tflops, s_max_m_n, s_max_k, numRepeats);
cudaErrCheck(hipDeviceReset());
}
// for perf test
if (precision == "FP16_CUDA" && perf == true) {
std::cout << "[CUDA core FP16 Time and TFLOPS Result]" << std::endl;
std::cout << std::setw(7) << "m" << std::setw(7) << "n" << std::setw(7) << "k";
std::cout << std::setw(15) << "Time (msec)" << std::setw(15) << "TFLOPS";
std::cout << std::endl;
s_max_tflops = 0;
numRepeats = 10;
for(m=1024, n = 1024; m <= 21504; m+=4096, n+=4096) {
for(k=1024; k <= 20480; k+=4096) {
calFP16CUDA( m, n, k,s_max_tflops, s_max_m_n, s_max_k, numRepeats);
}}
std::cout << "[Peak TFLOPS]=" << s_max_tflops << ", m=n="<< s_max_m_n << ", k="<<s_max_k<< std::endl;
cudaErrCheck(hipDeviceReset());
}
// for pressure test
if (precision == "FP16_CUDA" && perf == false) {
std::cout << "[CUDA core FP16 Time and TFLOPS Result]" << std::endl;
std::cout << std::setw(7) << "m" << std::setw(7) << "n" << std::setw(7) << "k";
std::cout << std::setw(15) << "Time (msec)" << std::setw(15) << "TFLOPS";
std::cout << std::endl;
s_max_tflops = 0;
numRepeats = 2000;
std::vector<int> mnk={512, 1024, 5120, 10240};
for(int i=0; i<mnk.size(); i++) calFP16CUDA( mnk[i], mnk[i], mnk[i], s_max_tflops, s_max_m_n, s_max_k, numRepeats);
cudaErrCheck(hipDeviceReset());
}
return 0;
}
| 2fc2f7d679291d926f57c46cba7f888335ec0cfc.cu | #include <iostream>
#include <curand.h>
#include <cublas_v2.h>
#include <iomanip>
#include <vector>
#include <cstdlib>
#include <cuda_fp16.h>
#define MAX(x, y) ((x>y) ? x : y)
// Define some error checking macros.
#define cudaErrCheck(stat) { cudaErrCheck_((stat), __FILE__, __LINE__); }
void cudaErrCheck_(cudaError_t stat, const char *file, int line) {
if (stat != cudaSuccess) {
fprintf(stderr, "CUDA Error: %s %s %d\n", cudaGetErrorString(stat), file, line);
}
}
#define cublasErrCheck(stat) { cublasErrCheck_((stat), __FILE__, __LINE__); }
void cublasErrCheck_(cublasStatus_t stat, const char *file, int line) {
if (stat != CUBLAS_STATUS_SUCCESS) {
fprintf(stderr, "cuBLAS Error: %d %s %d\n", stat, file, line);
}
}
#define curandErrCheck(stat) { curandErrCheck_((stat), __FILE__, __LINE__); }
void curandErrCheck_(curandStatus_t stat, const char *file, int line) {
if (stat != CURAND_STATUS_SUCCESS) {
fprintf(stderr, "cuRand Error: %d %s %d\n", stat, file, line);
}
}
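// Note: these *ErrCheck helpers only print to stderr and return, so execution
// continues past a failed call; timings reported after an error are suspect.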
double cal_tflops(int m, int n, int k, double msec)
{
double flops = 2. * m * n * k;
double tflops = (1E-12*flops) / (1E-3*msec);
return tflops;
}
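// Worked example: m = n = k = 1024 in 1 ms gives flops = 2*1024^3 ~= 2.147e9,
// so tflops = (1E-12 * 2.147e9) / (1E-3 * 1) ~= 2.15.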
__global__ void assignFloatValue (float *out, int n, float value) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < n) {
out[idx] = value;
}
}
__global__ void assignHalfValue (half *out, int n, float value) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < n) {
out[idx] = __float2half(value);
//if(idx == 0)printf("Assign half precision value to out====%f\n", __half2float(out[idx]));
}
}
__global__ void assignHalftoFloatValue (half *in, int n, float *out) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < n) {
out[idx] = __half2float(in[idx]);
//if(idx==0)printf("Cast half to float ====%f\n", out[idx]);
}
}
void correctnessCheck(int m, int n, int k, float *host, float value){
for (int i = 0; i < m * n; i++) {
float val = host[i];
if ( val != k * value * value) {
std::cout << "ERROR value = " << val<< ", correct value="<< k * value * value << std::endl;
}
}
}
__global__ void halfCorrectnessCheck(half *in, int n, int k, float value){
int idx = blockDim.x * blockIdx.x + threadIdx.x;
half valueh = __float2half(value);
float kf = k;
half kh = __float2half(kf);
half v = __hmul(kh,valueh);
v = __hmul(v,valueh);
if (idx < n) {
if (!__heq(in[idx], v)) printf("ERROR value = %f, correct value = %f\n", __half2float(in[idx]), __half2float(v));
}
}
void printTime(float cublasTime, int m, int n, int k, float &s_max_tflops, int &s_max_m_n, int &s_max_k ){
float tflops = cal_tflops(m, n, k, cublasTime);
if (tflops > s_max_tflops){
s_max_tflops = tflops;
s_max_m_n = m;
s_max_k = k;
}
std::cout << std::setw(7) << m << ",";
std::cout << std::setw(7) << n << ",";
std::cout << std::setw(7) << k << ",";
std::cout << std::setw(15) << std::setprecision(4) << cublasTime << ",";
std::cout << std::setw(15) << std::setprecision(4) << tflops << "," << std::endl;
}
void calFP16Tensor(int m, int n, int k, float &s_max_tflops, int &s_max_m_n, int &s_max_k, int numRepeats){
half *a_fp16;
half *b_fp16;
half *c_cublas;
float *c_cublas_float;
float *c_host_cublas;
const float value = 0.1f;
cublasHandle_t cublasHandle;
cudaEvent_t startcublas;
cudaEvent_t stopcublas;
cudaErrCheck(cudaEventCreate(&startcublas));
cudaErrCheck(cudaEventCreate(&stopcublas));
cublasErrCheck(cublasCreate(&cublasHandle));
// Use tensor cores
cublasErrCheck(cublasSetMathMode(cublasHandle, CUBLAS_TENSOR_OP_MATH));
cudaErrCheck(cudaMalloc((void**)&a_fp16, m * k * sizeof(half)));
cudaErrCheck(cudaMalloc((void**)&b_fp16, k * n * sizeof(half)));
cudaErrCheck(cudaMalloc((void**)&c_cublas, m * n * sizeof(half)));
cudaErrCheck(cudaMalloc((void**)&c_cublas_float, m * n * sizeof(float)));
c_host_cublas = (float*)malloc(m * n * sizeof(float));
// Fill device buffers with constant values (no random init: curand lacks fp16 support)
assignHalfValue <<< (m * k + 255) / 256, 256 >>> (a_fp16, m*k, value);
assignHalfValue <<< (k * n + 255) / 256, 256 >>> (b_fp16, k*n, value);
assignHalfValue <<< (m * n + 255) / 256, 256 >>> (c_cublas, m*n, 0.0f);
// alpha and beta MUST be the same type as compute type
half alpha = __float2half(1.0f);
half beta = __float2half(0.0f);
// Now using cuBLAS
cudaErrCheck(cudaEventRecord(startcublas));
for (int iteration = 0; iteration < numRepeats; ++iteration) {
cublasGemmEx(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_T,
m, n, k,
&alpha,
a_fp16, CUDA_R_16F, m,
b_fp16, CUDA_R_16F, n,
&beta,
c_cublas, CUDA_R_16F, m,
CUDA_R_16F, CUBLAS_GEMM_DFALT_TENSOR_OP);
}
cudaErrCheck(cudaEventRecord(stopcublas));
cudaErrCheck(cudaEventSynchronize(stopcublas));
// Correctness check method1
// it will bring loss in half2float or float2half. For example, 0.1 will become 0.099976 after this change back and forth
/* assignHalftoFloatValue <<< (k * n + 255) / 256, 256 >>> (c_cublas, m*n, c_cublas_float);
cudaErrCheck(cudaMemcpy(c_host_cublas, c_cublas_float, m * n * sizeof(float), cudaMemcpyDeviceToHost));
correctnessCheck(m, n, k, c_host_cublas, value);
*/
// Correctness check method 2
halfCorrectnessCheck <<< (m * n + 255) / 256, 256 >>> (c_cublas, m*n, k, value);
// Check time
float cublasTime;
cudaErrCheck(cudaEventElapsedTime(&cublasTime, startcublas, stopcublas));
cublasTime /= numRepeats;
printTime(cublasTime, m, n, k, s_max_tflops, s_max_m_n, s_max_k);
cudaErrCheck(cudaEventDestroy(startcublas));
cudaErrCheck(cudaEventDestroy(stopcublas));
cudaErrCheck(cudaFree(a_fp16));
cudaErrCheck(cudaFree(b_fp16));
cudaErrCheck(cudaFree(c_cublas));
cudaErrCheck(cudaFree(c_cublas_float));
free(c_host_cublas);
}
void calFP16Accu32Tensor(int m, int n, int k, float &s_max_tflops, int &s_max_m_n, int &s_max_k, int numRepeats){
half *a_fp16;
half *b_fp16;
float *c_cublas;
float *c_host_cublas;
const float value = 1.0f;
cublasHandle_t cublasHandle;
cudaEvent_t startcublas;
cudaEvent_t stopcublas;
cudaErrCheck(cudaEventCreate(&startcublas));
cudaErrCheck(cudaEventCreate(&stopcublas));
cublasErrCheck(cublasCreate(&cublasHandle));
// Use tensor cores
cublasErrCheck(cublasSetMathMode(cublasHandle, CUBLAS_TENSOR_OP_MATH));
cudaErrCheck(cudaMalloc((void**)&a_fp16, m * k * sizeof(half)));
cudaErrCheck(cudaMalloc((void**)&b_fp16, k * n * sizeof(half)));
cudaErrCheck(cudaMalloc((void**)&c_cublas, m * n * sizeof(float)));
c_host_cublas = (float*)malloc(m * n * sizeof(float));
// Fill device buffers with constant values (no random init: curand lacks fp16 support)
assignHalfValue <<< (m * k + 255) / 256, 256 >>> (a_fp16, m*k, value);
assignHalfValue <<< (k * n + 255) / 256, 256 >>> (b_fp16, k*n, value);
assignFloatValue <<< (m * n + 255) / 256, 256 >>> (c_cublas, m*n, 0.0f);
float alpha = 1.0f;
float beta = 0.0f;
// Warm-up not really needed
// Now using cuBLAS
cudaErrCheck(cudaEventRecord(startcublas));
for (int iteration = 0; iteration < numRepeats; ++iteration) {
cublasErrCheck(cublasGemmEx(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_T,
m, n, k,
&alpha,
a_fp16, CUDA_R_16F, m,
b_fp16, CUDA_R_16F, n,
&beta,
c_cublas, CUDA_R_32F, m,
CUDA_R_32F, CUBLAS_GEMM_DFALT_TENSOR_OP));
}
cudaErrCheck(cudaEventRecord(stopcublas));
cudaErrCheck(cudaEventSynchronize(stopcublas));
// Correctness check
cudaErrCheck(cudaMemcpy(c_host_cublas, c_cublas, m * n * sizeof(float), cudaMemcpyDeviceToHost));
correctnessCheck(m, n, k, c_host_cublas, value);
// Check time
float cublasTime;
cudaErrCheck(cudaEventElapsedTime(&cublasTime, startcublas, stopcublas));
cublasTime /= numRepeats;
printTime(cublasTime, m, n, k, s_max_tflops, s_max_m_n, s_max_k);
cudaErrCheck(cudaEventDestroy(startcublas));
cudaErrCheck(cudaEventDestroy(stopcublas));
cudaErrCheck(cudaFree(a_fp16));
cudaErrCheck(cudaFree(b_fp16));
cudaErrCheck(cudaFree(c_cublas));
free(c_host_cublas);
}
void calFP32CUDA(int m, int n, int k, float &s_max_tflops, int &s_max_m_n, int &s_max_k, int numRepeats){
float *a_fp32;
float *b_fp32;
float *c_cublas;
float *c_host_cublas;
const float value = 1.0f;
cublasHandle_t cublasHandle;
cudaEvent_t startcublas;
cudaEvent_t stopcublas;
cudaErrCheck(cudaEventCreate(&startcublas));
cudaErrCheck(cudaEventCreate(&stopcublas));
cublasErrCheck(cublasCreate(&cublasHandle));
// No tensor cores
cublasErrCheck(cublasSetMathMode(cublasHandle, CUBLAS_DEFAULT_MATH));
cudaErrCheck(cudaMalloc((void**)&a_fp32, m * k * sizeof(float)));
cudaErrCheck(cudaMalloc((void**)&b_fp32, k * n * sizeof(float)));
cudaErrCheck(cudaMalloc((void**)&c_cublas, m * n * sizeof(float)));
c_host_cublas = (float*)malloc(m * n * sizeof(float));
// Fill device buffers with constant values
assignFloatValue <<< (m * k + 255) / 256, 256 >>> (a_fp32, m*k, value);
assignFloatValue <<< (k * n + 255) / 256, 256 >>> (b_fp32, k*n, value);
assignFloatValue <<< (m * n + 255) / 256, 256 >>> (c_cublas, m*n, 0.0f);
float alpha = 1.0f;
float beta = 0.0f;
cudaErrCheck(cudaEventRecord(startcublas));
for (int iteration = 0; iteration < numRepeats; ++iteration) {
cublasSgemm(cublasHandle,
CUBLAS_OP_N,
CUBLAS_OP_T,
m,
n,
k,
&alpha,
a_fp32, m,
b_fp32, n,
&beta,
c_cublas, m);
}
cudaErrCheck(cudaEventRecord(stopcublas));
cudaErrCheck(cudaEventSynchronize(stopcublas));
// Correctness check
cudaErrCheck(cudaMemcpy(c_host_cublas, c_cublas, m * n * sizeof(float), cudaMemcpyDeviceToHost));
correctnessCheck(m, n, k, c_host_cublas, value);
// Check time
float cublasTime = 0.0f;
cudaErrCheck(cudaEventElapsedTime(&cublasTime, startcublas, stopcublas));
cublasTime /= numRepeats;
printTime(cublasTime, m, n, k, s_max_tflops, s_max_m_n, s_max_k);
cudaErrCheck(cudaEventDestroy(startcublas));
cudaErrCheck(cudaEventDestroy(stopcublas));
cudaErrCheck(cudaFree(a_fp32));
cudaErrCheck(cudaFree(b_fp32));
cudaErrCheck(cudaFree(c_cublas));
free(c_host_cublas);
}
void calFP16CUDA(int m, int n, int k, float &s_max_tflops, int &s_max_m_n, int &s_max_k, int numRepeats){
half *a_fp16;
half *b_fp16;
half *c_cublas;
float *c_host_cublas;
const float value = 1.0f;
cublasHandle_t cublasHandle;
cudaEvent_t startcublas;
cudaEvent_t stopcublas;
cudaErrCheck(cudaEventCreate(&startcublas));
cudaErrCheck(cudaEventCreate(&stopcublas));
cublasErrCheck(cublasCreate(&cublasHandle));
// No tensor cores
cublasErrCheck(cublasSetMathMode(cublasHandle, CUBLAS_DEFAULT_MATH));
cudaErrCheck(cudaMalloc((void**)&a_fp16, m * k * sizeof(half)));
cudaErrCheck(cudaMalloc((void**)&b_fp16, k * n * sizeof(half)));
cudaErrCheck(cudaMalloc((void**)&c_cublas, m * n * sizeof(half)));
c_host_cublas = (float*)malloc(m * n * sizeof(float));
// Fill device buffers with constant values (no random init: curand lacks fp16 support)
assignHalfValue <<< (m * k + 255) / 256, 256 >>> (a_fp16, m*k, value);
assignHalfValue <<< (k * n + 255) / 256, 256 >>> (b_fp16, k*n, value);
assignHalfValue <<< (m * n + 255) / 256, 256 >>> (c_cublas, m*n, 0.0f);
half alpha = __float2half(1.0f);
half beta = __float2half(0.0f);
// Now using cuBLAS
cudaErrCheck(cudaEventRecord(startcublas));
for (int iteration = 0; iteration < numRepeats; ++iteration) {
cublasHgemm(cublasHandle,
CUBLAS_OP_N,
CUBLAS_OP_T,
m,
n,
k,
&alpha,
a_fp16, m,
b_fp16, n,
&beta,
c_cublas, m);
}
cudaErrCheck(cudaEventRecord(stopcublas));
cudaErrCheck(cudaEventSynchronize(stopcublas));
// TODO: Correctness check
//cudaErrCheck(cudaMemcpy(c_host_cublas, c_cublas, m * n * sizeof(float), cudaMemcpyDeviceToHost));
//correctnessCheck(m, n, k, c_host_cublas, value);
// Check time
float cublasTime;
cudaErrCheck(cudaEventElapsedTime(&cublasTime, startcublas, stopcublas));
cublasTime /= numRepeats;
printTime(cublasTime, m, n, k, s_max_tflops, s_max_m_n, s_max_k);
cudaErrCheck(cudaEventDestroy(startcublas));
cudaErrCheck(cudaEventDestroy(stopcublas));
cudaErrCheck(cudaFree(a_fp16));
cudaErrCheck(cudaFree(b_fp16));
cudaErrCheck(cudaFree(c_cublas));
free(c_host_cublas);
}
int main(int argc, char* argv[]) {
int m,n,k;
std::string precision="NULL";
bool perf = true;
if (argc < 3) {
std::cout << "Usage: " << argv[0] << " <precision> <performance|pressure>" << std::endl;
return EXIT_FAILURE;
}
// precision = INT8_TENSOR
// precision = FP16_TENSOR
// precision = FP16_32_TENSOR
// precision = FP32_CUDA
// precision = FP16_CUDA
if (argc == 3) {
precision = argv[1];
std::string tmp = argv[2];
if (tmp == "performance") perf= true;
else if (tmp == "pressure") perf = false;
else {
std::cout << "Invalid parameters!"<<std::endl;
return EXIT_FAILURE;
}
}
float s_max_tflops = 0;
int s_max_m_n = 0;
int s_max_k = 0;
int numRepeats;
/* // deprecated this INT8 test as it will achieve the best perf. Please refer to cublasLt
if (precision == "INT8_TENSOR" || precision == "NULL") {
std::cout << "[TensorCore INT8(INT32 accumulation) Time and TOPS Result]" << std::endl;
std::cout << std::setw(7) << "m" << std::setw(7) << "n" << std::setw(7) << "k";
std::cout << std::setw(15) << "Time (msec)" << std::setw(15) << "TOPS";
std::cout << std::endl;
// for tensorcore test TODO: to verify the int8 with int8 accumulation
for(m=1024, n = 1024; m <= 25600; m+=1024, n+=1024) {
for(k=1024; k <= 20480; k+=1024) {
int8_t *a_;
int8_t *b_;
int *c_cublas;
int *c_host_cublas;
//const int value = 1;
cublasHandle_t cublasHandle;
cudaEvent_t startcublas;
cudaEvent_t stopcublas;
cudaErrCheck(cudaEventCreate(&startcublas));
cudaErrCheck(cudaEventCreate(&stopcublas));
cublasErrCheck(cublasCreate(&cublasHandle));
// Use tensor cores
cublasErrCheck(cublasSetMathMode(cublasHandle, CUBLAS_TENSOR_OP_MATH));
cudaErrCheck(cudaMalloc((void**)&a_, m * k * sizeof(int8_t)));
cudaErrCheck(cudaMalloc((void**)&b_, k * m * sizeof(int8_t)));
cudaErrCheck(cudaMalloc((void**)&c_cublas, m * n * sizeof(int)));
c_host_cublas = (int*)malloc(m * n * sizeof(int));
//TODO curand doesn't currently support fp16 so we generate in fp32 and convert to fp16.
//assignHalfValue <<< (m * k + 255) / 256, 256 >>> (a_fp16, m*k, value);
//assignHalfValue <<< (k * n + 255) / 256, 256 >>> (b_fp16, k*n, value);
//assignHalfValue <<< (k * n + 255) / 256, 256 >>> (c_cublas, m*n, 0.0f);
int alpha = 1;
int beta = 0;
int numRepeats = 1;
// Warp up not really needed here as many params will be tested
// Now using cuBLAS
cudaErrCheck(cudaEventRecord(startcublas));
for (int iteration = 0; iteration < numRepeats; ++iteration) {
cublasErrCheck(cublasGemmEx(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_T,
m, n, k,
&alpha,
a_, CUDA_R_8I, m,
b_, CUDA_R_8I, n,
&beta,
c_cublas, CUDA_R_32I, m,
CUDA_R_32I, CUBLAS_GEMM_DFALT_TENSOR_OP));
}
cudaErrCheck(cudaEventRecord(stopcublas));
cudaErrCheck(cudaEventSynchronize(stopcublas));
// TODO: Correctness check
//cudaErrCheck(cudaMemcpy(c_host_cublas, c_cublas, m * n * sizeof(float), cudaMemcpyDeviceToHost));
//correctnessCheck(m, n, k, c_host_cublas, value);
// Check time
float cublasTime;
cudaErrCheck(cudaEventElapsedTime(&cublasTime, startcublas, stopcublas));
cublasTime /= numRepeats;
printTime(cublasTime, m, n, k, s_max_tflops, s_max_m_n, s_max_k);
cudaErrCheck(cudaEventDestroy(startcublas));
cudaErrCheck(cudaEventDestroy(stopcublas));
cudaErrCheck(cudaFree(a_));
cudaErrCheck(cudaFree(b_));
cudaErrCheck(cudaFree(c_cublas));
free(c_host_cublas);
}}
std::cout << "[Peak TFLOPS]=" << s_max_tflops << ", m=n="<< s_max_m_n << ", k="<<s_max_k<< std::endl;
cudaErrCheck(cudaDeviceReset());
}
*/
//======= for tensorcore test
// for perf test
if (precision == "FP16_TENSOR" && perf == true) {
std::cout << "[TensorCore FP16(FP16 accumulation) Time and TFLOPS Result]" << std::endl;
std::cout << std::setw(7) << "m" << std::setw(7) << "n" << std::setw(7) << "k";
std::cout << std::setw(15) << "Time (msec)" << std::setw(15) << "TFLOPS";
std::cout << std::endl;
s_max_tflops = 0;
s_max_m_n = 0;
s_max_k = 0;
numRepeats = 10;
for(m=1024, n = 1024; m <= 21504; m+=4096, n+=4096) {
for(k=1024; k <= 20480; k+=4096) {
// m=n=k=1024;
calFP16Tensor( m, n, k,s_max_tflops, s_max_m_n, s_max_k, numRepeats);
}}
std::cout << "[Peak TFLOPS]=" << s_max_tflops << ", m=n="<< s_max_m_n << ", k="<<s_max_k<< std::endl;
cudaErrCheck(cudaDeviceReset());
}
// for pressure test
if (precision == "FP16_TENSOR" && perf == false) {
std::cout << "[TensorCore FP16(FP16 accumulation) Time and TFLOPS Result]" << std::endl;
std::cout << std::setw(7) << "m" << std::setw(7) << "n" << std::setw(7) << "k";
std::cout << std::setw(15) << "Time (msec)" << std::setw(15) << "TFLOPS";
std::cout << std::endl;
s_max_tflops = 0;
s_max_m_n = 0;
s_max_k = 0;
numRepeats = 2000;
std::vector<int> mnk={512, 1024, 5120, 10240};
for(int i=0; i<mnk.size(); i++) calFP16Tensor( mnk[i], mnk[i], mnk[i], s_max_tflops, s_max_m_n, s_max_k, numRepeats);
cudaErrCheck(cudaDeviceReset());
}
// for perf test
if (precision == "FP16_32_TENSOR" && perf == true) {
std::cout << "[TensorCore FP16(FP32 accumulation) Time and TFLOPS Result]" << std::endl;
std::cout << std::setw(7) << "m" << std::setw(7) << "n" << std::setw(7) << "k";
std::cout << std::setw(15) << "Time (msec)" << std::setw(15) << "TFLOPS";
std::cout << std::endl;
s_max_tflops = 0;
numRepeats = 10;
for(m=1024, n = 1024; m <= 21504; m+=4096, n+=4096) {
for(k=1024; k <= 20480; k+=4096) {
// m=n=k=1024;
calFP16Accu32Tensor( m, n, k, s_max_tflops, s_max_m_n, s_max_k, numRepeats);
}}
std::cout << "[Peak TFLOPS]=" << s_max_tflops << ", m=n="<< s_max_m_n << ", k="<<s_max_k<< std::endl;
cudaErrCheck(cudaDeviceReset());
}
// for pressure test
if (precision == "FP16_32_TENSOR" && perf == false) {
std::cout << "[TensorCore FP16(FP32 accumulation) Time and TFLOPS Result]" << std::endl;
std::cout << std::setw(7) << "m" << std::setw(7) << "n" << std::setw(7) << "k";
std::cout << std::setw(15) << "Time (msec)" << std::setw(15) << "TFLOPS";
std::cout << std::endl;
s_max_tflops = 0;
numRepeats = 2000;
std::vector<int> mnk={512, 1024, 5120, 10240};
for(int i=0; i<mnk.size(); i++) calFP16Accu32Tensor( mnk[i], mnk[i], mnk[i], s_max_tflops, s_max_m_n, s_max_k, numRepeats);
cudaErrCheck(cudaDeviceReset());
}
//======= for cudacore test
if (precision == "FP32_CUDA" && perf == true) {
std::cout << "[CUDA core FP32 Time and TFLOPS Result]" << std::endl;
std::cout << std::setw(7) << "m" << std::setw(7) << "n" << std::setw(7) << "k";
std::cout << std::setw(15) << "Time (msec)" << std::setw(15) << "TFLOPS";
std::cout << std::endl;
s_max_tflops = 0;
numRepeats = 10;
for(m=1024, n = 1024; m <= 21504; m+=4096, n+=4096) {
for(k=1024; k <= 20480; k+=4096) {
calFP32CUDA( m, n, k,s_max_tflops, s_max_m_n, s_max_k, numRepeats);
}}
std::cout << "[Peak TFLOPS]=" << s_max_tflops << ", m=n="<< s_max_m_n << ", k="<<s_max_k<< std::endl;
cudaErrCheck(cudaDeviceReset());
}
// for pressure test
if (precision == "FP32_CUDA" && perf == false) {
std::cout << "[CUDA core FP32 Time and TFLOPS Result]" << std::endl;
std::cout << std::setw(7) << "m" << std::setw(7) << "n" << std::setw(7) << "k";
std::cout << std::setw(15) << "Time (msec)" << std::setw(15) << "TFLOPS";
std::cout << std::endl;
s_max_tflops = 0;
numRepeats = 2000;
std::vector<int> mnk={512, 1024, 5120, 10240};
for(int i=0; i<mnk.size(); i++) calFP32CUDA( mnk[i], mnk[i], mnk[i], s_max_tflops, s_max_m_n, s_max_k, numRepeats);
cudaErrCheck(cudaDeviceReset());
}
// for perf test
if (precision == "FP16_CUDA" && perf == true) {
std::cout << "[CUDA core FP16 Time and TFLOPS Result]" << std::endl;
std::cout << std::setw(7) << "m" << std::setw(7) << "n" << std::setw(7) << "k";
std::cout << std::setw(15) << "Time (msec)" << std::setw(15) << "TFLOPS";
std::cout << std::endl;
s_max_tflops = 0;
numRepeats = 10;
for(m=1024, n = 1024; m <= 21504; m+=4096, n+=4096) {
for(k=1024; k <= 20480; k+=4096) {
calFP16CUDA( m, n, k,s_max_tflops, s_max_m_n, s_max_k, numRepeats);
}}
std::cout << "[Peak TFLOPS]=" << s_max_tflops << ", m=n="<< s_max_m_n << ", k="<<s_max_k<< std::endl;
cudaErrCheck(cudaDeviceReset());
}
// for pressure test
if (precision == "FP16_CUDA" && perf == false) {
std::cout << "[CUDA core FP16 Time and TFLOPS Result]" << std::endl;
std::cout << std::setw(7) << "m" << std::setw(7) << "n" << std::setw(7) << "k";
std::cout << std::setw(15) << "Time (msec)" << std::setw(15) << "TFLOPS";
std::cout << std::endl;
s_max_tflops = 0;
numRepeats = 2000;
std::vector<int> mnk={512, 1024, 5120, 10240};
for(int i=0; i<mnk.size(); i++) calFP16CUDA( mnk[i], mnk[i], mnk[i], s_max_tflops, s_max_m_n, s_max_k, numRepeats);
cudaErrCheck(cudaDeviceReset());
}
return 0;
}
|
668ddc2d3b0a13ab7e0a5a8d594976448713cec8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int dims_update_halo_kernel2_yvel_plus_2_left [3][2];
static int dims_update_halo_kernel2_yvel_plus_2_left_h [3][2] = {0};
//user function
__device__
inline void update_halo_kernel2_yvel_plus_2_left_gpu(ACC<double> &yvel0,
ACC<double> &yvel1,
const int* fields)
{
if(fields[FIELD_YVEL0] == 1) yvel0(0,0,0) = yvel0(2,0,0);
if(fields[FIELD_YVEL1] == 1) yvel1(0,0,0) = yvel1(2,0,0);
}
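// The plus-2 left halo update: each enabled field copies the plane two cells
// inside the domain (x == 2) onto the left boundary plane (x == 0).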
__global__ void ops_update_halo_kernel2_yvel_plus_2_left(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel2_yvel_plus_2_left[0][0] + idx_z * 1*1 * dims_update_halo_kernel2_yvel_plus_2_left[0][0] * dims_update_halo_kernel2_yvel_plus_2_left[0][1];
arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel2_yvel_plus_2_left[1][0] + idx_z * 1*1 * dims_update_halo_kernel2_yvel_plus_2_left[1][0] * dims_update_halo_kernel2_yvel_plus_2_left[1][1];
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
ACC<double> argp0(dims_update_halo_kernel2_yvel_plus_2_left[0][0], dims_update_halo_kernel2_yvel_plus_2_left[0][1], arg0);
ACC<double> argp1(dims_update_halo_kernel2_yvel_plus_2_left[1][0], dims_update_halo_kernel2_yvel_plus_2_left[1][1], arg1);
update_halo_kernel2_yvel_plus_2_left_gpu(argp0, argp1, arg2);
}
}
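// Indexing note: arg0/arg1 are flattened as offset = x + y*xdim + z*xdim*ydim,
// with each dat's xdim/ydim read from the __constant__ dims array above.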
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel2_yvel_plus_2_left(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel2_yvel_plus_2_left_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[3] = { arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,3,range,41)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(41,"update_halo_kernel2_yvel_plus_2_left");
OPS_kernels[41].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[3];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 3,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != dims_update_halo_kernel2_yvel_plus_2_left_h[0][0] || ydim0 != dims_update_halo_kernel2_yvel_plus_2_left_h[0][1] || xdim1 != dims_update_halo_kernel2_yvel_plus_2_left_h[1][0] || ydim1 != dims_update_halo_kernel2_yvel_plus_2_left_h[1][1]) {
dims_update_halo_kernel2_yvel_plus_2_left_h[0][0] = xdim0;
dims_update_halo_kernel2_yvel_plus_2_left_h[0][1] = ydim0;
dims_update_halo_kernel2_yvel_plus_2_left_h[1][0] = xdim1;
dims_update_halo_kernel2_yvel_plus_2_left_h[1][1] = ydim1;
cutilSafeCall(hipMemcpyToSymbol( dims_update_halo_kernel2_yvel_plus_2_left, dims_update_halo_kernel2_yvel_plus_2_left_h, sizeof(dims_update_halo_kernel2_yvel_plus_2_left)));
}
int *arg2h = (int *)arg2.data;
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[41].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
hipLaunchKernelGGL(( ops_update_halo_kernel2_yvel_plus_2_left), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
(int *)arg2.data_d,x_size, y_size, z_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[41].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[41].mpi_time += t2-t1;
OPS_kernels[41].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[41].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel2_yvel_plus_2_left(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 41;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 41;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg*)malloc(3*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel2_yvel_plus_2_left_execute;
if (OPS_diags > 1) {
ops_timing_realloc(41,"update_halo_kernel2_yvel_plus_2_left");
}
ops_enqueue_kernel(desc);
}
#endif
| 668ddc2d3b0a13ab7e0a5a8d594976448713cec8.cu | //
// auto-generated by ops.py
//
__constant__ int dims_update_halo_kernel2_yvel_plus_2_left [3][2];
static int dims_update_halo_kernel2_yvel_plus_2_left_h [3][2] = {0};
//user function
__device__
inline void update_halo_kernel2_yvel_plus_2_left_gpu(ACC<double> &yvel0,
ACC<double> &yvel1,
const int* fields)
{
if(fields[FIELD_YVEL0] == 1) yvel0(0,0,0) = yvel0(2,0,0);
if(fields[FIELD_YVEL1] == 1) yvel1(0,0,0) = yvel1(2,0,0);
}
__global__ void ops_update_halo_kernel2_yvel_plus_2_left(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel2_yvel_plus_2_left[0][0] + idx_z * 1*1 * dims_update_halo_kernel2_yvel_plus_2_left[0][0] * dims_update_halo_kernel2_yvel_plus_2_left[0][1];
arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel2_yvel_plus_2_left[1][0] + idx_z * 1*1 * dims_update_halo_kernel2_yvel_plus_2_left[1][0] * dims_update_halo_kernel2_yvel_plus_2_left[1][1];
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
ACC<double> argp0(dims_update_halo_kernel2_yvel_plus_2_left[0][0], dims_update_halo_kernel2_yvel_plus_2_left[0][1], arg0);
ACC<double> argp1(dims_update_halo_kernel2_yvel_plus_2_left[1][0], dims_update_halo_kernel2_yvel_plus_2_left[1][1], arg1);
update_halo_kernel2_yvel_plus_2_left_gpu(argp0, argp1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel2_yvel_plus_2_left(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel2_yvel_plus_2_left_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[3] = { arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,3,range,41)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(41,"update_halo_kernel2_yvel_plus_2_left");
OPS_kernels[41].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[3];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 3,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != dims_update_halo_kernel2_yvel_plus_2_left_h[0][0] || ydim0 != dims_update_halo_kernel2_yvel_plus_2_left_h[0][1] || xdim1 != dims_update_halo_kernel2_yvel_plus_2_left_h[1][0] || ydim1 != dims_update_halo_kernel2_yvel_plus_2_left_h[1][1]) {
dims_update_halo_kernel2_yvel_plus_2_left_h[0][0] = xdim0;
dims_update_halo_kernel2_yvel_plus_2_left_h[0][1] = ydim0;
dims_update_halo_kernel2_yvel_plus_2_left_h[1][0] = xdim1;
dims_update_halo_kernel2_yvel_plus_2_left_h[1][1] = ydim1;
cutilSafeCall(cudaMemcpyToSymbol( dims_update_halo_kernel2_yvel_plus_2_left, dims_update_halo_kernel2_yvel_plus_2_left_h, sizeof(dims_update_halo_kernel2_yvel_plus_2_left)));
}
int *arg2h = (int *)arg2.data;
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[41].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
ops_update_halo_kernel2_yvel_plus_2_left<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
(int *)arg2.data_d,x_size, y_size, z_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[41].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[41].mpi_time += t2-t1;
OPS_kernels[41].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[41].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel2_yvel_plus_2_left(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 41;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 41;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg*)malloc(3*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel2_yvel_plus_2_left_execute;
if (OPS_diags > 1) {
ops_timing_realloc(41,"update_halo_kernel2_yvel_plus_2_left");
}
ops_enqueue_kernel(desc);
}
#endif
|
7e8cc993dee658598cc7ae1dda23ebd1e78ddc0b.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <unistd.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <stdarg.h>
#include <png.h>
void abort_(const char * s, ...)
{
va_list args;
va_start(args, s);
vfprintf(stderr, s, args);
fprintf(stderr, "\n");
va_end(args);
abort();
}
int x, y;
int width, height;
png_byte color_type;
png_byte bit_depth;
png_structp png;
png_infop info;
int number_of_passes;
png_bytep *rowPointer;
png_bytep *rowPointer2;
png_bytep *rowPointer3;
void read_png_file(char* file_name)
{
char header[8];
FILE *fp = fopen(file_name, "rb");
if (!fp)
abort_("[read_png_file] File %s could not be opened for reading", file_name);
if (fread(header, 1, 8, fp) != 8 || png_sig_cmp((png_bytep)header, 0, 8))
abort_("[read_png_file] File %s is not recognized as a PNG file", file_name);
png = png_create_read_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL);
if (!png)
abort_("[read_png_file] png_create_read_struct failed");
info = png_create_info_struct(png);
if (!info)
abort_("[read_png_file] png_create_info_struct failed");
if (setjmp(png_jmpbuf(png)))
abort_("[read_png_file] Error during init_io");
png_init_io(png, fp);
png_set_sig_bytes(png, 8);
png_read_info(png, info);
width = png_get_image_width(png, info);
height = png_get_image_height(png, info);
color_type = png_get_color_type(png, info);
bit_depth = png_get_bit_depth(png, info);
number_of_passes = png_set_interlace_handling(png);
png_read_update_info(png, info);
if (setjmp(png_jmpbuf(png)))
abort_("[read_png_file] Error during read_image");
rowPointer = (png_bytep*) malloc(sizeof(png_bytep) * height);
for (y=0; y<height; y++)
rowPointer[y] = (png_byte*) malloc(png_get_rowbytes(png,info));
png_read_image(png, rowPointer);
fclose(fp);
}
void write_png_file(char* file_name)
{
FILE *fp = fopen(file_name, "wb");
if (!fp)
abort_("[write_png_file] File %s could not be opened for writing", file_name);
png = png_create_write_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL);
if (!png)
abort_("[write_png_file] png_create_write_struct failed");
info = png_create_info_struct(png);
if (!info)
abort_("[write_png_file] png_create_info_struct failed");
if (setjmp(png_jmpbuf(png)))
abort_("[write_png_file] Error during init_io");
png_init_io(png, fp);
if (setjmp(png_jmpbuf(png)))
abort_("[write_png_file] Error during writing header");
png_set_IHDR(png, info, width, height,
bit_depth, color_type, PNG_INTERLACE_NONE,
PNG_COMPRESSION_TYPE_BASE, PNG_FILTER_TYPE_BASE);
png_write_info(png, info);
if (setjmp(png_jmpbuf(png)))
abort_("[write_png_file] Error during writing bytes");
png_write_image(png, rowPointer);
if (setjmp(png_jmpbuf(png)))
abort_("[write_png_file] Error during end of write");
png_write_end(png, NULL);
for (y=0; y<height; y++)
free(rowPointer[y]);
free(rowPointer);
fclose(fp);
}
__global__ void myBlur3( int *r_I, int *g_I, int *b_I,
int totalPixels, int height, int nthrds,int begin2 , int end2, int p , int nrows , int width){
// int i = blockDim.x * blockIdx.x + threadIdx.x;
int idx = threadIdx.x;
int istart,iend;
if ( idx!= 0 ) {
istart = idx * height*width / nthrds;
iend = (idx +1) * height *width/ nthrds;
if (iend + p >= height*width){
iend = (height*width)-p ;
}
}else {
istart =begin2;
iend = end2;
}
int i, aux;
for( i = istart; i < iend; i++){
// aux is the row stride used to reach vertical neighbours; it is reset each
// iteration and zeroed on the first/last row so the stencil stays in bounds.
aux = width;
if ( i < width ){
aux=0;
}
if ( i+width >= (height*width) ){
aux=0;
}
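// Each branch below applies an nrows x nrows box filter, e.g. nrows == 3
// averages the pixel with its 8 neighbours (sum of 9 samples / 9).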
if( nrows == 3){
//printf("START red %d green %d blue %d\n", r_I[i], g_I[i],b_I[i] );
r_I[i] = (double)(r_I[i]+r_I[i-1]+r_I[i+1]+r_I[i-aux]+r_I[i-1-aux]+r_I[i+1-aux]+r_I[i+aux]+r_I[i-1+aux]+r_I[i+1+aux])/9;
g_I[i] = (double)(g_I[i]+g_I[i-1]+g_I[i+1]+g_I[i-aux]+g_I[i-1-aux]+g_I[i+1-aux]+g_I[i+aux]+g_I[i-1+aux]+g_I[i+1+aux])/9;
b_I[i] = (double)(b_I[i]+b_I[i-1]+b_I[i+1]+b_I[i-aux]+b_I[i-1-aux]+b_I[i+1-aux]+b_I[i+aux]+b_I[i-1+aux]+b_I[i+1+aux])/9;
}
if( nrows == 5){
r_I[i] = (r_I[i]+r_I[i-1]+r_I[i-2]+r_I[i+1]+r_I[i+2]+
r_I[i-aux]+r_I[i-1-aux]+r_I[i-2-aux]+r_I[i+1-aux]+r_I[i+2-aux]+
r_I[i-(aux*2)]+r_I[i-1-(aux*2)]+r_I[i-2-(aux*2)]+r_I[i+1-(aux*2)]+r_I[i+2-(aux*2)]+
r_I[i+aux]+r_I[i-1+aux]+r_I[i-2+aux]+r_I[i+1+aux]+r_I[i+2+aux]+
r_I[i+(aux*2)]+r_I[i-1+(aux*2)]+r_I[i-2+(aux*2)]+r_I[i+1+(aux*2)]+r_I[i+2+(aux*2)])/25;
g_I[i] = (g_I[i]+g_I[i-1]+g_I[i-2]+g_I[i+1]+g_I[i+2]+
g_I[i-aux]+g_I[i-1-aux]+g_I[i-2-aux]+g_I[i+1-aux]+g_I[i+2-aux]+
g_I[i-(aux*2)]+g_I[i-1-(aux*2)]+g_I[i-2-(aux*2)]+g_I[i+1-(aux*2)]+g_I[i+2-(aux*2)]+
g_I[i+aux]+g_I[i-1+aux]+g_I[i-2+aux]+g_I[i+1+aux]+g_I[i+2+aux]+
g_I[i+(aux*2)]+g_I[i-1+(aux*2)]+g_I[i-2+(aux*2)]+g_I[i+1+(aux*2)]+g_I[i+2+(aux*2)])/25;
b_I[i] = (b_I[i]+b_I[i-1]+b_I[i-2]+b_I[i+1]+b_I[i+2]+
b_I[i-aux]+b_I[i-1-aux]+b_I[i-2-aux]+b_I[i+1-aux]+b_I[i+2-aux]+
b_I[i-(aux*2)]+b_I[i-1-(aux*2)]+b_I[i-2-(aux*2)]+b_I[i+1-(aux*2)]+b_I[i+2-(aux*2)]+
b_I[i+aux]+b_I[i-1+aux]+b_I[i-2+aux]+b_I[i+1+aux]+b_I[i+2+aux]+
b_I[i+(aux*2)]+b_I[i-1+(aux*2)]+b_I[i-2+(aux*2)]+b_I[i+1+(aux*2)]+b_I[i+2+(aux*2)])/25;
}
if( nrows == 7){
r_I[i] = (r_I[i]+r_I[i-1]+r_I[i-2]+r_I[i-3]+r_I[i+1]+r_I[i+2]+r_I[i+3]+
r_I[i-aux]+r_I[i-1-aux]+r_I[i-2-aux]+r_I[i-3-aux]+r_I[i+1-aux]+r_I[i+2-aux]+r_I[i+3-aux]+
r_I[i-(aux*2)]+r_I[i-1-(aux*2)]+r_I[i-2-(aux*2)]+r_I[i-3-(aux*2)]+r_I[i+1-(aux*2)]+r_I[i+2-(aux*2)]+r_I[i+3-(aux*2)]+
r_I[i-(aux*3)]+r_I[i-1-(aux*3)]+r_I[i-2-(aux*3)]+r_I[i-3-(aux*3)]+r_I[i+1-(aux*3)]+r_I[i+2-(aux*3)]+r_I[i+3-(aux*3)]+
r_I[i+aux]+r_I[i-1+aux]+r_I[i-2+aux]+r_I[i-3+aux]+r_I[i+1+aux]+r_I[i+2+aux]+r_I[i+3+aux]+
r_I[i+(aux*2)]+r_I[i-1+(aux*2)]+r_I[i-2+(aux*2)]+r_I[i-3+(aux*2)]+r_I[i+1+(aux*2)]+r_I[i+2+(aux*2)]+r_I[i+3+(aux*2)]+
r_I[i+(aux*3)]+r_I[i-1+(aux*3)]+r_I[i-2+(aux*3)]+r_I[i-3+(aux*3)]+r_I[i+1+(aux*3)]+r_I[i+2+(aux*3)]+r_I[i+3+(aux*3)])/49;
g_I[i] = (g_I[i]+g_I[i-1]+g_I[i-2]+g_I[i-3]+g_I[i+1]+g_I[i+2]+g_I[i+3]+
g_I[i-aux]+g_I[i-1-aux]+g_I[i-2-aux]+g_I[i-3-aux]+g_I[i+1-aux]+g_I[i+2-aux]+g_I[i+3-aux]+
g_I[i-(aux*2)]+g_I[i-1-(aux*2)]+g_I[i-2-(aux*2)]+g_I[i-3-(aux*2)]+g_I[i+1-(aux*2)]+g_I[i+2-(aux*2)]+g_I[i+3-(aux*2)]+
g_I[i-(aux*3)]+g_I[i-1-(aux*3)]+g_I[i-2-(aux*3)]+g_I[i-3-(aux*3)]+g_I[i+1-(aux*3)]+g_I[i+2-(aux*3)]+g_I[i+3-(aux*3)]+
g_I[i+aux]+g_I[i-1+aux]+g_I[i-2+aux]+g_I[i-3+aux]+g_I[i+1+aux]+g_I[i+2+aux]+g_I[i+3+aux]+
g_I[i+(aux*2)]+g_I[i-1+(aux*2)]+g_I[i-2+(aux*2)]+g_I[i-3+(aux*2)]+g_I[i+1+(aux*2)]+g_I[i+2+(aux*2)]+g_I[i+3+(aux*2)]+
g_I[i+(aux*3)]+g_I[i-1+(aux*3)]+g_I[i-2+(aux*3)]+g_I[i-3+(aux*3)]+g_I[i+1+(aux*3)]+g_I[i+2+(aux*3)]+g_I[i+3+(aux*3)])/49;
b_I[i] = (b_I[i]+b_I[i-1]+b_I[i-2]+b_I[i-3]+b_I[i+1]+b_I[i+2]+b_I[i+3]+
b_I[i-aux]+b_I[i-1-aux]+b_I[i-2-aux]+b_I[i-3-aux]+b_I[i+1-aux]+b_I[i+2-aux]+b_I[i+3-aux]+
b_I[i-(aux*2)]+b_I[i-1-(aux*2)]+b_I[i-2-(aux*2)]+b_I[i-3-(aux*2)]+b_I[i+1-(aux*2)]+b_I[i+2-(aux*2)]+b_I[i+3-(aux*2)]+
b_I[i-(aux*3)]+b_I[i-1-(aux*3)]+b_I[i-2-(aux*3)]+b_I[i-3-(aux*3)]+b_I[i+1-(aux*3)]+b_I[i+2-(aux*3)]+b_I[i+3-(aux*3)]+
b_I[i+aux]+b_I[i-1+aux]+b_I[i-2+aux]+b_I[i-3+aux]+b_I[i+1+aux]+b_I[i+2+aux]+b_I[i+3+aux]+
b_I[i+(aux*2)]+b_I[i-1+(aux*2)]+b_I[i-2+(aux*2)]+b_I[i-3+(aux*2)]+b_I[i+1+(aux*2)]+b_I[i+2+(aux*2)]+b_I[i+3+(aux*2)]+
b_I[i+(aux*3)]+b_I[i-1+(aux*3)]+b_I[i-2+(aux*3)]+b_I[i-3+(aux*3)]+b_I[i+1+(aux*3)]+b_I[i+2+(aux*3)]+b_I[i+3+(aux*3)])/49;
}
if( nrows == 9){
r_I[i] = ( r_I[i]+r_I[i-1]+r_I[i-2]+r_I[i-3]+r_I[i-4]+r_I[i+1]+r_I[i+2]+r_I[i+3]+r_I[i+4]+
r_I[i-aux]+r_I[i-1-aux]+r_I[i-2-aux]+r_I[i-3-aux]+r_I[i-4-aux]+r_I[i+1-aux]+r_I[i+2-aux]+r_I[i+3-aux]+ r_I[i+4-aux]+
r_I[i-(aux*2)]+r_I[i-1-(aux*2)]+r_I[i-2-(aux*2)]+r_I[i-3-(aux*2)]+r_I[i-4-(aux*2)]+r_I[i+1-(aux*2)]+r_I[i+2-(aux*2)]+r_I[i+3-(aux*2)]+r_I[i+4-(aux*2)]+
r_I[i-(aux*3)]+r_I[i-1-(aux*3)]+r_I[i-2-(aux*3)]+r_I[i-3-(aux*3)]+r_I[i-4-(aux*3)]+r_I[i+1-(aux*3)]+r_I[i+2-(aux*3)]+r_I[i+3-(aux*3)]+r_I[i+4-(aux*3)]+
r_I[i+aux]+r_I[i-1+aux]+r_I[i-2+aux]+r_I[i-3+aux]+r_I[i-4+aux]+r_I[i+1+aux]+r_I[i+2+aux]+r_I[i+3+aux]+r_I[i+4+aux]+
r_I[i+(aux*2)]+r_I[i-1+(aux*2)]+r_I[i-2+(aux*2)]+r_I[i-3+(aux*2)]+r_I[i-4+(aux*2)]+r_I[i+1+(aux*2)]+r_I[i+2+(aux*2)]+r_I[i+3+(aux*2)]+r_I[i+4+(aux*2)]+
r_I[i+(aux*3)]+r_I[i-1+(aux*3)]+r_I[i-2+(aux*3)]+r_I[i-3+(aux*3)]+r_I[i-4+(aux*3)]+r_I[i+1+(aux*3)]+r_I[i+2+(aux*3)]+r_I[i+3+(aux*3)]+r_I[i+4+(aux*3)]+
r_I[i+(aux*4)]+r_I[i-1+(aux*4)]+r_I[i-2+(aux*4)]+r_I[i-3+(aux*4)]+r_I[i-4+(aux*4)]+r_I[i+1+(aux*4)]+r_I[i+2+(aux*4)]+r_I[i+3+(aux*4)]+r_I[i+4+(aux*4)]+ r_I[i-(aux*4)]+r_I[i-1-(aux*4)]+r_I[i-2-(aux*4)]+r_I[i-3-(aux*4)]+r_I[i-4-(aux*4)]+r_I[i+1-(aux*4)]+r_I[i+2-(aux*4)]+r_I[i+3-(aux*4)]+r_I[i+4-(aux*4)] )/81;
g_I[i] = (g_I[i]+g_I[i-1]+g_I[i-2]+g_I[i-3]+g_I[i-4]+g_I[i+1]+g_I[i+2]+g_I[i+3]+g_I[i+4]+
g_I[i-aux]+g_I[i-1-aux]+g_I[i-2-aux]+g_I[i-3-aux]+g_I[i-4-aux]+g_I[i+1-aux]+g_I[i+2-aux]+g_I[i+3-aux]+g_I[i+4-aux]+
g_I[i-(aux*2)]+g_I[i-1-(aux*2)]+g_I[i-2-(aux*2)]+g_I[i-3-(aux*2)]+g_I[i-4-(aux*2)]+g_I[i+1-(aux*2)]+g_I[i+2-(aux*2)]+g_I[i+3-(aux*2)]+g_I[i+4-(aux*2)]+
g_I[i-(aux*3)]+g_I[i-1-(aux*3)]+g_I[i-2-(aux*3)]+g_I[i-3-(aux*3)]+g_I[i-4-(aux*3)]+g_I[i+1-(aux*3)]+g_I[i+2-(aux*3)]+g_I[i+3-(aux*3)]+g_I[i+4-(aux*3)]+
g_I[i+aux]+g_I[i-1+aux]+g_I[i-2+aux]+g_I[i-3+aux]+g_I[i-4+aux]+g_I[i+1+aux]+g_I[i+2+aux]+g_I[i+3+aux]+g_I[i+4+aux]+
g_I[i+(aux*2)]+g_I[i-1+(aux*2)]+g_I[i-2+(aux*2)]+g_I[i-3+(aux*2)]+g_I[i-4+(aux*2)]+g_I[i+1+(aux*2)]+g_I[i+2+(aux*2)]+g_I[i+3+(aux*2)]+g_I[i+4+(aux*2)]+
g_I[i+(aux*3)]+g_I[i-1+(aux*3)]+g_I[i-2+(aux*3)]+g_I[i-3+(aux*3)]+g_I[i-4+(aux*3)]+g_I[i+1+(aux*3)]+g_I[i+2+(aux*3)]+g_I[i+3+(aux*3)]+g_I[i+4+(aux*3)]+
g_I[i+(aux*4)]+g_I[i-1+(aux*4)]+g_I[i-2+(aux*4)]+g_I[i-3+(aux*4)]+g_I[i-4+(aux*4)]+g_I[i+1+(aux*4)]+g_I[i+2+(aux*4)]+g_I[i+3+(aux*4)]+g_I[i+4+(aux*4)]+ g_I[i-(aux*4)]+g_I[i-1-(aux*4)]+g_I[i-2-(aux*4)]+g_I[i-3-(aux*4)]+g_I[i-4-(aux*4)]+g_I[i+1-(aux*4)]+g_I[i+2-(aux*4)]+g_I[i+3-(aux*4)]+g_I[i+4-(aux*4)]
)/81;
b_I[i] = (b_I[i]+b_I[i-1]+b_I[i-2]+b_I[i-3]+b_I[i-4]+b_I[i+1]+b_I[i+2]+b_I[i+3]+b_I[i+4]+
b_I[i-aux]+b_I[i-1-aux]+b_I[i-2-aux]+b_I[i-3-aux]+b_I[i-4-aux]+b_I[i+1-aux]+b_I[i+2-aux]+b_I[i+3-aux]+b_I[i+4-aux]+
b_I[i-(aux*2)]+b_I[i-1-(aux*2)]+b_I[i-2-(aux*2)]+b_I[i-3-(aux*2)]+b_I[i-4-(aux*2)]+b_I[i+1-(aux*2)]+b_I[i+2-(aux*2)]+b_I[i+3-(aux*2)]+b_I[i+4-(aux*2)]+
b_I[i-(aux*3)]+b_I[i-1-(aux*3)]+b_I[i-2-(aux*3)]+b_I[i-3-(aux*3)]+b_I[i-4-(aux*3)]+b_I[i+1-(aux*3)]+b_I[i+2-(aux*3)]+b_I[i+3-(aux*3)]+b_I[i+4-(aux*3)]+
b_I[i+aux]+b_I[i-1+aux]+b_I[i-2+aux]+b_I[i-3+aux]+b_I[i-4+aux]+b_I[i+1+aux]+b_I[i+2+aux]+b_I[i+3+aux]+b_I[i+4+aux]+
b_I[i+(aux*2)]+b_I[i-1+(aux*2)]+b_I[i-2+(aux*2)]+b_I[i-3+(aux*2)]+b_I[i-4+(aux*2)]+b_I[i+1+(aux*2)]+b_I[i+2+(aux*2)]+b_I[i+3+(aux*2)]+b_I[i+4+(aux*2)]+
b_I[i+(aux*3)]+b_I[i-1+(aux*3)]+b_I[i-2+(aux*3)]+b_I[i-3+(aux*3)]+b_I[i-4+(aux*3)]+b_I[i+1+(aux*3)]+b_I[i+2+(aux*3)]+b_I[i+3+(aux*3)]+b_I[i+4+(aux*3)]+
b_I[i+(aux*4)]+b_I[i-1+(aux*4)]+b_I[i-2+(aux*4)]+b_I[i-3+(aux*4)]+b_I[i-4+(aux*4)]+b_I[i+1+(aux*4)]+b_I[i+2+(aux*4)]+b_I[i+3+(aux*4)]+b_I[i+4+(aux*4)]+ b_I[i-(aux*4)]+b_I[i-1-(aux*4)]+b_I[i-2-(aux*4)]+b_I[i-3-(aux*4)]+b_I[i-4-(aux*4)]+b_I[i+1-(aux*4)]+b_I[i+2-(aux*4)]+b_I[i+3-(aux*4)]+b_I[i+4-(aux*4)]
)/81;
}
if( nrows == 11 || nrows == 13 || nrows == 15){
// In-place box blur for the wider kernels. rad is the blur radius and aux
// the row stride (zeroed at the top/bottom border above, so edge pixels
// average within their own row). The nrows*nrows divisor matches the
// 121/169/225 constants used by the smaller, unrolled kernels; the original
// unrolled sums for these sizes dropped and duplicated neighbour terms.
int rad = nrows/2;
int sumr = 0, sumg = 0, sumb = 0;
for(int dr = -rad; dr <= rad; dr++){
for(int dc = -rad; dc <= rad; dc++){
sumr += r_I[i+dc+(aux*dr)];
sumg += g_I[i+dc+(aux*dr)];
sumb += b_I[i+dc+(aux*dr)];
}
}
r_I[i] = sumr/(nrows*nrows);
g_I[i] = sumg/(nrows*nrows);
b_I[i] = sumb/(nrows*nrows);
}
}//END FOR
// printf("TERMINA1 red %d green %d blue %d\n", r_I[368676], g_I[368676],b_I[368676] );
__syncthreads();
}
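// Host driver: decode a PNG with libpng, blur the three colour planes on
// the GPU in place, then re-encode the result.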
int main(int argc, char *argv[]) {
if(argc < 5){
printf("Please provide arguments as: inputimage.png outputimage.png #kernel #threads #blocks\n");
exit(0);}
read_png_file(argv[1]);
png_byte* row;
png_byte desrow;
png_byte desrow2;
png_byte desrow3;
png_byte* wrow;
//int totalP = *width * *height;
int numthreads = atoi(argv[4]);
//int numblocks = atoi(argv[5]);
char *res = (char*) malloc(30);
int totalPixels = width * height;
int x;
int inputKernel = atoi(argv[3]);
int kernel = inputKernel/2;
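// kernel (the blur radius) is only consumed by the commented-out myBlur
// launch further down; myBlur3 receives inputKernel directly.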
int divi, begin, end, begin2, end2,tnum,id, p,fin;
// Error code to check return values for CUDA calls
hipError_t err = hipSuccess;
// Compute the size in bytes of one colour-channel buffer
size_t size = totalPixels * sizeof(int);
// Allocate the host input vector R
int *h_rI = (int *)malloc(size);
// Allocate the host input vector G
int *h_gI = (int *)malloc(size);
// Allocate the host input vector B
int *h_bI = (int *)malloc(size);
// Verify that allocations succeeded
if (h_rI == NULL || h_gI == NULL || h_bI == NULL )
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
// Initialize the host input vectors
x =0;
for(int c=0; c<height; c++) {
row = rowPointer[c];
for(int d=0; d<width; d++){
wrow = &(row[d*4]);
desrow = wrow[0];
desrow2 = wrow[1];
desrow3 = wrow[2];
h_rI[x] = desrow;
h_gI[x] = desrow2;
h_bI[x] = desrow3;
//printf("%d %d %d\n", r[x], g[x],b[x] );
// desrow = g[x];
// desrow2 = b[x];
// desrow3 = r[x];
// wrow[0] = desrow;
// wrow[1] = desrow2;
// wrow[2] = desrow3;
// row[d*4] = *wrow;
x++;
}
}
// Allocate the device input vector R
int *d_rI = NULL;
err = hipMalloc((void **)&d_rI, size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector r (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device input vector G
int *d_gI = NULL;
err = hipMalloc((void **)&d_gI, size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector g (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device input vector B
int *d_bI = NULL;
err = hipMalloc((void **)&d_bI, size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector b (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device output vector R
int *d_rO = NULL;
err = hipMalloc((void **)&d_rO, size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector r (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device output vector G
int *d_gO = NULL;
err = hipMalloc((void **)&d_gO, size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector g (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device output vector B
int *d_bO = NULL;
err = hipMalloc((void **)&d_bO, size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector b (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the host input vectors (R, G and B) from host memory to the device
// input vectors in device memory
//printf("Copy input data from the host memory to the CUDA device\n");
err = hipMemcpy(d_rI, h_rI, size, hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector r from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_gI, h_gI, size, hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector g from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_bI, h_bI, size, hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector b from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
divi = (height*width/numthreads) ;
float rest = height%numthreads;
//printf("divi %i rest %f height %i \n", divi,rest,height);
end2 = divi;
// begin2 and p both hold the blur radius (inputKernel/2 for the odd kernel
// widths 3..15); thread 0 starts that many pixels into the image.
begin2 = inputKernel/2;
p = begin2;
fin = begin2;
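// With a single thread, stop 7 pixels early; 7 is the radius of the widest
// supported (15x15) kernel.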
if (numthreads == 1 ){
end2 = end2 - 7;
}
// Launch the blur kernel: a single block of numthreads threads (the
// blocksPerGrid value below is only used by the commented-out myBlur variant)
int threadsPerBlock = numthreads;
int blocksPerGrid =(totalPixels + threadsPerBlock - 1) / threadsPerBlock;
//printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
hipLaunchKernelGGL(( myBlur3), dim3(1), dim3(numthreads), 0, 0, d_rI, d_gI, d_bI, totalPixels, height,numthreads,begin2,end2,p,inputKernel,width);
//myBlur<<<blocksPerGrid, threadsPerBlock>>>(d_rI, d_gI, d_bI, d_rO, d_gO, d_bO, totalPixels, kernel);
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to launch myBlur kernel (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// printf("XX 2 red %d green %d blue %d\n", h_rI[368676], h_gI[368676],h_bI[368676] );
// Copy the device result vector in device memory to the host result vector
// in host memory.
//printf("Copy output data from the CUDA device to the host memory\n");
err = hipMemcpy(h_rI, d_rI, size, hipMemcpyDeviceToHost);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector rI from device to host (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(h_gI, d_gI, size, hipMemcpyDeviceToHost);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector gI from device to host (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(h_bI, d_bI, size, hipMemcpyDeviceToHost);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector bI from device to host (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
//printf("TERNA red %d green %d blue %d\n", h_rI[368676], h_gI[368676],h_bI[368676] );
x =0;
for(int c=0; c<height; c++) {
row = rowPointer[c];
for(int d=0; d<width; d++){
wrow = &(row[d*4]);
// desrow = wrow[0];
// desrow2 = wrow[1];
// desrow3 = wrow[2];
// h_rI[x] = desrow;
// h_gI[x] = desrow2;
// h_bI[x] = desrow3;
//printf("%d %d %d\n", r[x], g[x],b[x] );
desrow = h_rI[x];
desrow2 = h_gI[x];
desrow3 = h_bI[x];
wrow[0] = desrow;
wrow[1] = desrow2;
wrow[2] = desrow3;
row[d*4] = *wrow;
x++;
}
}
//printf("Test PASSED\n");
write_png_file(argv[2]);
// Free device global memory
err = hipFree(d_rI);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector rI (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(d_gI);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector gI (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(d_bI);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector bI (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Free the unused output buffers as well (only the commented-out myBlur
// variant writes to them)
hipFree(d_rO);
hipFree(d_gO);
hipFree(d_bO);
// Free host memory
free(h_rI);
free(h_gI);
free(h_bI);
return(0);
}
| 7e8cc993dee658598cc7ae1dda23ebd1e78ddc0b.cu | #include <stdio.h>
#include <cuda_runtime.h>
#include <unistd.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <png.h>
void abort_(const char * s, ...)
{
va_list args;
va_start(args, s);
vfprintf(stderr, s, args);
fprintf(stderr, "\n");
va_end(args);
abort();
}
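// Shared libpng state; rowPointer holds the decoded rows of the current image.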
int x, y;
int width, height;
png_byte color_type;
png_byte bit_depth;
png_structp png;
png_infop info;
int number_of_passes;
png_bytep *rowPointer;
png_bytep *rowPointer2;
png_bytep *rowPointer3;
void read_png_file(char* file_name)
{
char header[8];
FILE *fp = fopen(file_name, "rb");
if (!fp)
abort_("[read_png_file] File %s could not be opened for reading", file_name);
if (fread(header, 1, 8, fp) != 8 || png_sig_cmp((png_bytep)header, 0, 8))
abort_("[read_png_file] File %s is not recognized as a PNG file", file_name);
png = png_create_read_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL);
if (!png)
abort_("[read_png_file] png_create_read_struct failed");
info = png_create_info_struct(png);
if (!info)
abort_("[read_png_file] png_create_info_struct failed");
if (setjmp(png_jmpbuf(png)))
abort_("[read_png_file] Error during init_io");
png_init_io(png, fp);
png_set_sig_bytes(png, 8);
png_read_info(png, info);
width = png_get_image_width(png, info);
height = png_get_image_height(png, info);
color_type = png_get_color_type(png, info);
bit_depth = png_get_bit_depth(png, info);
number_of_passes = png_set_interlace_handling(png);
png_read_update_info(png, info);
if (setjmp(png_jmpbuf(png)))
abort_("[read_png_file] Error during read_image");
rowPointer = (png_bytep*) malloc(sizeof(png_bytep) * height);
for (y=0; y<height; y++)
rowPointer[y] = (png_byte*) malloc(png_get_rowbytes(png,info));
png_read_image(png, rowPointer);
fclose(fp);}
void write_png_file(char* file_name)
{
FILE *fp = fopen(file_name, "wb");
if (!fp)
abort_("[write_png_file] File %s could not be opened for writing", file_name);
png = png_create_write_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL);
if (!png)
abort_("[write_png_file] png_create_write_struct failed");
info = png_create_info_struct(png);
if (!info)
abort_("[write_png_file] png_create_info_struct failed");
if (setjmp(png_jmpbuf(png)))
abort_("[write_png_file] Error during init_io");
png_init_io(png, fp);
if (setjmp(png_jmpbuf(png)))
abort_("[write_png_file] Error during writing header");
png_set_IHDR(png, info, width, height,
bit_depth, color_type, PNG_INTERLACE_NONE,
PNG_COMPRESSION_TYPE_BASE, PNG_FILTER_TYPE_BASE);
png_write_info(png, info);
if (setjmp(png_jmpbuf(png)))
abort_("[write_png_file] Error during writing bytes");
png_write_image(png, rowPointer);
if (setjmp(png_jmpbuf(png)))
abort_("[write_png_file] Error during end of write");
png_write_end(png, NULL);
for (y=0; y<height; y++)
free(rowPointer[y]);
free(rowPointer);
fclose(fp);}
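// myBlur3: in-place box blur over the flattened colour planes. Each thread
// owns a contiguous slice of the pixel range; nrows is the odd box width.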
__global__ void myBlur3( int *r_I, int *g_I, int *b_I,
int totalPixels, int height, int nthrds,int begin2 , int end2, int p , int nrows , int width){
// int i = blockDim.x * blockIdx.x + threadIdx.x;
int idx = threadIdx.x;
int istart,iend;
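// Partition the pixel range evenly across the threads; thread 0 starts at
// begin2 (the blur radius) and slices near the end are clamped p pixels short.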
if ( idx!= 0 ) {
istart = idx * height*width / nthrds;
iend = (idx +1) * height *width/ nthrds;
if (iend + p >= height*width){
iend = (height*width)-p ;
}
}else {
istart =begin2;
iend = end2;
}
int i,aux;
// printf("start %i end %i width %i height %i \n", istart, iend,width, height );
// printf("empieza red %d green %d blue %d\n", r_I[368676], g_I[368676],b_I[368676] );
// printf(" red %d green %d blue %d\n", r_I[iend], g_I[iend],b_I[iend] );
for( i = istart; i < iend; i++){
aux = width;   // row stride between vertically adjacent pixels
if ( i < width ){   // first row: collapse the vertical offsets
aux=0;
}
if ( i+width >= (height*width) ){   // last row: collapse the vertical offsets
aux=0;
}
if( nrows == 3){
//printf("START red %d green %d blue %d\n", r_I[i], g_I[i],b_I[i] );
r_I[i] = (double)(r_I[i]+r_I[i-1]+r_I[i+1]+r_I[i-aux]+r_I[i-1-aux]+r_I[i+1-aux]+r_I[i+aux]+r_I[i-1+aux]+r_I[i+1+aux])/9;
g_I[i] = (double)(g_I[i]+g_I[i-1]+g_I[i+1]+g_I[i-aux]+g_I[i-1-aux]+g_I[i+1-aux]+g_I[i+aux]+g_I[i-1+aux]+g_I[i+1+aux])/9;
b_I[i] = (double)(b_I[i]+b_I[i-1]+b_I[i+1]+b_I[i-aux]+b_I[i-1-aux]+b_I[i+1-aux]+b_I[i+aux]+b_I[i-1+aux]+b_I[i+1+aux])/9;
}
if( nrows == 5){
r_I[i] = (r_I[i]+r_I[i-1]+r_I[i-2]+r_I[i+1]+r_I[i+2]+
r_I[i-aux]+r_I[i-1-aux]+r_I[i-2-aux]+r_I[i+1-aux]+r_I[i+2-aux]+
r_I[i-(aux*2)]+r_I[i-1-(aux*2)]+r_I[i-2-(aux*2)]+r_I[i+1-(aux*2)]+r_I[i+2-(aux*2)]+
r_I[i+aux]+r_I[i-1+aux]+r_I[i-2+aux]+r_I[i+1+aux]+r_I[i+2+aux]+
r_I[i+(aux*2)]+r_I[i-1+(aux*2)]+r_I[i-2+(aux*2)]+r_I[i+1+(aux*2)]+r_I[i+2+(aux*2)])/25;
g_I[i] = (g_I[i]+g_I[i-1]+g_I[i-2]+g_I[i+1]+g_I[i+2]+
g_I[i-aux]+g_I[i-1-aux]+g_I[i-2-aux]+g_I[i+1-aux]+g_I[i+2-aux]+
g_I[i-(aux*2)]+g_I[i-1-(aux*2)]+g_I[i-2-(aux*2)]+g_I[i+1-(aux*2)]+g_I[i+2-(aux*2)]+
g_I[i+aux]+g_I[i-1+aux]+g_I[i-2+aux]+g_I[i+1+aux]+g_I[i+2+aux]+
g_I[i+(aux*2)]+g_I[i-1+(aux*2)]+g_I[i-2+(aux*2)]+g_I[i+1+(aux*2)]+g_I[i+2+(aux*2)])/25;
b_I[i] = (b_I[i]+b_I[i-1]+b_I[i-2]+b_I[i+1]+b_I[i+2]+
b_I[i-aux]+b_I[i-1-aux]+b_I[i-2-aux]+b_I[i+1-aux]+b_I[i+2-aux]+
b_I[i-(aux*2)]+b_I[i-1-(aux*2)]+b_I[i-2-(aux*2)]+b_I[i+1-(aux*2)]+b_I[i+2-(aux*2)]+
b_I[i+aux]+b_I[i-1+aux]+b_I[i-2+aux]+b_I[i+1+aux]+b_I[i+2+aux]+
b_I[i+(aux*2)]+b_I[i-1+(aux*2)]+b_I[i-2+(aux*2)]+b_I[i+1+(aux*2)]+b_I[i+2+(aux*2)])/25;
}
if( nrows == 7){
r_I[i] = (r_I[i]+r_I[i-1]+r_I[i-2]+r_I[i-3]+r_I[i+1]+r_I[i+2]+r_I[i+3]+
r_I[i-aux]+r_I[i-1-aux]+r_I[i-2-aux]+r_I[i-3-aux]+r_I[i+1-aux]+r_I[i+2-aux]+r_I[i+3-aux]+
r_I[i-(aux*2)]+r_I[i-1-(aux*2)]+r_I[i-2-(aux*2)]+r_I[i-3-(aux*2)]+r_I[i+1-(aux*2)]+r_I[i+2-(aux*2)]+r_I[i+3-(aux*2)]+
r_I[i-(aux*3)]+r_I[i-1-(aux*3)]+r_I[i-2-(aux*3)]+r_I[i-3-(aux*3)]+r_I[i+1-(aux*3)]+r_I[i+2-(aux*3)]+r_I[i+3-(aux*3)]+
r_I[i+aux]+r_I[i-1+aux]+r_I[i-2+aux]+r_I[i-3+aux]+r_I[i+1+aux]+r_I[i+2+aux]+r_I[i+3+aux]+
r_I[i+(aux*2)]+r_I[i-1+(aux*2)]+r_I[i-2+(aux*2)]+r_I[i-3+(aux*2)]+r_I[i+1+(aux*2)]+r_I[i+2+(aux*2)]+r_I[i+3+(aux*2)]+
r_I[i+(aux*3)]+r_I[i-1+(aux*3)]+r_I[i-2+(aux*3)]+r_I[i-3+(aux*3)]+r_I[i+1+(aux*3)]+r_I[i+2+(aux*3)]+r_I[i+3+(aux*3)])/49;
g_I[i] = (g_I[i]+g_I[i-1]+g_I[i-2]+g_I[i-3]+g_I[i+1]+g_I[i+2]+g_I[i+3]+
g_I[i-aux]+g_I[i-1-aux]+g_I[i-2-aux]+g_I[i-3-aux]+g_I[i+1-aux]+g_I[i+2-aux]+g_I[i+3-aux]+
g_I[i-(aux*2)]+g_I[i-1-(aux*2)]+g_I[i-2-(aux*2)]+g_I[i-3-(aux*2)]+g_I[i+1-(aux*2)]+g_I[i+2-(aux*2)]+g_I[i+3-(aux*2)]+
g_I[i-(aux*3)]+g_I[i-1-(aux*3)]+g_I[i-2-(aux*3)]+g_I[i-3-(aux*3)]+g_I[i+1-(aux*3)]+g_I[i+2-(aux*3)]+g_I[i+3-(aux*3)]+
g_I[i+aux]+g_I[i-1+aux]+g_I[i-2+aux]+g_I[i-3+aux]+g_I[i+1+aux]+g_I[i+2+aux]+g_I[i+3+aux]+
g_I[i+(aux*2)]+g_I[i-1+(aux*2)]+g_I[i-2+(aux*2)]+g_I[i-3+(aux*2)]+g_I[i+1+(aux*2)]+g_I[i+2+(aux*2)]+g_I[i+3+(aux*2)]+
g_I[i+(aux*3)]+g_I[i-1+(aux*3)]+g_I[i-2+(aux*3)]+g_I[i-3+(aux*3)]+g_I[i+1+(aux*3)]+g_I[i+2+(aux*3)]+g_I[i+3+(aux*3)])/49;
b_I[i] = (b_I[i]+b_I[i-1]+b_I[i-2]+b_I[i-3]+b_I[i+1]+b_I[i+2]+b_I[i+3]+
b_I[i-aux]+b_I[i-1-aux]+b_I[i-2-aux]+b_I[i-3-aux]+b_I[i+1-aux]+b_I[i+2-aux]+b_I[i+3-aux]+
b_I[i-(aux*2)]+b_I[i-1-(aux*2)]+b_I[i-2-(aux*2)]+b_I[i-3-(aux*2)]+b_I[i+1-(aux*2)]+b_I[i+2-(aux*2)]+b_I[i+3-(aux*2)]+
b_I[i-(aux*3)]+b_I[i-1-(aux*3)]+b_I[i-2-(aux*3)]+b_I[i-3-(aux*3)]+b_I[i+1-(aux*3)]+b_I[i+2-(aux*3)]+b_I[i+3-(aux*3)]+
b_I[i+aux]+b_I[i-1+aux]+b_I[i-2+aux]+b_I[i-3+aux]+b_I[i+1+aux]+b_I[i+2+aux]+b_I[i+3+aux]+
b_I[i+(aux*2)]+b_I[i-1+(aux*2)]+b_I[i-2+(aux*2)]+b_I[i-3+(aux*2)]+b_I[i+1+(aux*2)]+b_I[i+2+(aux*2)]+b_I[i+3+(aux*2)]+
b_I[i+(aux*3)]+b_I[i-1+(aux*3)]+b_I[i-2+(aux*3)]+b_I[i-3+(aux*3)]+b_I[i+1+(aux*3)]+b_I[i+2+(aux*3)]+b_I[i+3+(aux*3)])/49;
}
if( nrows == 9){
r_I[i] = ( r_I[i]+r_I[i-1]+r_I[i-2]+r_I[i-3]+r_I[i-4]+r_I[i+1]+r_I[i+2]+r_I[i+3]+r_I[i+4]+
r_I[i-aux]+r_I[i-1-aux]+r_I[i-2-aux]+r_I[i-3-aux]+r_I[i-4-aux]+r_I[i+1-aux]+r_I[i+2-aux]+r_I[i+3-aux]+ r_I[i+4-aux]+
r_I[i-(aux*2)]+r_I[i-1-(aux*2)]+r_I[i-2-(aux*2)]+r_I[i-3-(aux*2)]+r_I[i-4-(aux*2)]+r_I[i+1-(aux*2)]+r_I[i+2-(aux*2)]+r_I[i+3-(aux*2)]+r_I[i+4-(aux*2)]+
r_I[i-(aux*3)]+r_I[i-1-(aux*3)]+r_I[i-2-(aux*3)]+r_I[i-3-(aux*3)]+r_I[i-4-(aux*3)]+r_I[i+1-(aux*3)]+r_I[i+2-(aux*3)]+r_I[i+3-(aux*3)]+r_I[i+4-(aux*3)]+
r_I[i+aux]+r_I[i-1+aux]+r_I[i-2+aux]+r_I[i-3+aux]+r_I[i-4+aux]+r_I[i+1+aux]+r_I[i+2+aux]+r_I[i+3+aux]+r_I[i+4+aux]+
r_I[i+(aux*2)]+r_I[i-1+(aux*2)]+r_I[i-2+(aux*2)]+r_I[i-3+(aux*2)]+r_I[i-4+(aux*2)]+r_I[i+1+(aux*2)]+r_I[i+2+(aux*2)]+r_I[i+3+(aux*2)]+r_I[i+4+(aux*2)]+
r_I[i+(aux*3)]+r_I[i-1+(aux*3)]+r_I[i-2+(aux*3)]+r_I[i-3+(aux*3)]+r_I[i-4+(aux*3)]+r_I[i+1+(aux*3)]+r_I[i+2+(aux*3)]+r_I[i+3+(aux*3)]+r_I[i+4+(aux*3)]+
r_I[i+(aux*4)]+r_I[i-1+(aux*4)]+r_I[i-2+(aux*4)]+r_I[i-3+(aux*4)]+r_I[i-4+(aux*4)]+r_I[i+1+(aux*4)]+r_I[i+2+(aux*4)]+r_I[i+3+(aux*4)]+r_I[i+4+(aux*4)]+ r_I[i-(aux*4)]+r_I[i-1-(aux*4)]+r_I[i-2-(aux*4)]+r_I[i-3-(aux*4)]+r_I[i-4-(aux*4)]+r_I[i+1-(aux*4)]+r_I[i+2-(aux*4)]+r_I[i+3-(aux*4)]+r_I[i+4-(aux*4)] )/81;
g_I[i] = (g_I[i]+g_I[i-1]+g_I[i-2]+g_I[i-3]+g_I[i-4]+g_I[i+1]+g_I[i+2]+g_I[i+3]+g_I[i+4]+
g_I[i-aux]+g_I[i-1-aux]+g_I[i-2-aux]+g_I[i-3-aux]+g_I[i-4-aux]+g_I[i+1-aux]+g_I[i+2-aux]+g_I[i+3-aux]+g_I[i+4-aux]+
g_I[i-(aux*2)]+g_I[i-1-(aux*2)]+g_I[i-2-(aux*2)]+g_I[i-3-(aux*2)]+g_I[i-4-(aux*2)]+g_I[i+1-(aux*2)]+g_I[i+2-(aux*2)]+g_I[i+3-(aux*2)]+g_I[i+4-(aux*2)]+
g_I[i-(aux*3)]+g_I[i-1-(aux*3)]+g_I[i-2-(aux*3)]+g_I[i-3-(aux*3)]+g_I[i-4-(aux*3)]+g_I[i+1-(aux*3)]+g_I[i+2-(aux*3)]+g_I[i+3-(aux*3)]+g_I[i+4-(aux*3)]+
g_I[i+aux]+g_I[i-1+aux]+g_I[i-2+aux]+g_I[i-3+aux]+g_I[i-4+aux]+g_I[i+1+aux]+g_I[i+2+aux]+g_I[i+3+aux]+g_I[i+4+aux]+
g_I[i+(aux*2)]+g_I[i-1+(aux*2)]+g_I[i-2+(aux*2)]+g_I[i-3+(aux*2)]+g_I[i-4+(aux*2)]+g_I[i+1+(aux*2)]+g_I[i+2+(aux*2)]+g_I[i+3+(aux*2)]+g_I[i+4+(aux*2)]+
g_I[i+(aux*3)]+g_I[i-1+(aux*3)]+g_I[i-2+(aux*3)]+g_I[i-3+(aux*3)]+g_I[i-4+(aux*3)]+g_I[i+1+(aux*3)]+g_I[i+2+(aux*3)]+g_I[i+3+(aux*3)]+g_I[i+4+(aux*3)]+
g_I[i+(aux*4)]+g_I[i-1+(aux*4)]+g_I[i-2+(aux*4)]+g_I[i-3+(aux*4)]+g_I[i-4+(aux*4)]+g_I[i+1+(aux*4)]+g_I[i+2+(aux*4)]+g_I[i+3+(aux*4)]+g_I[i+4+(aux*4)]+ g_I[i-(aux*4)]+g_I[i-1-(aux*4)]+g_I[i-2-(aux*4)]+g_I[i-3-(aux*4)]+g_I[i-4-(aux*4)]+g_I[i+1-(aux*4)]+g_I[i+2-(aux*4)]+g_I[i+3-(aux*4)]+g_I[i+4-(aux*4)]
)/81;
b_I[i] = (b_I[i]+b_I[i-1]+b_I[i-2]+b_I[i-3]+b_I[i-4]+b_I[i+1]+b_I[i+2]+b_I[i+3]+b_I[i+4]+
b_I[i-aux]+b_I[i-1-aux]+b_I[i-2-aux]+b_I[i-3-aux]+b_I[i-4-aux]+b_I[i+1-aux]+b_I[i+2-aux]+b_I[i+3-aux]+b_I[i+4-aux]+
b_I[i-(aux*2)]+b_I[i-1-(aux*2)]+b_I[i-2-(aux*2)]+b_I[i-3-(aux*2)]+b_I[i-4-(aux*2)]+b_I[i+1-(aux*2)]+b_I[i+2-(aux*2)]+b_I[i+3-(aux*2)]+b_I[i+4-(aux*2)]+
b_I[i-(aux*3)]+b_I[i-1-(aux*3)]+b_I[i-2-(aux*3)]+b_I[i-3-(aux*3)]+b_I[i-4-(aux*3)]+b_I[i+1-(aux*3)]+b_I[i+2-(aux*3)]+b_I[i+3-(aux*3)]+b_I[i+4-(aux*3)]+
b_I[i+aux]+b_I[i-1+aux]+b_I[i-2+aux]+b_I[i-3+aux]+b_I[i-4+aux]+b_I[i+1+aux]+b_I[i+2+aux]+b_I[i+3+aux]+b_I[i+4+aux]+
b_I[i+(aux*2)]+b_I[i-1+(aux*2)]+b_I[i-2+(aux*2)]+b_I[i-3+(aux*2)]+b_I[i-4+(aux*2)]+b_I[i+1+(aux*2)]+b_I[i+2+(aux*2)]+b_I[i+3+(aux*2)]+b_I[i+4+(aux*2)]+
b_I[i+(aux*3)]+b_I[i-1+(aux*3)]+b_I[i-2+(aux*3)]+b_I[i-3+(aux*3)]+b_I[i-4+(aux*3)]+b_I[i+1+(aux*3)]+b_I[i+2+(aux*3)]+b_I[i+3+(aux*3)]+b_I[i+4+(aux*3)]+
b_I[i+(aux*4)]+b_I[i-1+(aux*4)]+b_I[i-2+(aux*4)]+b_I[i-3+(aux*4)]+b_I[i-4+(aux*4)]+b_I[i+1+(aux*4)]+b_I[i+2+(aux*4)]+b_I[i+3+(aux*4)]+b_I[i+4+(aux*4)]+ b_I[i-(aux*4)]+b_I[i-1-(aux*4)]+b_I[i-2-(aux*4)]+b_I[i-3-(aux*4)]+b_I[i-4-(aux*4)]+b_I[i+1-(aux*4)]+b_I[i+2-(aux*4)]+b_I[i+3-(aux*4)]+b_I[i+4-(aux*4)]
)/81;
}
if( nrows == 11 || nrows == 13){
// In-place box blur for the 11x11 and 13x13 kernels. rad is the blur radius
// and aux the row stride (zeroed at the top/bottom border above, so edge
// pixels average within their own row). The nrows*nrows divisor matches the
// 121/169 constants used by the smaller, unrolled kernels; the original
// unrolled sums for these sizes dropped and duplicated neighbour terms.
int rad = nrows/2;
int sumr = 0, sumg = 0, sumb = 0;
for(int dr = -rad; dr <= rad; dr++){
for(int dc = -rad; dc <= rad; dc++){
sumr += r_I[i+dc+(aux*dr)];
sumg += g_I[i+dc+(aux*dr)];
sumb += b_I[i+dc+(aux*dr)];
}
}
r_I[i] = sumr/(nrows*nrows);
g_I[i] = sumg/(nrows*nrows);
b_I[i] = sumb/(nrows*nrows);
}
if( nrows == 15){
{ // red channel, 15x15 in-place box blur (225 taps; replaces an unrolled
// sum that dropped and duplicated neighbour terms)
int sumr = 0;
for(int dr = -7; dr <= 7; dr++){
for(int dc = -7; dc <= 7; dc++){
sumr += r_I[i+dc+(aux*dr)];
}
}
r_I[i] = sumr/225;
}
b_I[i] = ( b_I[i]+b_I[i-1]+b_I[i-2]+b_I[i-3]+b_I[i-4]+b_I[i+1]+b_I[i+2]+b_I[i+3]+b_I[i+4]+b_I[i+5]+b_I[i+6]+b_I[i+7]+
b_I[i-aux]+b_I[i-1-aux]+b_I[i-2-aux]+b_I[i-3-aux]+b_I[i-4-aux]+b_I[i+1-aux]+b_I[i+2-aux]+b_I[i+3-aux]+ b_I[i+4-aux]+b_I[i+5-aux]+b_I[i+6-aux]+b_I[i+7-aux]+
b_I[i-(aux*2)]+b_I[i-1-(aux*2)]+b_I[i-2-(aux*2)]+b_I[i-3-(aux*2)]+b_I[i-4-(aux*2)]+b_I[i+1-(aux*2)]+b_I[i+2-(aux*2)]+b_I[i+3-(aux*2)]+b_I[i+4-(aux*2)]+b_I[i+5-(aux*2)]+b_I[i+6-(aux*2)]+b_I[i+7-(aux*2)]+
b_I[i-(aux*3)]+b_I[i-1-(aux*3)]+b_I[i-2-(aux*3)]+b_I[i-3-(aux*3)]+b_I[i-4-(aux*3)]+b_I[i+1-(aux*3)]+b_I[i+2-(aux*3)]+b_I[i+3-(aux*3)]+b_I[i+4-(aux*3)]+b_I[i+5-(aux*3)]+b_I[i+6-(aux*3)]+b_I[i+7-(aux*3)]+
b_I[i+aux]+b_I[i-1+aux]+b_I[i-2+aux]+b_I[i-3+aux]+b_I[i-4+aux]+b_I[i+1+aux]+b_I[i+2+aux]+b_I[i+3+aux]+b_I[i+4+aux]+b_I[i+5+aux]+b_I[i+6+aux]+b_I[i+7+aux]+
b_I[i+(aux*2)]+b_I[i-1+(aux*2)]+b_I[i-2+(aux*2)]+b_I[i-3+(aux*2)]+b_I[i-4+(aux*2)]+b_I[i+1+(aux*2)]+b_I[i+2+(aux*2)]+b_I[i+3+(aux*2)]+b_I[i+4+(aux*2)]+b_I[i+5+(aux*2)]+b_I[i+6+(aux*2)]+b_I[i+7+(aux*2)]+
b_I[i+(aux*3)]+b_I[i-1+(aux*3)]+b_I[i-2+(aux*3)]+b_I[i-3+(aux*3)]+b_I[i-4+(aux*3)]+b_I[i+1+(aux*3)]+b_I[i+2+(aux*3)]+b_I[i+3+(aux*3)]+b_I[i+4+(aux*3)]+b_I[i+5+(aux*3)]+b_I[i+6+(aux*3)]+b_I[i+7+(aux*3)]+
b_I[i+(aux*4)]+b_I[i-1+(aux*4)]+b_I[i-2+(aux*4)]+b_I[i-3+(aux*4)]+b_I[i-4+(aux*4)]+b_I[i-5+(aux*4)]+b_I[i-6+(aux*4)]+b_I[i-7+(aux*4)]
+b_I[i+1+(aux*4)]+b_I[i+2+(aux*4)]+b_I[i+3+(aux*4)]+b_I[i+4+(aux*4)] +b_I[i+5+(aux*4)] +b_I[i+6+(aux*4)]
+ b_I[i-(aux*4)]+b_I[i-1-(aux*4)]+b_I[i-2-(aux*4)]+b_I[i-3-(aux*4)]+b_I[i-4-(aux*4)]+b_I[i-5-(aux*4)]+b_I[i-6-(aux*4)]+b_I[i-7-(aux*4)]
+b_I[i+1-(aux*4)]+b_I[i+2-(aux*4)]+b_I[i+3-(aux*4)]+b_I[i+4-(aux*4)] +b_I[i+5-(aux*4)] +b_I[i+6-(aux*4)] +b_I[i+7-(aux*4)] +
b_I[i+(aux*5)]+b_I[i-1+(aux*5)]+b_I[i-2+(aux*5)]+b_I[i-3+(aux*5)]+b_I[i-4+(aux*5)]+b_I[i-5+(aux*5)]+b_I[i-6+(aux*5)]+b_I[i-7+(aux*5)]
+b_I[i+1+(aux*5)]+b_I[i+2+(aux*5)]+b_I[i+3+(aux*5)]+b_I[i+4+(aux*5)] +b_I[i+5+(aux*5)]+b_I[i+6+(aux*5)]+b_I[i+7+(aux*5)]
+ b_I[i-(aux*5)]+b_I[i-1-(aux*5)]+b_I[i-2-(aux*5)]+b_I[i-3-(aux*5)]+b_I[i-4-(aux*5)]+b_I[i-5-(aux*5)]+b_I[i+1-(aux*5)]+b_I[i+2-(aux*5)]+b_I[i+3-(aux*5)]+b_I[i+4-(aux*5)] +b_I[i+5-(aux*5)]+b_I[i+6-(aux*5)]+b_I[i+7-(aux*5)]+
b_I[i+(aux*6)]+b_I[i-1+(aux*6)]+b_I[i-2+(aux*6)]+b_I[i-3+(aux*6)]+b_I[i-4+(aux*6)]+b_I[i-5+(aux*6)]+b_I[i-6+(aux*6)]+b_I[i-7+(aux*6)]
+b_I[i+1+(aux*6)]+b_I[i+2+(aux*6)]+b_I[i+3+(aux*6)]+b_I[i+4+(aux*6)] +b_I[i+5+(aux*6)]+b_I[i+6+(aux*6)]+b_I[i+7+(aux*6)]
+ b_I[i-(aux*6)]+b_I[i-1-(aux*6)]+b_I[i-2-(aux*6)]+b_I[i-3-(aux*6)]+b_I[i-4-(aux*6)]+b_I[i-5-(aux*6)]+b_I[i+1-(aux*6)]+b_I[i+2-(aux*6)]+b_I[i+3-(aux*6)]+b_I[i+4-(aux*6)] +b_I[i+5-(aux*6)]+b_I[i+6-(aux*6)]+b_I[i+7-(aux*6)]
+b_I[i+1+(aux*7)]+b_I[i+2+(aux*7)]+b_I[i+3+(aux*7)]+b_I[i+4+(aux*7)] +b_I[i+5+(aux*7)]+b_I[i+6+(aux*7)]+b_I[i+7+(aux*7)]
+ b_I[i-(aux*7)]+b_I[i-1-(aux*7)]+b_I[i-2-(aux*7)]+b_I[i-3-(aux*7)]+b_I[i-4-(aux*7)]+b_I[i-5-(aux*7)]+b_I[i+1-(aux*7)]+b_I[i+2-(aux*7)]+b_I[i+3-(aux*7)]+b_I[i+4-(aux*7)] +b_I[i+5-(aux*7)]+b_I[i+6-(aux*7)]+b_I[i+7-(aux*7)]
)/225;
g_I[i] = ( g_I[i]+g_I[i-1]+g_I[i-2]+g_I[i-3]+g_I[i-4]+g_I[i+1]+g_I[i+2]+g_I[i+3]+g_I[i+4]+g_I[i+5]+g_I[i+6]+g_I[i+7]+
g_I[i-aux]+g_I[i-1-aux]+g_I[i-2-aux]+g_I[i-3-aux]+g_I[i-4-aux]+g_I[i+1-aux]+g_I[i+2-aux]+g_I[i+3-aux]+ g_I[i+4-aux]+g_I[i+5-aux]+g_I[i+6-aux]+g_I[i+7-aux]+
g_I[i-(aux*2)]+g_I[i-1-(aux*2)]+g_I[i-2-(aux*2)]+g_I[i-3-(aux*2)]+g_I[i-4-(aux*2)]+g_I[i+1-(aux*2)]+g_I[i+2-(aux*2)]+g_I[i+3-(aux*2)]+g_I[i+4-(aux*2)]+g_I[i+5-(aux*2)]+g_I[i+6-(aux*2)]+g_I[i+7-(aux*2)]+
g_I[i-(aux*3)]+g_I[i-1-(aux*3)]+g_I[i-2-(aux*3)]+g_I[i-3-(aux*3)]+g_I[i-4-(aux*3)]+g_I[i+1-(aux*3)]+g_I[i+2-(aux*3)]+g_I[i+3-(aux*3)]+g_I[i+4-(aux*3)]+g_I[i+5-(aux*3)]+g_I[i+6-(aux*3)]+g_I[i+7-(aux*3)]+
g_I[i+aux]+g_I[i-1+aux]+g_I[i-2+aux]+g_I[i-3+aux]+g_I[i-4+aux]+g_I[i+1+aux]+g_I[i+2+aux]+g_I[i+3+aux]+g_I[i+4+aux]+g_I[i+5+aux]+g_I[i+6+aux]+g_I[i+7+aux]+
g_I[i+(aux*2)]+g_I[i-1+(aux*2)]+g_I[i-2+(aux*2)]+g_I[i-3+(aux*2)]+g_I[i-4+(aux*2)]+g_I[i+1+(aux*2)]+g_I[i+2+(aux*2)]+g_I[i+3+(aux*2)]+g_I[i+4+(aux*2)]+g_I[i+5+(aux*2)]+g_I[i+6+(aux*2)]+g_I[i+7+(aux*2)]+
g_I[i+(aux*3)]+g_I[i-1+(aux*3)]+g_I[i-2+(aux*3)]+g_I[i-3+(aux*3)]+g_I[i-4+(aux*3)]+g_I[i+1+(aux*3)]+g_I[i+2+(aux*3)]+g_I[i+3+(aux*3)]+g_I[i+4+(aux*3)]+g_I[i+5+(aux*3)]+g_I[i+6+(aux*3)]+g_I[i+7+(aux*3)]+
g_I[i+(aux*4)]+g_I[i-1+(aux*4)]+g_I[i-2+(aux*4)]+g_I[i-3+(aux*4)]+g_I[i-4+(aux*4)]+g_I[i-5+(aux*4)]+g_I[i-6+(aux*4)]+g_I[i-7+(aux*4)]
+g_I[i+1+(aux*4)]+g_I[i+2+(aux*4)]+g_I[i+3+(aux*4)]+g_I[i+4+(aux*4)] +g_I[i+5+(aux*4)] +g_I[i+6+(aux*4)]
+ g_I[i-(aux*4)]+g_I[i-1-(aux*4)]+g_I[i-2-(aux*4)]+g_I[i-3-(aux*4)]+g_I[i-4-(aux*4)]+g_I[i-5-(aux*4)]+g_I[i-6-(aux*4)]+g_I[i-7-(aux*4)]
+g_I[i+1-(aux*4)]+g_I[i+2-(aux*4)]+g_I[i+3-(aux*4)]+g_I[i+4-(aux*4)] +g_I[i+5-(aux*4)] +g_I[i+6-(aux*4)] +g_I[i+7-(aux*4)] +
g_I[i+(aux*5)]+g_I[i-1+(aux*5)]+g_I[i-2+(aux*5)]+g_I[i-3+(aux*5)]+g_I[i-4+(aux*5)]+g_I[i-5+(aux*5)]+g_I[i-6+(aux*5)]+g_I[i-7+(aux*5)]
+g_I[i+1+(aux*5)]+g_I[i+2+(aux*5)]+g_I[i+3+(aux*5)]+g_I[i+4+(aux*5)] +g_I[i+5+(aux*5)]+g_I[i+6+(aux*5)]+g_I[i+7+(aux*5)]
+ g_I[i-(aux*5)]+g_I[i-1-(aux*5)]+g_I[i-2-(aux*5)]+g_I[i-3-(aux*5)]+g_I[i-4-(aux*5)]+g_I[i-5-(aux*5)]+g_I[i+1-(aux*5)]+g_I[i+2-(aux*5)]+g_I[i+3-(aux*5)]+g_I[i+4-(aux*5)] +g_I[i+5-(aux*5)]+g_I[i+6-(aux*5)]+g_I[i+7-(aux*5)]+
g_I[i+(aux*6)]+g_I[i-1+(aux*6)]+g_I[i-2+(aux*6)]+g_I[i-3+(aux*6)]+g_I[i-4+(aux*6)]+g_I[i-5+(aux*6)]+g_I[i-6+(aux*6)]+g_I[i-7+(aux*6)]
+g_I[i+1+(aux*6)]+g_I[i+2+(aux*6)]+g_I[i+3+(aux*6)]+g_I[i+4+(aux*6)] +g_I[i+5+(aux*6)]+g_I[i+6+(aux*6)]+g_I[i+7+(aux*6)]
+ g_I[i-(aux*6)]+g_I[i-1-(aux*6)]+g_I[i-2-(aux*6)]+g_I[i-3-(aux*6)]+g_I[i-4-(aux*6)]+g_I[i-5-(aux*6)]+g_I[i+1-(aux*6)]+g_I[i+2-(aux*6)]+g_I[i+3-(aux*6)]+g_I[i+4-(aux*6)] +g_I[i+5-(aux*6)]+g_I[i+6-(aux*6)]+g_I[i+7-(aux*6)]
+g_I[i+1+(aux*7)]+g_I[i+2+(aux*7)]+g_I[i+3+(aux*7)]+g_I[i+4+(aux*7)] +g_I[i+5+(aux*7)]+g_I[i+6+(aux*7)]+g_I[i+7+(aux*7)]
+ g_I[i-(aux*7)]+g_I[i-1-(aux*7)]+g_I[i-2-(aux*7)]+g_I[i-3-(aux*7)]+g_I[i-4-(aux*7)]+g_I[i-5-(aux*7)]+g_I[i+1-(aux*7)]+g_I[i+2-(aux*7)]+g_I[i+3-(aux*7)]+g_I[i+4-(aux*7)] +g_I[i+5-(aux*7)]+g_I[i+6-(aux*7)]+g_I[i+7-(aux*7)]
)/225;
}
}//END FOR
// printf("TERMINA1 red %d green %d blue %d\n", r_I[368676], g_I[368676],b_I[368676] );
__syncthreads();
}
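// Note: the kernel above smooths r_I/g_I/b_I in place, so threads read
// neighbor values that other threads may already have overwritten; the
// single __syncthreads() after the loop only synchronizes threads within
// one block and does not remove that read/write ordering hazard.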
int main(int argc, char *argv[]) {
if(argc < 5){
printf("Please provide arguments like this: imagename.png newimage.png #kernel #threads #blocks\n");
exit(0);}
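// Example invocation (binary name illustrative), matching the usage text:
// ./blur input.png output.png 15 256 1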
read_png_file(argv[1]);
png_byte* row;
png_byte desrow;
png_byte desrow2;
png_byte desrow3;
png_byte* wrow;
//int totalP = *width * *height;
int numthreads = atoi(argv[4]);
//int numblocks = atoi(argv[5]);
char *res = (char*) malloc(30);
int totalPixels = width * height;
int x;
int inputKernel = atoi(argv[3]);
int kernel = inputKernel/2;
int divi, begin, end, begin2, end2,tnum,id, p,fin;
// Error code to check return values for CUDA calls
cudaError_t err = cudaSuccess;
// Compute the per-channel buffer size (one int per pixel)
size_t size = totalPixels * sizeof(int);
// Allocate the host input vector R
int *h_rI = (int *)malloc(size);
// Allocate the host input vector G
int *h_gI = (int *)malloc(size);
// Allocate the host input vector B
int *h_bI = (int *)malloc(size);
// Verify that allocations succeeded
if (h_rI == NULL || h_gI == NULL || h_bI == NULL )
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
// Initialize the host input vectors
x =0;
for(int c=0; c<height; c++) {
row = rowPointer[c];
for(int d=0; d<width; d++){
wrow = &(row[d*4]);
desrow = wrow[0];
desrow2 = wrow[1];
desrow3 = wrow[2];
h_rI[x] = desrow;
h_gI[x] = desrow2;
h_bI[x] = desrow3;
//printf("%d %d %d\n", r[x], g[x],b[x] );
// desrow = g[x];
// desrow2 = b[x];
// desrow3 = r[x];
// wrow[0] = desrow;
// wrow[1] = desrow2;
// wrow[2] = desrow3;
// row[d*4] = *wrow;
x++;
}
}
// Allocate the device input vector R
int *d_rI = NULL;
err = cudaMalloc((void **)&d_rI, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector r (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device input vector G
int *d_gI = NULL;
err = cudaMalloc((void **)&d_gI, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector g (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device input vector B
int *d_bI = NULL;
err = cudaMalloc((void **)&d_bI, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector b (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device output vector R
int *d_rO = NULL;
err = cudaMalloc((void **)&d_rO, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector r (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device output vector G
int *d_gO = NULL;
err = cudaMalloc((void **)&d_gO, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector g (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device output vector B
int *d_bO = NULL;
err = cudaMalloc((void **)&d_bO, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector b (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the host input vectors A and B in host memory to the device input vectors in
// device memory
//printf("Copy input data from the host memory to the CUDA device\n");
err = cudaMemcpy(d_rI, h_rI, size, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector r from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_gI, h_gI, size, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector g from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_bI, h_bI, size, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector b from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
divi = (height*width/numthreads) ;
float rest = height%numthreads;
//printf("divi %i rest %f height %i \n", divi,rest,height);
end2 = divi;
// begin2/p hold the half-width of the blur window (kernel sizes 3..15)
begin2 = kernel;
p = kernel;
fin = begin2;
if (numthreads == 1 ){
end2 = end2 - 7;
}
// Launch the blur CUDA kernel
int threadsPerBlock = numthreads;
int blocksPerGrid =(totalPixels + threadsPerBlock - 1) / threadsPerBlock;
//printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
myBlur3<<<1, numthreads>>>(d_rI, d_gI, d_bI, totalPixels, height,numthreads,begin2,end2,p,inputKernel,width);
//myBlur<<<blocksPerGrid, threadsPerBlock>>>(d_rI, d_gI, d_bI, d_rO, d_gO, d_bO, totalPixels, kernel);
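// Note: blocksPerGrid is computed above but the launch uses a single block
// (<<<1, numthreads>>>), so the whole image is processed by numthreads
// threads in one block; the commented call shows the grid-wide alternative.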
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to launch myBlur kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// printf("XX 2 red %d green %d blue %d\n", h_rI[368676], h_gI[368676],h_bI[368676] );
// Copy the device result vector in device memory to the host result vector
// in host memory.
//printf("Copy output data from the CUDA device to the host memory\n");
err = cudaMemcpy(h_rI, d_rI, size, cudaMemcpyDeviceToHost);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector rI from device to host (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(h_gI, d_gI, size, cudaMemcpyDeviceToHost);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector gI from device to host (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(h_bI, d_bI, size, cudaMemcpyDeviceToHost);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector bI from device to host (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
//printf("TERNA red %d green %d blue %d\n", h_rI[368676], h_gI[368676],h_bI[368676] );
x =0;
for(int c=0; c<height; c++) {
row = rowPointer[c];
for(int d=0; d<width; d++){
wrow = &(row[d*4]);
// desrow = wrow[0];
// desrow2 = wrow[1];
// desrow3 = wrow[2];
// h_rI[x] = desrow;
// h_gI[x] = desrow2;
// h_bI[x] = desrow3;
//printf("%d %d %d\n", r[x], g[x],b[x] );
desrow = h_rI[x];
desrow2 = h_gI[x];
desrow3 = h_bI[x];
wrow[0] = desrow;
wrow[1] = desrow2;
wrow[2] = desrow3;
row[d*4] = *wrow;
x++;
}
}
//printf("Test PASSED\n");
write_png_file(argv[2]);
// Free device global memory
err = cudaFree(d_rI);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector rI (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_gI);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector gI (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_bI);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector bI (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Free host memory
free(h_rI);
free(h_gI);
free(h_bI);
return(0);
}
|
340347bf704b4e0ea09d8e047a049c670ac04c26.hip | // !!! This is a file automatically generated by hipify!!!
/*
Copyright 2019 Fixstars Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "internal.h"
#include <cstdint>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include "parameters.h"
#include "macro.h"
#if TORCH_HIP_VERSION >= 9000
#define SHFL_UP(var, delta) __shfl_up_sync(0xffffffff, (var), (delta))
#define SHFL_DOWN(var, delta) __shfl_down_sync(0xffffffff, (var), (delta))
#define SHFL(var, srcLane) __shfl_sync(0xffffffff, (var), (srcLane))
#else
#define SHFL_UP(var, delta) __shfl_up((var), (delta))
#define SHFL_DOWN(var, delta) __shfl_down((var), (delta))
#define SHFL(var, srcLane) __shfl((var), (srcLane))
#endif
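// Illustrative sketch (not used by this file): an inclusive warp-wide min
// scan built from SHFL_UP, the same pattern warpShuffleReduction() applies
// below. __shfl_up returns the caller's own value for lanes with
// laneId < delta, which is why no lane guard is needed here.
//
// __inline__ __device__ uint16_t warpInclusiveMin(uint16_t v)
// {
// 	for (int d = 1; d < 32; d <<= 1)
// 		v = min(v, SHFL_UP(v, d)); // lane i accumulates min of lanes [0, i]
// 	return v; // lane 31 holds the warp-wide minimum
// }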
namespace sgm
{
enum { FORE = 0, BACK = 1 };
constexpr int WARP_SIZE = 32;
constexpr int MC_MAX_DIFF = 64;
constexpr float SIGMA_LINE_A = 0.01f;
constexpr float SIGMA_LINE_B = 1.f;
constexpr int MAX_WARPS_PER_DMAX = MAX_DISPARITY / WARP_SIZE;
constexpr int WTA_WARPS_PER_BLOCK = 2;
static int divUp(int total, int grain)
{
return (total + grain - 1) / grain;
}
template <typename T>
__inline__ __device__ T partialselect(T* buf, int n, int m)
{
for (int i = 0; i <= m; i++)
{
int mi = i;
T mv = buf[i];
for (int j = i + 1; j < n; j++)
{
if (buf[j] < mv)
{
mi = j;
mv = buf[j];
T t = buf[i]; buf[i] = buf[mi]; buf[mi] = t;
}
}
}
return buf[m];
}
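// Note: partialselect() is a partial selection sort: after the call,
// buf[0..m] hold the m+1 smallest values in nondecreasing order, so
// buf[m] is the m-th order statistic. median() below calls it with m = n/2.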
template <typename T>
__inline__ __device__ T median(T* buf, int n)
{
const int m = n / 2;
return partialselect(buf, n, m);
}
template <typename T>
__global__ void horizontalMedianKernel(const T* __restrict__ disparity, int pitch,
T* columns, int umax, int vmax, int segmentWidth)
{
T buf[MAX_SEGMENT_WIDTH];
const int v = blockIdx.y * blockDim.y + threadIdx.y;
const int u = blockIdx.x * blockDim.x + threadIdx.x;
if (v >= vmax || u >= umax)
return;
for (int du = 0; du < segmentWidth; du++)
{
const T d = disparity[v * pitch + u * segmentWidth + du];
buf[du] = d < MAX_DISPARITY ? d : 0;
}
// get median
const T m = median(buf, segmentWidth);
// store with transposed
columns[u * vmax + v] = m;
}
__inline__ __device__ void warpShuffleReduction(const uint16_t* __restrict__ L, uint16_t minCostsOO[2][MAX_WARPS_PER_DMAX],
uint16_t minCostsGO[2], int& minCostOO, int dmax, int droad, int vp, int tid)
{
const int tmax = dmax / WARP_SIZE;
int prevMinWarp[2], currMinWarp[2];
prevMinWarp[FORE] = P_INF;
prevMinWarp[BACK] = P_INF;
for (int t = 0, k = tid; t < tmax; t++, k += WARP_SIZE)
{
uint16_t localL[2];
localL[FORE] = L[k];
localL[BACK] = L[dmax - 1 - k];
// warp shfl reduction
for (int d = 1; d < 32; d <<= 1)
{
localL[FORE] = min(localL[FORE], SHFL_UP(localL[FORE], d));
localL[BACK] = min(localL[BACK], SHFL_UP(localL[BACK], d));
}
// calc min of all(before current warp)
localL[FORE] = min(localL[FORE], prevMinWarp[FORE]);
localL[BACK] = min(localL[BACK], prevMinWarp[BACK]);
currMinWarp[FORE] = SHFL(localL[FORE], 31);
currMinWarp[BACK] = SHFL(localL[BACK], 31);
// shift up by 1 elem
localL[FORE] = SHFL_UP(localL[FORE], 1);
localL[BACK] = SHFL_UP(localL[BACK], 1);
// first elem of warp is last elem value of one before warp
if (tid == 0)
{
localL[FORE] = t == 0 ? P_INF : prevMinWarp[FORE];
localL[BACK] = t == 0 ? P_INF : prevMinWarp[BACK];
}
// update last elem value
prevMinWarp[FORE] = currMinWarp[FORE];
prevMinWarp[BACK] = currMinWarp[BACK];
minCostsOO[FORE][t] = localL[FORE];
minCostsOO[BACK][tmax - 1 - t] = SHFL(localL[BACK], 31 - tid); // reversing [BACK] elems
if (droad <= 0 && t == tmax - 1)
minCostsGO[BACK] = currMinWarp[BACK];
else if (droad > 0 && t == tmax - 1 - droad / WARP_SIZE)
minCostsGO[BACK] = SHFL(localL[BACK], WARP_SIZE - 1 - (droad % WARP_SIZE));
}
minCostOO = SHFL(prevMinWarp[BACK], 0);
}
template <typename T>
__global__ void scanCostForeKernel(const T* __restrict__ disparity, int umax, int vmax, int dmax,
const float* __restrict__ road, int vhor, uint16_t* __restrict__ L)
{
const int uc = blockIdx.y * blockDim.y + threadIdx.y;
if (uc > umax)
return;
const int tid = threadIdx.x;
const int n = dmax + 1;
// cost buffers
L += uc * vmax * n;
extern __shared__ uint16_t Lp[];
uint16_t* tmpLp = Lp + n;
uint16_t minCostsOO[2][MAX_WARPS_PER_DMAX];
uint16_t minCostsGO[2];
int minCostOO = P_INF;
for (int vc = 0; vc < vmax; vc++)
{
uint16_t* Lc = L + vc * n;
const int vp = vc - 1;
const bool inside = vp >= 0 && vp < vmax;
const int d = disparity[uc * vmax + vc];
const int droad = lroundf(road[vc]);
const float sigmad = sqrtf(SIGMA_LINE_A * SIGMA_LINE_A * vc * vc + SIGMA_LINE_B * SIGMA_LINE_B);
if (inside)
warpShuffleReduction(Lp, minCostsOO, minCostsGO, minCostOO, dmax, droad, vp, tid);
const int minCost = inside ? min(minCostOO, static_cast<int>(Lp[dmax])) : 0;
for (int k = tid, t = 0; k < n; k += WARP_SIZE, t++)
{
const bool object = k < dmax;
// matching cost
const int dhat = object ? k : droad;
const int diff = object ? abs(d - dhat) : lroundf(max(d - dhat, 0) / sigmad);
const int MC = min(diff, MC_MAX_DIFF);
// transition cost
int PT = 0;
if (inside)
{
const int cost0 = Lp[k]; // no penalty
int cost1 = P_INF, cost2 = P_INF, cost3 = P_INF;
if (object)
{
// Object from Object
cost1 = minCostsOO[FORE][t] + P_OO1;
cost2 = minCostsOO[BACK][t] + P_OO2;
// Object from Ground
cost3 = k > droad ? Lp[dmax] + P_OG2 : P_INF;
}
else
{
// Ground from Object
cost3 = droad >= 0 && droad < dmax ? Lp[droad] + P_OG1 : P_INF;
}
PT = min(min(cost0, cost1), min(cost2, cost3));
}
const int cost = MC + PT - minCost;
Lc[k] = cost;
tmpLp[k] = cost;
}
for (int k = tid; k < n; k += WARP_SIZE)
Lp[k] = tmpLp[k];
}
}
template <typename T>
__global__ void scanCostBackKernel(const T* __restrict__ disparity, int umax, int vmax, int dmax,
const float* __restrict__ DR, int vhor, uint16_t* __restrict__ L)
{
const int uc = blockIdx.y * blockDim.y + threadIdx.y;
if (uc > umax)
return;
const int tid = threadIdx.x;
const int n = dmax + 1;
// cost buffers
L += uc * vmax * n;
extern __shared__ uint16_t Lp[];
uint16_t minCostsOO[2][MAX_WARPS_PER_DMAX];
uint16_t minCostsGO[2];
int minCostOO = P_INF;
for (int vc = vmax - 1; vc >= 0; vc--)
{
uint16_t* Lc = L + vc * n;
const int vp = vc + 1;
const bool inside = vp >= 0 && vp < vmax;
const int d = disparity[uc * vmax + vc];
const int droad = lroundf(DR[vc]);
const float sigmad = sqrtf(SIGMA_LINE_A * SIGMA_LINE_A * vc * vc + SIGMA_LINE_B * SIGMA_LINE_B);
if (inside)
warpShuffleReduction(Lp, minCostsOO, minCostsGO, minCostOO, dmax, droad, vp, tid);
const int minCost = inside ? min(minCostOO, static_cast<int>(Lp[dmax])) : 0;
for (int k = tid, t = 0; k < n; k += WARP_SIZE, t++)
{
const bool object = k < dmax;
// matching cost
const int dhat = object ? k : droad;
const int diff = object ? abs(d - dhat) : lroundf(max(d - dhat, 0) / sigmad);
const int MC = min(diff, MC_MAX_DIFF);
// transition cost
int PT = 0;
if (inside)
{
const int cost0 = Lp[k]; // no penalty
int cost1 = P_INF, cost2 = P_INF, cost3 = P_INF;
if (object)
{
// Object from Object
cost1 = minCostsOO[FORE][t] + P_OO2;
cost2 = minCostsOO[BACK][t] + P_OO1;
// Object from Ground
cost3 = k == droad ? Lp[dmax] + P_OG1 : P_INF;
}
else
{
// Ground from Object
cost3 = minCostsGO[BACK] + P_OG2;
}
PT = min(min(cost0, cost1), min(cost2, cost3));
}
const int cost = MC + PT - minCost;
Lc[k] = cost;
Lp[k] = cost;
}
}
}
__global__ void winnerTakesAllKernel(const uint16_t* __restrict__ LFore, const uint16_t* __restrict__ LBack,
uint16_t* __restrict__ labels, int umax, int vmax, int dmax, int vhor)
{
const int warpIdx = threadIdx.x / WARP_SIZE;
const int laneIdx = threadIdx.x % WARP_SIZE;
const int uc = blockIdx.y * blockDim.y + threadIdx.y;
const int vc = blockIdx.x * WTA_WARPS_PER_BLOCK + warpIdx;
if (uc >= umax || vc >= vmax)
return;
const int n = dmax + 1;
int minSCost = INT_MAX;
int label = -1;
LFore += (uc * vmax + vc) * n;
LBack += (uc * vmax + vc) * n;
for (int t = 0, k = laneIdx; t * WARP_SIZE < n; t++, k += WARP_SIZE)
{
const bool object = k < dmax;
const bool invalid = (!object && vc <= vhor) || k >= n;
int scost = invalid ? P_INF : LFore[k] + LBack[k];
int mink = k;
for (int d = 16; d > 0; d >>= 1)
{
int _scost = SHFL_DOWN(scost, d);
int _mink = SHFL_DOWN(mink, d);
if (_scost < scost)
{
scost = _scost;
mink = _mink;
}
else if (_scost == scost)
mink = min(mink, _mink);
}
if (laneIdx == 0 && scost < minSCost)
{
minSCost = scost;
label = mink;
}
}
if (laneIdx == 0)
labels[uc * vmax + vc] = label;
}
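// Note: the loop above performs a warp-level butterfly min-reduction with
// SHFL_DOWN, carrying the candidate index alongside the summed cost; ties
// are broken toward the smaller candidate index via min(mink, _mink).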
template <typename T>
void horizontalMedianGPU(const T* D, int pitch, T* columns, int umax, int vmax, int segmentWidth)
{
const dim3 block(32, 32);
const dim3 grid(divUp(umax, block.x), divUp(vmax, block.y));
hipLaunchKernelGGL(( horizontalMedianKernel), dim3(grid), dim3(block), 0, 0, D, pitch, columns, umax, vmax, segmentWidth);
CUDA_CHECK(hipGetLastError());
}
template void horizontalMedianGPU<uint8_t>(const uint8_t* D, int pitch, uint8_t* columns, int umax, int vmax, int segmentWidth);
template void horizontalMedianGPU<uint16_t>(const uint16_t* D, int pitch, uint16_t* columns, int umax, int vmax, int segmentWidth);
template <typename T>
void scanCostForeGPU(const T* D, int umax, int vmax, int dmax, const float* DR, int vhor, uint16_t* L,
hipStream_t stream)
{
const dim3 grid(1, umax);
const dim3 block(32, 1);
const int n = dmax + 1;
const size_t sharedSize = sizeof(uint16_t) * 2 * n;
hipLaunchKernelGGL(( scanCostForeKernel), dim3(grid), dim3(block), sharedSize, stream, D, umax, vmax, dmax, DR, vhor, L);
CUDA_CHECK(hipGetLastError());
}
template void scanCostForeGPU<uint8_t>(const uint8_t* D, int umax, int vmax, int dmax, const float* DR, int vhor, uint16_t* L, hipStream_t stream);
template void scanCostForeGPU<uint16_t>(const uint16_t* D, int umax, int vmax, int dmax, const float* DR, int vhor, uint16_t* L, hipStream_t stream);
template <typename T>
void scanCostBackGPU(const T* D, int umax, int vmax, int dmax, const float* DR, int vhor, uint16_t* L,
hipStream_t stream)
{
const dim3 grid(1, umax);
const dim3 block(32, 1);
const int n = dmax + 1;
const size_t sharedSize = sizeof(uint16_t) * n;
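	// Note: the backward scan updates Lp[] in place (Lc[k] and Lp[k] get the
	// same value), so it only needs n entries of shared memory; the forward
	// scan above double-buffers through tmpLp and therefore reserves 2 * n.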
hipLaunchKernelGGL(( scanCostBackKernel), dim3(grid), dim3(block), sharedSize, stream, D, umax, vmax, dmax, DR, vhor, L);
CUDA_CHECK(hipGetLastError());
}
template void scanCostBackGPU<uint8_t>(const uint8_t* D, int umax, int vmax, int dmax, const float* DR, int vhor, uint16_t* L, hipStream_t stream);
template void scanCostBackGPU<uint16_t>(const uint16_t* D, int umax, int vmax, int dmax, const float* DR, int vhor, uint16_t* L, hipStream_t stream);
void winnerTakesAllGPU(uint16_t* LFore, uint16_t* LBack, uint16_t* labels,
int umax, int vmax, int dmax, int vhor)
{
const dim3 grid(divUp(vmax, WTA_WARPS_PER_BLOCK), umax);
const dim3 block(WTA_WARPS_PER_BLOCK * WARP_SIZE, 1);
hipLaunchKernelGGL(( winnerTakesAllKernel), dim3(grid), dim3(block), 0, 0, LFore, LBack, labels, umax, vmax, dmax, vhor);
CUDA_CHECK(hipGetLastError());
}
} // namespace sgm
| 340347bf704b4e0ea09d8e047a049c670ac04c26.cu | /*
Copyright 2019 Fixstars Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "internal.h"
#include <cstdint>
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include "parameters.h"
#include "macro.h"
#if CUDA_VERSION >= 9000
#define SHFL_UP(var, delta) __shfl_up_sync(0xffffffff, (var), (delta))
#define SHFL_DOWN(var, delta) __shfl_down_sync(0xffffffff, (var), (delta))
#define SHFL(var, srcLane) __shfl_sync(0xffffffff, (var), (srcLane))
#else
#define SHFL_UP(var, delta) __shfl_up((var), (delta))
#define SHFL_DOWN(var, delta) __shfl_down((var), (delta))
#define SHFL(var, srcLane) __shfl((var), (srcLane))
#endif
namespace sgm
{
enum { FORE = 0, BACK = 1 };
constexpr int WARP_SIZE = 32;
constexpr int MC_MAX_DIFF = 64;
constexpr float SIGMA_LINE_A = 0.01f;
constexpr float SIGMA_LINE_B = 1.f;
constexpr int MAX_WARPS_PER_DMAX = MAX_DISPARITY / WARP_SIZE;
constexpr int WTA_WARPS_PER_BLOCK = 2;
static int divUp(int total, int grain)
{
return (total + grain - 1) / grain;
}
template <typename T>
__inline__ __device__ T partialselect(T* buf, int n, int m)
{
for (int i = 0; i <= m; i++)
{
int mi = i;
T mv = buf[i];
for (int j = i + 1; j < n; j++)
{
if (buf[j] < mv)
{
mi = j;
mv = buf[j];
T t = buf[i]; buf[i] = buf[mi]; buf[mi] = t;
}
}
}
return buf[m];
}
template <typename T>
__inline__ __device__ T median(T* buf, int n)
{
const int m = n / 2;
return partialselect(buf, n, m);
}
template <typename T>
__global__ void horizontalMedianKernel(const T* __restrict__ disparity, int pitch,
T* columns, int umax, int vmax, int segmentWidth)
{
T buf[MAX_SEGMENT_WIDTH];
const int v = blockIdx.y * blockDim.y + threadIdx.y;
const int u = blockIdx.x * blockDim.x + threadIdx.x;
if (v >= vmax || u >= umax)
return;
for (int du = 0; du < segmentWidth; du++)
{
const T d = disparity[v * pitch + u * segmentWidth + du];
buf[du] = d < MAX_DISPARITY ? d : 0;
}
// get median
const T m = median(buf, segmentWidth);
// store with transposed
columns[u * vmax + v] = m;
}
__inline__ __device__ void warpShuffleReduction(const uint16_t* __restrict__ L, uint16_t minCostsOO[2][MAX_WARPS_PER_DMAX],
uint16_t minCostsGO[2], int& minCostOO, int dmax, int droad, int vp, int tid)
{
const int tmax = dmax / WARP_SIZE;
int prevMinWarp[2], currMinWarp[2];
prevMinWarp[FORE] = P_INF;
prevMinWarp[BACK] = P_INF;
for (int t = 0, k = tid; t < tmax; t++, k += WARP_SIZE)
{
uint16_t localL[2];
localL[FORE] = L[k];
localL[BACK] = L[dmax - 1 - k];
// warp shfl reduction
for (int d = 1; d < 32; d <<= 1)
{
localL[FORE] = min(localL[FORE], SHFL_UP(localL[FORE], d));
localL[BACK] = min(localL[BACK], SHFL_UP(localL[BACK], d));
}
// calc min of all(before current warp)
localL[FORE] = min(localL[FORE], prevMinWarp[FORE]);
localL[BACK] = min(localL[BACK], prevMinWarp[BACK]);
currMinWarp[FORE] = SHFL(localL[FORE], 31);
currMinWarp[BACK] = SHFL(localL[BACK], 31);
// shift up by 1 elem
localL[FORE] = SHFL_UP(localL[FORE], 1);
localL[BACK] = SHFL_UP(localL[BACK], 1);
// first elem of warp is last elem value of one before warp
if (tid == 0)
{
localL[FORE] = t == 0 ? P_INF : prevMinWarp[FORE];
localL[BACK] = t == 0 ? P_INF : prevMinWarp[BACK];
}
// update last elem value
prevMinWarp[FORE] = currMinWarp[FORE];
prevMinWarp[BACK] = currMinWarp[BACK];
minCostsOO[FORE][t] = localL[FORE];
minCostsOO[BACK][tmax - 1 - t] = SHFL(localL[BACK], 31 - tid); // reversing [BACK] elems
if (droad <= 0 && t == tmax - 1)
minCostsGO[BACK] = currMinWarp[BACK];
else if (droad > 0 && t == tmax - 1 - droad / WARP_SIZE)
minCostsGO[BACK] = SHFL(localL[BACK], WARP_SIZE - 1 - (droad % WARP_SIZE));
}
minCostOO = SHFL(prevMinWarp[BACK], 0);
}
template <typename T>
__global__ void scanCostForeKernel(const T* __restrict__ disparity, int umax, int vmax, int dmax,
const float* __restrict__ road, int vhor, uint16_t* __restrict__ L)
{
const int uc = blockIdx.y * blockDim.y + threadIdx.y;
if (uc > umax)
return;
const int tid = threadIdx.x;
const int n = dmax + 1;
// cost buffers
L += uc * vmax * n;
extern __shared__ uint16_t Lp[];
uint16_t* tmpLp = Lp + n;
uint16_t minCostsOO[2][MAX_WARPS_PER_DMAX];
uint16_t minCostsGO[2];
int minCostOO = P_INF;
for (int vc = 0; vc < vmax; vc++)
{
uint16_t* Lc = L + vc * n;
const int vp = vc - 1;
const bool inside = vp >= 0 && vp < vmax;
const int d = disparity[uc * vmax + vc];
const int droad = lroundf(road[vc]);
const float sigmad = sqrtf(SIGMA_LINE_A * SIGMA_LINE_A * vc * vc + SIGMA_LINE_B * SIGMA_LINE_B);
if (inside)
warpShuffleReduction(Lp, minCostsOO, minCostsGO, minCostOO, dmax, droad, vp, tid);
const int minCost = inside ? min(minCostOO, static_cast<int>(Lp[dmax])) : 0;
for (int k = tid, t = 0; k < n; k += WARP_SIZE, t++)
{
const bool object = k < dmax;
// matching cost
const int dhat = object ? k : droad;
const int diff = object ? abs(d - dhat) : lroundf(max(d - dhat, 0) / sigmad);
const int MC = min(diff, MC_MAX_DIFF);
// transition cost
int PT = 0;
if (inside)
{
const int cost0 = Lp[k]; // no penalty
int cost1 = P_INF, cost2 = P_INF, cost3 = P_INF;
if (object)
{
// Object from Object
cost1 = minCostsOO[FORE][t] + P_OO1;
cost2 = minCostsOO[BACK][t] + P_OO2;
// Object from Ground
cost3 = k > droad ? Lp[dmax] + P_OG2 : P_INF;
}
else
{
// Ground from Object
cost3 = droad >= 0 && droad < dmax ? Lp[droad] + P_OG1 : P_INF;
}
PT = min(min(cost0, cost1), min(cost2, cost3));
}
const int cost = MC + PT - minCost;
Lc[k] = cost;
tmpLp[k] = cost;
}
for (int k = tid; k < n; k += WARP_SIZE)
Lp[k] = tmpLp[k];
}
}
template <typename T>
__global__ void scanCostBackKernel(const T* __restrict__ disparity, int umax, int vmax, int dmax,
const float* __restrict__ DR, int vhor, uint16_t* __restrict__ L)
{
const int uc = blockIdx.y * blockDim.y + threadIdx.y;
if (uc > umax)
return;
const int tid = threadIdx.x;
const int n = dmax + 1;
// cost buffers
L += uc * vmax * n;
extern __shared__ uint16_t Lp[];
uint16_t minCostsOO[2][MAX_WARPS_PER_DMAX];
uint16_t minCostsGO[2];
int minCostOO = P_INF;
for (int vc = vmax - 1; vc >= 0; vc--)
{
uint16_t* Lc = L + vc * n;
const int vp = vc + 1;
const bool inside = vp >= 0 && vp < vmax;
const int d = disparity[uc * vmax + vc];
const int droad = lroundf(DR[vc]);
const float sigmad = sqrtf(SIGMA_LINE_A * SIGMA_LINE_A * vc * vc + SIGMA_LINE_B * SIGMA_LINE_B);
if (inside)
warpShuffleReduction(Lp, minCostsOO, minCostsGO, minCostOO, dmax, droad, vp, tid);
const int minCost = inside ? min(minCostOO, static_cast<int>(Lp[dmax])) : 0;
for (int k = tid, t = 0; k < n; k += WARP_SIZE, t++)
{
const bool object = k < dmax;
// matching cost
const int dhat = object ? k : droad;
const int diff = object ? abs(d - dhat) : lroundf(max(d - dhat, 0) / sigmad);
const int MC = min(diff, MC_MAX_DIFF);
// transition cost
int PT = 0;
if (inside)
{
const int cost0 = Lp[k]; // no penalty
int cost1 = P_INF, cost2 = P_INF, cost3 = P_INF;
if (object)
{
// Object from Object
cost1 = minCostsOO[FORE][t] + P_OO2;
cost2 = minCostsOO[BACK][t] + P_OO1;
// Object from Ground
cost3 = k == droad ? Lp[dmax] + P_OG1 : P_INF;
}
else
{
// Ground from Object
cost3 = minCostsGO[BACK] + P_OG2;
}
PT = min(min(cost0, cost1), min(cost2, cost3));
}
const int cost = MC + PT - minCost;
Lc[k] = cost;
Lp[k] = cost;
}
}
}
__global__ void winnerTakesAllKernel(const uint16_t* __restrict__ LFore, const uint16_t* __restrict__ LBack,
uint16_t* __restrict__ labels, int umax, int vmax, int dmax, int vhor)
{
const int warpIdx = threadIdx.x / WARP_SIZE;
const int laneIdx = threadIdx.x % WARP_SIZE;
const int uc = blockIdx.y * blockDim.y + threadIdx.y;
const int vc = blockIdx.x * WTA_WARPS_PER_BLOCK + warpIdx;
if (uc >= umax || vc >= vmax)
return;
const int n = dmax + 1;
int minSCost = INT_MAX;
int label = -1;
LFore += (uc * vmax + vc) * n;
LBack += (uc * vmax + vc) * n;
for (int t = 0, k = laneIdx; t * WARP_SIZE < n; t++, k += WARP_SIZE)
{
const bool object = k < dmax;
const bool invalid = (!object && vc <= vhor) || k >= n;
int scost = invalid ? P_INF : LFore[k] + LBack[k];
int mink = k;
for (int d = 16; d > 0; d >>= 1)
{
int _scost = SHFL_DOWN(scost, d);
int _mink = SHFL_DOWN(mink, d);
if (_scost < scost)
{
scost = _scost;
mink = _mink;
}
else if (_scost == scost)
mink = min(mink, _mink);
}
if (laneIdx == 0 && scost < minSCost)
{
minSCost = scost;
label = mink;
}
}
if (laneIdx == 0)
labels[uc * vmax + vc] = label;
}
template <typename T>
void horizontalMedianGPU(const T* D, int pitch, T* columns, int umax, int vmax, int segmentWidth)
{
const dim3 block(32, 32);
const dim3 grid(divUp(umax, block.x), divUp(vmax, block.y));
horizontalMedianKernel<<<grid, block>>>(D, pitch, columns, umax, vmax, segmentWidth);
CUDA_CHECK(cudaGetLastError());
}
template void horizontalMedianGPU<uint8_t>(const uint8_t* D, int pitch, uint8_t* columns, int umax, int vmax, int segmentWidth);
template void horizontalMedianGPU<uint16_t>(const uint16_t* D, int pitch, uint16_t* columns, int umax, int vmax, int segmentWidth);
template <typename T>
void scanCostForeGPU(const T* D, int umax, int vmax, int dmax, const float* DR, int vhor, uint16_t* L,
cudaStream_t stream)
{
const dim3 grid(1, umax);
const dim3 block(32, 1);
const int n = dmax + 1;
const size_t sharedSize = sizeof(uint16_t) * 2 * n;
scanCostForeKernel<<<grid, block, sharedSize, stream>>>(D, umax, vmax, dmax, DR, vhor, L);
CUDA_CHECK(cudaGetLastError());
}
template void scanCostForeGPU<uint8_t>(const uint8_t* D, int umax, int vmax, int dmax, const float* DR, int vhor, uint16_t* L, cudaStream_t stream);
template void scanCostForeGPU<uint16_t>(const uint16_t* D, int umax, int vmax, int dmax, const float* DR, int vhor, uint16_t* L, cudaStream_t stream);
template <typename T>
void scanCostBackGPU(const T* D, int umax, int vmax, int dmax, const float* DR, int vhor, uint16_t* L,
cudaStream_t stream)
{
const dim3 grid(1, umax);
const dim3 block(32, 1);
const int n = dmax + 1;
const size_t sharedSize = sizeof(uint16_t) * n;
scanCostBackKernel<<<grid, block, sharedSize, stream>>>(D, umax, vmax, dmax, DR, vhor, L);
CUDA_CHECK(cudaGetLastError());
}
template void scanCostBackGPU<uint8_t>(const uint8_t* D, int umax, int vmax, int dmax, const float* DR, int vhor, uint16_t* L, cudaStream_t stream);
template void scanCostBackGPU<uint16_t>(const uint16_t* D, int umax, int vmax, int dmax, const float* DR, int vhor, uint16_t* L, cudaStream_t stream);
void winnerTakesAllGPU(uint16_t* LFore, uint16_t* LBack, uint16_t* labels,
int umax, int vmax, int dmax, int vhor)
{
const dim3 grid(divUp(vmax, WTA_WARPS_PER_BLOCK), umax);
const dim3 block(WTA_WARPS_PER_BLOCK * WARP_SIZE, 1);
winnerTakesAllKernel<<<grid, block>>>(LFore, LBack, labels, umax, vmax, dmax, vhor);
CUDA_CHECK(cudaGetLastError());
}
} // namespace sgm
|
caa4778419780afa7a992cc3a7ee36c5209a506b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
lb_kernel.cu
GPU kernel functions.
Author: Scott Christley <schristley@mac.com>
Copyright (C) 2010 Scott Christley
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met: 1. Redistributions of source code must retain the above
copyright notice, this list of conditions and the following
disclaimer. 2. Redistributions in binary form must reproduce the
above copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided with
the distribution. 3. The name of the author may not be used to
endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef LB_KERNEL_CU
#define LB_KERNEL_CU
#include <math.h>
#include "sem_kernel.cu"
__constant__ double t[] = {1.0/3.0, 1.0/18.0, 1.0/18.0, 1.0/18.0, 1.0/18.0, 1.0/18.0, 1.0/18.0,
1.0/36.0, 1.0/36.0, 1.0/36.0, 1.0/36.0,
1.0/36.0, 1.0/36.0, 1.0/36.0, 1.0/36.0,
1.0/36.0, 1.0/36.0, 1.0/36.0, 1.0/36.0};
__constant__ double cx[] = {0, 1,-1, 0, 0, 0, 0, 1, 1, 1, 1,-1,-1,-1,-1, 0, 0, 0, 0};
__constant__ double cy[] = {0, 0, 0, 1,-1, 0, 0, 1,-1, 0, 0, 1,-1, 0, 0, 1, 1,-1,-1};
__constant__ double cz[] = {0, 0, 0, 0, 0, 1,-1, 0, 0, 1,-1, 0, 0, 1,-1, 1,-1, 1,-1};
__constant__ int bb[] = {0, 2, 1, 4, 3, 6, 5, 12, 11, 14, 13, 8, 7, 10, 9, 18, 17, 16, 15};
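// D3Q19 lattice constants: t[] are the lattice weights (1/3 for the rest
// particle, 1/18 for the six axis directions, 1/36 for the twelve diagonals),
// cx/cy/cz are the discrete velocity set, and bb[] maps each direction to
// its opposite for bounce-back (e.g. bb[1] = 2 since c1 = -c2).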
//// TMP
// float newcx[] = {0, 1, 0, 0, -1, 0, 0, 1, -1, -1, 1, 1, -1, -1, 1, 0, 0, 0, 0};
__device__ int stepL = 0, stepR = 0;
__device__ float
get3d_value_float(void *devPtr, size_t pitch, size_t slicePitch, int i, int j, int k)
{
char *slice = (char *)devPtr + k * slicePitch;
float *row = (float *)(slice + j * pitch);
return row[i];
}
__device__ double
get3d_value(void *devPtr, size_t pitch, size_t slicePitch, int i, int j, int k)
{
char *slice = (char *)devPtr + k * slicePitch;
double *row = (double *)(slice + j * pitch);
return row[i];
}
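// Note: both helpers index a pitched 3D allocation: element (i, j, k) lives
// at base + k * slicePitch + j * pitch, then offset i within the row;
// callers pass slicePitch = pitch * height, matching the layout used below.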
/*__device__ double atomicAdd(double* address, double val) {
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}*/
/*__global__ void
fluid3d_in_out_flow_boundary_kernel(void *g, int aBank)
{
fluid_GPUgrids *grids = (fluid_GPUgrids *)g;
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int k = aBank * blockDim.z + threadIdx.z;
int d;
// check thread in boundary
if (i >= grids->width) return;
if (j >= grids->height) return;
if (k >= grids->depth) return;
size_t pitch = grids->rho.pitch;
size_t slicePitch = pitch * grids->height;
char *devPtr = (char *)grids->rho.ptr;
char *slice = devPtr + k * slicePitch;
float *row = (float *)(slice + j * pitch);
float tmp_ux, tmp_uy, tmp_uz, cu;
float Cs = grids->dx/grids->dt;
float Nyx = 0.0, Nyz = 0.0;
//// Inflow boundary condition at height = 0.
if ((j == 0) && (k > 0) && (k < (grids->depth - 1))
&& (i > 0) && (i < (grids->width - 1)) )
{
// Implementation of on-site velocity boundary conditions for D3Q19 lattice Boltzmann simulations
// M. Hecht, J. Harting. J. of Stat. Mech.: Theory and Experiment. 2010
// % MACROSCOPIC (DIRICHLET) BOUNDARY CONDITIONS
// % Inlet: Poiseuille profile
/// due to singularity for nodes which are directly linked to solid boundary nodes. Special treatment is needed.
/// In fact, need to follow "M. Hecht, J. Harting." and "Y.T. Feng, K. Han and D.R.J. Owen.
/// Coupled lattice Boltzmann method and discrete element modeling of particle transport in
/// turbulent fluid flows: Computational issue. Int. J. Numer. Meth. Engng. 72:1111-1134, 2007" to derive the boundary condition.
/// Here, we try a simple fix: use initial condition to compute f again.
devPtr = (char *)grids->ux.ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] = 0.0;
tmp_ux = 0.0;
devPtr = (char *)grids->uy.ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] = SHEAR_RATE* k * (grids->dx);
tmp_uy = SHEAR_RATE* k * (grids->dx);
devPtr = (char *)grids->uz.ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] = 0;
tmp_uz = 0;
if( (i == 1) || (i == grids->width-2) ||
(k == 1) || (k == grids->depth-2)
)
{
//// TMP fix 08092011. Simply reinitialize
devPtr = (char *)grids->rho.ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] = INIT_RHO;
for (d = 0; d < 19; ++d)
{
devPtr = (char *)grids->fIN[d].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
cu = 3.0 * (cx[d] * tmp_ux + cy[d] * tmp_uy + cz[d] * tmp_uz)*Cs;
row[i] = INIT_RHO*t[d]*( 1.0 + cu/(Cs*Cs) + 0.5*cu*cu/(Cs*Cs*Cs*Cs) -
1.5* (tmp_ux * tmp_ux + tmp_uy * tmp_uy + tmp_uz*tmp_uz)/(Cs*Cs));
}
}
else
{
devPtr = (char *)grids->rho.ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
///// NEW 08052011, with units.
///// part of fIN entering the domain is not correct at this point because of fluid3d_stream_kernel_2()
///// assumes periodicity on boundary conditions.
///// On inflow side (z = 0) in Z (depth) direction: This means e11, e15,e3, e16,e12 need to be updated.
row[i] = 1.0 / (1.0 - get3d_value(grids->uy.ptr, pitch, slicePitch, i, j, k)/Cs)
* (get3d_value(grids->fIN[1].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[2].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[5].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[6].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[9].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[10].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[13].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[14].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[0].ptr, pitch, slicePitch, i, j, k)
+ 2.0 * (get3d_value(grids->fIN[4].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[8].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[12].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[17].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[18].ptr, pitch, slicePitch, i, j, k)));
/////END::: NEW 08052011, with units
// % MICROSCOPIC BOUNDARY CONDITIONS: INLET (Zou/He BC)
//
/////NEW 08052011, with units
Nyx = 0.5 * (get3d_value(grids->fIN[1].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[9].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[10].ptr, pitch, slicePitch, i, j, k)
- get3d_value(grids->fIN[2].ptr, pitch, slicePitch, i, j, k)
- get3d_value(grids->fIN[13].ptr, pitch, slicePitch, i, j, k)
- get3d_value(grids->fIN[14].ptr, pitch, slicePitch, i, j, k))
- 1.0 / 3.0 * get3d_value(grids->rho.ptr, pitch, slicePitch, i, j, k) *
get3d_value(grids->ux.ptr, pitch, slicePitch, i, j, k)/Cs;
Nyz = 0.5 * (get3d_value(grids->fIN[5].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[9].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[13].ptr, pitch, slicePitch, i, j, k)
- get3d_value(grids->fIN[6].ptr, pitch, slicePitch, i, j, k)
- get3d_value(grids->fIN[10].ptr, pitch, slicePitch, i, j, k)
- get3d_value(grids->fIN[14].ptr, pitch, slicePitch, i, j, k))
- 1.0 / 3.0 * get3d_value(grids->rho.ptr, pitch, slicePitch, i, j, k) *
get3d_value(grids->uz.ptr, pitch, slicePitch, i, j, k)/Cs;
devPtr = (char *)grids->fIN[3].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
/////NEW 08052011, with units
row[i] = get3d_value(grids->fIN[4].ptr, pitch, slicePitch, i, j, k)
+ 1.0 / 3.0 * get3d_value(grids->rho.ptr, pitch, slicePitch, i, j, k) *
get3d_value(grids->uy.ptr, pitch, slicePitch, i, j, k)/(Cs);
/////END::: NEW 08052011, with units
devPtr = (char *)grids->fIN[7].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
/////NEW 08052011, with units
row[i] = get3d_value(grids->fIN[12].ptr, pitch, slicePitch, i, j, k)
+ 1.0 / 6.0 * get3d_value(grids->rho.ptr, pitch, slicePitch, i, j, k)
* (get3d_value(grids->uy.ptr, pitch, slicePitch, i, j, k) + get3d_value(grids->ux.ptr, pitch, slicePitch, i, j, k))/(Cs)
- Nyx;
/////END::: NEW 08052011, with units
devPtr = (char *)grids->fIN[11].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
/////NEW 08052011, with units
row[i] = get3d_value(grids->fIN[9].ptr, pitch, slicePitch, i, j, k)
+ 1.0 / 6.0 * get3d_value(grids->rho.ptr, pitch, slicePitch, i, j, k)
* (get3d_value(grids->uy.ptr, pitch, slicePitch, i, j, k) - get3d_value(grids->ux.ptr, pitch, slicePitch, i, j, k))/(Cs)
+ Nyx;
/////END::: NEW 08052011, with units
devPtr = (char *)grids->fIN[15].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
/////NEW 08052011, with units
row[i] = get3d_value(grids->fIN[18].ptr, pitch, slicePitch, i, j, k)
+ 1.0 / 6.0 * get3d_value(grids->rho.ptr, pitch, slicePitch, i, j, k)
* (get3d_value(grids->uy.ptr, pitch, slicePitch, i, j, k) + get3d_value(grids->uz.ptr, pitch, slicePitch, i, j, k))/(Cs)
- Nyz;
/////END::: NEW 08052011, with units
devPtr = (char *)grids->fIN[16].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
/////NEW 08052011, with units
row[i] = get3d_value(grids->fIN[17].ptr, pitch, slicePitch, i, j, k)
+ 1.0 / 6.0 * get3d_value(grids->rho.ptr, pitch, slicePitch, i, j, k)
* (get3d_value(grids->uy.ptr, pitch, slicePitch, i, j, k) - get3d_value(grids->uz.ptr, pitch, slicePitch, i, j, k))/(Cs)
+ Nyz;
/////END::: NEW 08052011, with units
}
} /// end :::: if(k == 0)
//// Outflow boundary condition at depth = grid->depth-1.
if ((j == (grids->height - 1))
&& (k > 0) && (k < (grids->depth - 1))
&& (i > 0) && (i < (grids->width - 1))) {
// % MACROSCOPIC (DIRICHLET) BOUNDARY CONDITIONS
// % Outlet: Constant pressure
//
// Implementation of on-site velocity boundary conditions for D3Q19 lattice Boltzmann simulations
// M. Hecht, J. Harting. J. of Stat. Mech.: Theory and Experiment. 2010
devPtr = (char *)grids->ux.ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] = 0.0;
tmp_ux = 0.0;
devPtr = (char *)grids->uy.ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] = SHEAR_RATE* k * (grids->dx);;
tmp_uy = SHEAR_RATE* k * (grids->dx);;
devPtr = (char *)grids->uz.ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] = 0;
tmp_uz = 0;
if( (i == 1) || (i == grids->width-2) ||
(k == 1) || (k == grids->depth-2)
)
{
//// TMP fix 08092011. Simply reinitialize
devPtr = (char *)grids->rho.ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] = INIT_RHO;
for (d = 0; d < 19; ++d)
{
devPtr = (char *)grids->fIN[d].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
cu = 3.0 * (cx[d] * tmp_ux + cy[d] * tmp_uy + cz[d] * tmp_uz)*Cs;
row[i] = INIT_RHO*t[d]*( 1.0 + cu/(Cs*Cs) + 0.5*cu*cu/(Cs*Cs*Cs*Cs) -
1.5* (tmp_ux * tmp_ux + tmp_uy * tmp_uy + tmp_uz*tmp_uz)/(Cs*Cs));
}
}
else
{
devPtr = (char *)grids->rho.ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] = 1.0 / (1.0 + get3d_value(grids->uy.ptr, pitch, slicePitch, i, j, k)/Cs)
* (get3d_value(grids->fIN[1].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[2].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[5].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[6].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[9].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[10].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[13].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[14].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[0].ptr, pitch, slicePitch, i, j, k)
+ 2.0 * (get3d_value(grids->fIN[3].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[7].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[11].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[15].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[16].ptr, pitch, slicePitch, i, j, k)));
devPtr = (char *)grids->fIN[4].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] = get3d_value(grids->fIN[3].ptr, pitch, slicePitch, i, j, k)
- 1.0 / 3.0 * get3d_value(grids->rho.ptr, pitch, slicePitch, i, j, k) *
get3d_value(grids->uy.ptr, pitch, slicePitch, i, j, k)/(Cs);
/////////////////////
devPtr = (char *)grids->fIN[12].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
/////NEW 08052011, with units
row[i] = get3d_value(grids->fIN[7].ptr, pitch, slicePitch, i, j, k)
+ 1.0 / 6.0 * get3d_value(grids->rho.ptr, pitch, slicePitch, i, j, k)
* (- get3d_value(grids->uy.ptr, pitch, slicePitch, i, j, k) - get3d_value(grids->ux.ptr, pitch, slicePitch, i, j, k))/(Cs)
+ Nyx;
/////END::: NEW 08052011, with units
devPtr = (char *)grids->fIN[8].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
/////NEW 08052011, with units
row[i] = get3d_value(grids->fIN[11].ptr, pitch, slicePitch, i, j, k)
+ 1.0 / 6.0 * get3d_value(grids->rho.ptr, pitch, slicePitch, i, j, k)
* (- get3d_value(grids->uy.ptr, pitch, slicePitch, i, j, k) + get3d_value(grids->ux.ptr, pitch, slicePitch, i, j, k))/(Cs)
- Nyx;
/////END::: NEW 08052011, with units
devPtr = (char *)grids->fIN[18].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
/////NEW 08052011, with units
row[i] = get3d_value(grids->fIN[15].ptr, pitch, slicePitch, i, j, k)
+ 1.0 / 6.0 * get3d_value(grids->rho.ptr, pitch, slicePitch, i, j, k)
* (- get3d_value(grids->uy.ptr, pitch, slicePitch, i, j, k) - get3d_value(grids->uz.ptr, pitch, slicePitch, i, j, k))/(Cs)
+ Nyz;
/////END::: NEW 08052011, with units
devPtr = (char *)grids->fIN[17].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
/////NEW 08052011, with units
row[i] = get3d_value(grids->fIN[16].ptr, pitch, slicePitch, i, j, k)
+ 1.0 / 6.0 * get3d_value(grids->rho.ptr, pitch, slicePitch, i, j, k)
* (- get3d_value(grids->uy.ptr, pitch, slicePitch, i, j, k) + get3d_value(grids->uz.ptr, pitch, slicePitch, i, j, k))/(Cs)
- Nyz;
}
} /// END:::: if ((k == (grids->depth - 1))
}*/
__global__ void
fluid3d_noslip_boundary_kernel(void *g, int aBank)
{
fluid_GPUgrids *grids = (fluid_GPUgrids *)g;
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int k = aBank * blockDim.z + threadIdx.z;
// int d;
// check thread in boundary
if (i >= grids->width) return;
if (j >= grids->height) return;
if (k >= grids->depth) return;
size_t pitch = grids->rho.pitch;
size_t slicePitch = pitch * grids->height;
char *devPtr = (char *)grids->rho.ptr;
char *slice = devPtr + k * slicePitch;
double *row = (double *)(slice + j * pitch);
double C = grids->dx/grids->dt/UMAX;
double Nzx = 0.0, Nzy = 0.0;
//// Inflow boundary condition at height = 0.
if ((k == 0)/* && (i > 0) && (i < grids->width - 1) && (j > 0) && (j < grids->height - 1)*/)
{
// Implementation of on-site velocity boundary conditions for D3Q19 lattice Boltzmann simulations
// M. Hecht, J. Harting. J. of Stat. Mech.: Theory and Experiment. 2010
// % MACROSCOPIC (DIRICHLET) BOUNDARY CONDITIONS
/// due to singularity for nodes which are directly linked to solid boundary nodes. Special treatment is needed.
/// In fact, need to follow "M. Hecht, J. Harting." and "Y.T. Feng, K. Han and D.R.J. Owen.
/// Coupled lattice Boltzmann method and discrete element modeling of particle transport in
/// turbulent fluid flows: Computational issue. Int. J. Numer. Meth. Engng. 72:1111-1134, 2007" to derive the boundary condition.
/// Here, we try a simple fix: use initial condition to compute f again.
devPtr = (char *)grids->ux.ptr;
slice = devPtr + k * slicePitch;
row = (double *)(slice + j * pitch);
row[i] = 0.0;
devPtr = (char *)grids->uy.ptr;
slice = devPtr + k * slicePitch;
row = (double *)(slice + j * pitch);
row[i] = 0.0;
devPtr = (char *)grids->uz.ptr;
slice = devPtr + k * slicePitch;
row = (double *)(slice + j * pitch);
row[i] = 0.0;
/* if( (j == 1) || (j == grids->height-2) ||
(i == 1) || (i == grids->width-2)
)
{
//// TMP fix 08092011. Simply reinitialize
devPtr = (char *)grids->rho.ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] = FLUID_RHO;
for (d = 0; d < 19; ++d)
{
devPtr = (char *)grids->fIN[d].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
cu = 3.0 * (cx[d] * tmp_ux + cy[d] * tmp_uy + cz[d] * tmp_uz)*Cs;
row[i] = FLUID_RHO*t[d]*( 1.0 + cu/(Cs*Cs) + 0.5*cu*cu/(Cs*Cs*Cs*Cs) -
1.5* (tmp_ux * tmp_ux + tmp_uy * tmp_uy + tmp_uz*tmp_uz)/(Cs*Cs));
}
}
else*/
// {
devPtr = (char *)grids->rho.ptr;
slice = devPtr + k * slicePitch;
row = (double *)(slice + j * pitch);
///// NEW 08052011, with units.
///// part of fIN entering the domain is not correct at this point because of fluid3d_stream_kernel_2()
///// assumes periodicity on boundary conditions.
///// On inflow side (z = 0) in Z (depth) direction: This means e11, e15,e3, e16,e12 need to be updated.
row[i] = 1.0 / (1.0 - get3d_value(grids->uz.ptr, pitch, slicePitch, i, j, k)/C)
* (get3d_value(grids->fIN[1].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[2].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[3].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[4].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[7].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[8].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[11].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[12].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[0].ptr, pitch, slicePitch, i, j, k)
+ 2.0 * (get3d_value(grids->fIN[6].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[10].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[14].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[16].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[18].ptr, pitch, slicePitch, i, j, k)));
/////END::: NEW 08052011, with units
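// In closed form this is the Zou/He density at the z = 0 wall:
// rho = (f0 + sum of tangential f + 2 * sum of f entering from outside)
// / (1 - u_z / C)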
// % MICROSCOPIC BOUNDARY CONDITIONS: INLET (Zou/He BC)
//
/////NEW 08052011, with units
Nzx = 0.5 * (get3d_value(grids->fIN[1].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[7].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[8].ptr, pitch, slicePitch, i, j, k)
- get3d_value(grids->fIN[2].ptr, pitch, slicePitch, i, j, k)
- get3d_value(grids->fIN[11].ptr, pitch, slicePitch, i, j, k)
- get3d_value(grids->fIN[12].ptr, pitch, slicePitch, i, j, k))
- 1.0 / 3.0 * get3d_value(grids->rho.ptr, pitch, slicePitch, i, j, k) *
get3d_value(grids->ux.ptr, pitch, slicePitch, i, j, k)/C;
Nzy = 0.5 * (get3d_value(grids->fIN[3].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[7].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[11].ptr, pitch, slicePitch, i, j, k)
- get3d_value(grids->fIN[4].ptr, pitch, slicePitch, i, j, k)
- get3d_value(grids->fIN[8].ptr, pitch, slicePitch, i, j, k)
- get3d_value(grids->fIN[12].ptr, pitch, slicePitch, i, j, k))
- 1.0 / 3.0 * get3d_value(grids->rho.ptr, pitch, slicePitch, i, j, k) *
get3d_value(grids->uy.ptr, pitch, slicePitch, i, j, k)/C;
devPtr = (char *)grids->fIN[5].ptr;
slice = devPtr + k * slicePitch;
row = (double *)(slice + j * pitch);
/////NEW 08052011, with units
row[i] = get3d_value(grids->fIN[6].ptr, pitch, slicePitch, i, j, k)
+ 1.0 / 3.0 * get3d_value(grids->rho.ptr, pitch, slicePitch, i, j, k) *
get3d_value(grids->uz.ptr, pitch, slicePitch, i, j, k)/C;
/////END::: NEW 08052011, with units
devPtr = (char *)grids->fIN[9].ptr;
slice = devPtr + k * slicePitch;
row = (double *)(slice + j * pitch);
/////NEW 08052011, with units
row[i] = get3d_value(grids->fIN[14].ptr, pitch, slicePitch, i, j, k)
+ 1.0 / 6.0 * get3d_value(grids->rho.ptr, pitch, slicePitch, i, j, k)
* (get3d_value(grids->uz.ptr, pitch, slicePitch, i, j, k) + get3d_value(grids->ux.ptr, pitch, slicePitch, i, j, k))/C
- Nzx;
/////END::: NEW 08052011, with units
devPtr = (char *)grids->fIN[13].ptr;
slice = devPtr + k * slicePitch;
row = (double *)(slice + j * pitch);
/////NEW 08052011, with units
row[i] = get3d_value(grids->fIN[10].ptr, pitch, slicePitch, i, j, k)
+ 1.0 / 6.0 * get3d_value(grids->rho.ptr, pitch, slicePitch, i, j, k)
* (get3d_value(grids->uz.ptr, pitch, slicePitch, i, j, k) - get3d_value(grids->ux.ptr, pitch, slicePitch, i, j, k))/C
+ Nzx;
/////END::: NEW 08052011, with units
devPtr = (char *)grids->fIN[15].ptr;
slice = devPtr + k * slicePitch;
row = (double *)(slice + j * pitch);
/////NEW 08052011, with units
row[i] = get3d_value(grids->fIN[18].ptr, pitch, slicePitch, i, j, k)
+ 1.0 / 6.0 * get3d_value(grids->rho.ptr, pitch, slicePitch, i, j, k)
* (get3d_value(grids->uz.ptr, pitch, slicePitch, i, j, k) + get3d_value(grids->uy.ptr, pitch, slicePitch, i, j, k))/C
- Nzy;
/////END::: NEW 08052011, with units
devPtr = (char *)grids->fIN[17].ptr;
slice = devPtr + k * slicePitch;
row = (double *)(slice + j * pitch);
/////NEW 08052011, with units
row[i] = get3d_value(grids->fIN[16].ptr, pitch, slicePitch, i, j, k)
+ 1.0 / 6.0 * get3d_value(grids->rho.ptr, pitch, slicePitch, i, j, k)
* (get3d_value(grids->uz.ptr, pitch, slicePitch, i, j, k) - get3d_value(grids->uy.ptr, pitch, slicePitch, i, j, k))/C
+ Nzy;
/////END::: NEW 08052011, with units
//}
/* double rho = 0.0, ux = 0.0, uy = 0.0, uz = 0.0, dt = grids->dt, f[19];
for (int d = 0; d < 19; ++d) {
rho += get3d_value(grids->fIN[d].ptr, pitch, slicePitch, i, j, k);
ux += cx[d] * get3d_value(grids->fIN[d].ptr, pitch, slicePitch, i, j, k)*Cs;
uy += cy[d] * get3d_value(grids->fIN[d].ptr, pitch, slicePitch, i, j, k)*Cs;
uz += cz[d] * get3d_value(grids->fIN[d].ptr, pitch, slicePitch, i, j, k)*Cs;
if (i == 40 && j == 40 && d == 5){
double a = get3d_value(grids->fIN[d].ptr, pitch, slicePitch, i, j, k);
printf("bd fIN[%d] = %.16e\n", d, a);
}
// f[d] = get3d_value(grids->fIN[d].ptr, pitch, slicePitch, i, j, k);
}
if (i == 40 && j == 40){
printf("Boundary fIN[5] = %.16e, fIN[6] = %.16e\n", f[5],f[6]);
}
if (i == 40 && j == 40 && k == 1){
printf("rho = %.16e, ux = %.16e, uy = %.16e, uz = %.16e\n", rho, ux, uy, uz);
}
// float fdt = 0.5 * get3d_value(grids->Fx.ptr, pitch, slicePitch, i, j, k) * dt;
ux = (ux + 0.5 * get3d_value(grids->Fx.ptr, pitch, slicePitch, i, j, k) * dt) /rho;
uy = (uy + 0.5 * get3d_value(grids->Fy.ptr, pitch, slicePitch, i, j, k) * dt) /rho;
uz = (uz + 0.5 * get3d_value(grids->Fz.ptr, pitch, slicePitch, i, j, k) * dt) /rho;
if (i == 40 && j == 40){
printf("rho = %.16e, ux = %.16e, uy = %.16e, uz = %.16e\n", rho, ux, uy, uz);
}*/
} /// end :::: if(k == 0)
}
__global__ void
fluid3d_moving_plate_boundary_kernel(void *g, int aBank)
{
fluid_GPUgrids *grids = (fluid_GPUgrids *)g;
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int k = aBank * blockDim.z + threadIdx.z;
// int d;
// check thread in boundary
if (i >= grids->width) return;
if (j >= grids->height) return;
if (k >= grids->depth) return;
size_t pitch = grids->rho.pitch;
size_t slicePitch = pitch * grids->height;
char *devPtr = (char *)grids->rho.ptr;
char *slice = devPtr + k * slicePitch;
double *row = (double *)(slice + j * pitch);
  double C = grids->dx/grids->dt/UMAX;
  double Nzx = 0.0, Nzy = 0.0;
//// Inflow boundary condition at height = 0.
if ((k == grids->depth - 1 )/* && (i > 0) && (i < grids->width - 1) && (j > 0) && (j < grids->height - 1)*/)
{
    // Implementation of on-site velocity boundary conditions for D3Q19 lattice Boltzmann simulations
    // M. Hecht, J. Harting. J. of Stat. Mech.: Theory and Experiment. 2010
    // % MACROSCOPIC (DIRICHLET) BOUNDARY CONDITIONS
    // % Moving plate: imposed shear velocity (uy = SHEAR_RATE * k * dx / UMAX)
    /// Due to a singularity at nodes directly linked to solid boundary nodes, special treatment is needed.
    /// Strictly, one should follow "M. Hecht, J. Harting." and "Y.T. Feng, K. Han and D.R.J. Owen.
    /// Coupled lattice Boltzmann method and discrete element modeling of particle transport in
    /// turbulent fluid flows: Computational issue. Int. J. Numer. Meth. Engng. 72:1111-1134, 2007" to derive the boundary condition.
    /// Here, we try a simple fix: use the initial condition to recompute f.
devPtr = (char *)grids->ux.ptr;
slice = devPtr + k * slicePitch;
row = (double *)(slice + j * pitch);
row[i] = 0.0;
devPtr = (char *)grids->uy.ptr;
slice = devPtr + k * slicePitch;
row = (double *)(slice + j * pitch);
row[i] = SHEAR_RATE* k * (grids->dx)/UMAX;
devPtr = (char *)grids->uz.ptr;
slice = devPtr + k * slicePitch;
row = (double *)(slice + j * pitch);
row[i] = 0.0;
/* if( (j == 1) || (j == grids->height-2) ||
(i == 1) || (i == grids->width-2)
)
{
//// TMP fix 08092011. Simply reinitialize
devPtr = (char *)grids->rho.ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] = FLUID_RHO;
for (d = 0; d < 19; ++d)
{
devPtr = (char *)grids->fIN[d].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
cu = 3.0 * (cx[d] * tmp_ux + cy[d] * tmp_uy + cz[d] * tmp_uz)*Cs;
row[i] = FLUID_RHO*t[d]*( 1.0 + cu/(Cs*Cs) + 0.5*cu*cu/(Cs*Cs*Cs*Cs) -
1.5* (tmp_ux * tmp_ux + tmp_uy * tmp_uy + tmp_uz*tmp_uz)/(Cs*Cs));
}
}
else*/
{
devPtr = (char *)grids->rho.ptr;
slice = devPtr + k * slicePitch;
row = (double *)(slice + j * pitch);
///// NEW 08052011, with units.
      ///// Part of fIN entering the domain is not correct at this point because fluid3d_stream_kernel_2()
      ///// assumes periodic boundary conditions.
      ///// On the moving-plate side (z = depth-1) in the Z (depth) direction, f6, f10, f14, f16, f18 need to be updated.
row[i] = 1.0 / (1.0 + get3d_value(grids->uz.ptr, pitch, slicePitch, i, j, k)/C)
* (get3d_value(grids->fIN[1].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[2].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[3].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[4].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[7].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[11].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[12].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[8].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[0].ptr, pitch, slicePitch, i, j, k)
+ 2.0 * (get3d_value(grids->fIN[5].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[9].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[13].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[15].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[17].ptr, pitch, slicePitch, i, j, k)));
/////END::: NEW 08052011, with units
// % MICROSCOPIC BOUNDARY CONDITIONS: INLET (Zou/He BC)
//
/////NEW 08052011, with units
Nzx = 0.5 * (get3d_value(grids->fIN[1].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[7].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[8].ptr, pitch, slicePitch, i, j, k)
- get3d_value(grids->fIN[2].ptr, pitch, slicePitch, i, j, k)
- get3d_value(grids->fIN[11].ptr, pitch, slicePitch, i, j, k)
- get3d_value(grids->fIN[12].ptr, pitch, slicePitch, i, j, k))
- 1.0 / 3.0 * get3d_value(grids->rho.ptr, pitch, slicePitch, i, j, k) *
get3d_value(grids->ux.ptr, pitch, slicePitch, i, j, k)/C;
Nzy = 0.5 * (get3d_value(grids->fIN[3].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[7].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[11].ptr, pitch, slicePitch, i, j, k)
- get3d_value(grids->fIN[4].ptr, pitch, slicePitch, i, j, k)
- get3d_value(grids->fIN[8].ptr, pitch, slicePitch, i, j, k)
- get3d_value(grids->fIN[12].ptr, pitch, slicePitch, i, j, k))
- 1.0 / 3.0 * get3d_value(grids->rho.ptr, pitch, slicePitch, i, j, k) *
get3d_value(grids->uy.ptr, pitch, slicePitch, i, j, k)/C;
devPtr = (char *)grids->fIN[6].ptr;
slice = devPtr + k * slicePitch;
row = (double *)(slice + j * pitch);
/////NEW 08052011, with units
row[i] = get3d_value(grids->fIN[5].ptr, pitch, slicePitch, i, j, k)
- 1.0 / 3.0 * get3d_value(grids->rho.ptr, pitch, slicePitch, i, j, k) *
get3d_value(grids->uz.ptr, pitch, slicePitch, i, j, k)/C;
/////END::: NEW 08052011, with units
devPtr = (char *)grids->fIN[10].ptr;
slice = devPtr + k * slicePitch;
row = (double *)(slice + j * pitch);
/////NEW 08052011, with units
row[i] = get3d_value(grids->fIN[13].ptr, pitch, slicePitch, i, j, k)
+ 1.0 / 6.0 * get3d_value(grids->rho.ptr, pitch, slicePitch, i, j, k)
* (- get3d_value(grids->uz.ptr, pitch, slicePitch, i, j, k) + get3d_value(grids->ux.ptr, pitch, slicePitch, i, j, k))/C
- Nzx;
/////END::: NEW 08052011, with units
devPtr = (char *)grids->fIN[14].ptr;
slice = devPtr + k * slicePitch;
row = (double *)(slice + j * pitch);
/////NEW 08052011, with units
row[i] = get3d_value(grids->fIN[9].ptr, pitch, slicePitch, i, j, k)
+ 1.0 / 6.0 * get3d_value(grids->rho.ptr, pitch, slicePitch, i, j, k)
* (- get3d_value(grids->uz.ptr, pitch, slicePitch, i, j, k) - get3d_value(grids->ux.ptr, pitch, slicePitch, i, j, k))/C
+ Nzx;
/////END::: NEW 08052011, with units
devPtr = (char *)grids->fIN[16].ptr;
slice = devPtr + k * slicePitch;
row = (double *)(slice + j * pitch);
/////NEW 08052011, with units
row[i] = get3d_value(grids->fIN[17].ptr, pitch, slicePitch, i, j, k)
+ 1.0 / 6.0 * get3d_value(grids->rho.ptr, pitch, slicePitch, i, j, k)
* (- get3d_value(grids->uz.ptr, pitch, slicePitch, i, j, k) + get3d_value(grids->uy.ptr, pitch, slicePitch, i, j, k))/C
- Nzy;
/////END::: NEW 08052011, with units
devPtr = (char *)grids->fIN[18].ptr;
slice = devPtr + k * slicePitch;
row = (double *)(slice + j * pitch);
/////NEW 08052011, with units
row[i] = get3d_value(grids->fIN[15].ptr, pitch, slicePitch, i, j, k)
+ 1.0 / 6.0 * get3d_value(grids->rho.ptr, pitch, slicePitch, i, j, k)
* (- get3d_value(grids->uz.ptr, pitch, slicePitch, i, j, k) - get3d_value(grids->uy.ptr, pitch, slicePitch, i, j, k))/C
+ Nzy;
/////END::: NEW 08052011, with units
}
  } /// end :::: if(k == grids->depth - 1)
}
/*__global__ void
fluid3d_edge_corner_boundary_kernel(void *g, int aBank){
fluid_GPUgrids *grids = (fluid_GPUgrids *)g;
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int k = aBank * blockDim.z + threadIdx.z;
int d;
// check thread in boundary
if (i >= grids->width) return;
if (j >= grids->height) return;
if (k >= grids->depth) return;
size_t pitch = grids->rho.pitch;
size_t slicePitch = pitch * grids->height;
char *devPtr;
char *slice;
float *row;
//const float SHEAR_RATE = 1e4;
float Nyzx = 0.0, Nxzy = 0.0;
if (k == grids->depth - 1){
devPtr = (char *)grids->ux.ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] = 0.0;
devPtr = (char *)grids->uy.ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] = SHEAR_RATE * k * (grids->dx);
devPtr = (char *)grids->uz.ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] = 0.0 ;
//Edges
if ((j == 0) && (i > 0) && (i < grids->width - 1)){
int bounce[19] = {0, 1, 2, 4, 4, 5, 5, 12, 8, 9, 13, 8, 12, 13, 9, 15, 17, 17, 18};
//{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,11, 12, 13, 14,15, 16, 17, 18}
for (d = 0; d < 19; ++d){
devPtr = (char *)grids->fIN[d].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] = get3d_value(grids->fIN[bounce[d]].ptr, pitch, slicePitch, i, j, k);
}
float f1 = get3d_value(grids->fIN[1].ptr, pitch, slicePitch, i, j, k);
float f2 = get3d_value(grids->fIN[2].ptr, pitch, slicePitch, i, j, k);
Nyzx = 0.25 * (f1 - f2);
devPtr = (char *)grids->fIN[7].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] += Nyzx;
devPtr = (char *)grids->fIN[11].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] -= Nyzx;
devPtr = (char *)grids->fIN[10].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] += Nyzx;
devPtr = (char *)grids->fIN[14].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] -= Nyzx;
devPtr = (char *)grids->fIN[0].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] = 12.0/22.0 * (get3d_value(grids->fIN[16].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[17].ptr, pitch, slicePitch, i, j, k));
}
if ((j == grids->height - 1) && (i > 0) && (i < grids->width - 1)){
int bounce[19] = {0, 1, 2, 3, 3, 5, 5, 7, 11, 9, 13, 11, 7, 13, 9, 15, 16, 17, 15};
for (d = 0; d < 19; ++d){
devPtr = (char *)grids->fIN[d].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] = get3d_value(grids->fIN[bounce[d]].ptr, pitch, slicePitch, i, j, k);
}
float f1 = get3d_value(grids->fIN[1].ptr, pitch, slicePitch, i, j, k);
float f2 = get3d_value(grids->fIN[2].ptr, pitch, slicePitch, i, j, k);
Nyzx = 0.25 * (f1 - f2);
devPtr = (char *)grids->fIN[8].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] += Nyzx;
devPtr = (char *)grids->fIN[12].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] -= Nyzx;
devPtr = (char *)grids->fIN[10].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] += Nyzx;
devPtr = (char *)grids->fIN[14].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] -= Nyzx;
devPtr = (char *)grids->fIN[0].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] = 12.0/22.0 * (get3d_value(grids->fIN[15].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[18].ptr, pitch, slicePitch, i, j, k));
}
if ((i == 0) && (j > 0) && (j < grids->height - 1)){
int bounce[19] = {0, 2, 2, 3, 4, 5, 5, 12, 11, 9, 13, 11, 12, 13, 14, 15, 17, 17, 15};
for (d = 0; d < 19; ++d){
devPtr = (char *)grids->fIN[d].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] = get3d_value(grids->fIN[bounce[d]].ptr, pitch, slicePitch, i, j, k);
}
float f1 = get3d_value(grids->fIN[3].ptr, pitch, slicePitch, i, j, k);
float f2 = get3d_value(grids->fIN[4].ptr, pitch, slicePitch, i, j, k);
Nxzy = 0.25 * (f1 - f2);
devPtr = (char *)grids->fIN[7].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] += Nxzy;
devPtr = (char *)grids->fIN[8].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] -= Nxzy;
devPtr = (char *)grids->fIN[16].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] += Nxzy;
devPtr = (char *)grids->fIN[18].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] -= Nxzy;
devPtr = (char *)grids->fIN[0].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] = 12.0/22.0 * (get3d_value(grids->fIN[9].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[14].ptr, pitch, slicePitch, i, j, k));
}
if ((i == grids->width - 1) && (j > 0) && (j < grids->height - 1)){
int bounce[19] = {0, 1, 1, 3, 4, 5, 5, 7, 8, 9, 10, 8, 7, 13, 9, 15, 17, 17, 15};
for (d = 0; d < 19; ++d){
devPtr = (char *)grids->fIN[d].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] = get3d_value(grids->fIN[bounce[d]].ptr, pitch, slicePitch, i, j, k);
}
float f1 = get3d_value(grids->fIN[3].ptr, pitch, slicePitch, i, j, k);
float f2 = get3d_value(grids->fIN[4].ptr, pitch, slicePitch, i, j, k);
Nxzy = 0.25 * (f1 - f2);
devPtr = (char *)grids->fIN[11].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] += Nxzy;
devPtr = (char *)grids->fIN[12].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] -= Nxzy;
devPtr = (char *)grids->fIN[16].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] += Nxzy;
devPtr = (char *)grids->fIN[18].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] -= Nxzy;
devPtr = (char *)grids->fIN[0].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] = 12.0/22.0 * (get3d_value(grids->fIN[10].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[13].ptr, pitch, slicePitch, i, j, k));
}
// corners
if ((i == 0) && (j == 0)){
int bounce[19] = {0, 2, 2, 4, 4, 5, 5, 12, 8, 9, 13, 11, 12, 13, 14, 15, 17, 17, 18};
for (d = 0; d < 19; ++d){
devPtr = (char *)grids->fIN[d].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] = get3d_value(grids->fIN[bounce[d]].ptr, pitch, slicePitch, i, j, k);
}
devPtr = (char *)grids->fIN[0].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] = 12.0/18.0 * (get3d_value(grids->fIN[8].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[11].ptr, pitch, slicePitch, i, j, k));
}
if ((i == 0) && (j == grids->height - 1)){
int bounce[19] = {0, 2, 2, 3, 3, 5, 5, 7, 11, 9, 13, 11, 12, 13, 14, 15, 16, 17, 15};
for (d = 0; d < 19; ++d){
devPtr = (char *)grids->fIN[d].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] = get3d_value(grids->fIN[bounce[d]].ptr, pitch, slicePitch, i, j, k);
}
devPtr = (char *)grids->fIN[0].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] = 12.0/18.0 * (get3d_value(grids->fIN[7].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[12].ptr, pitch, slicePitch, i, j, k));
}
if ((i == grids->width - 1) && (j == 0)){
int bounce[19] = {0, 1, 1, 4, 4, 5, 5, 7, 8, 9, 10, 8, 12, 13, 9, 15, 17, 17, 18};
for (d = 0; d < 19; ++d){
devPtr = (char *)grids->fIN[d].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] = get3d_value(grids->fIN[bounce[d]].ptr, pitch, slicePitch, i, j, k);
}
devPtr = (char *)grids->fIN[0].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] = 12.0/18.0 * (get3d_value(grids->fIN[7].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[12].ptr, pitch, slicePitch, i, j, k));
}
if ((i == grids->width - 1) && (j == grids->height - 1)){
int bounce[19] = {0, 1, 1, 3, 3, 5, 5, 7, 8, 9, 10, 11, 7, 13, 9, 15, 16, 17, 15};
for (d = 0; d < 19; ++d){
devPtr = (char *)grids->fIN[d].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] = get3d_value(grids->fIN[bounce[d]].ptr, pitch, slicePitch, i, j, k);
}
devPtr = (char *)grids->fIN[0].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] = 12.0/18.0 * (get3d_value(grids->fIN[8].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[11].ptr, pitch, slicePitch, i, j, k));
}
}//if (k == grids->depth - 1)
}*/
///// fluid3d_obst_bounce_back_kernel() performs bounce-back (reverses the states only)
///// for all solid nodes.
///// For solid nodes, the bounced-back states are saved in
///// both fIN and fOUT. fOUT will be used in fluid3d_obst_stream_kernel()
///// to stream the states from the solid nodes back to the connected fluid nodes.
///// fluid3d_obst_bounce_back_kernel() and fluid3d_obst_stream_kernel()
///// together implement the no-slip wall boundary condition via the bounce-back rule.
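///// Worked example of the bounce-back index map bb[] (defined near the top of this
///// file): for d = 7 with lattice direction (1,1,0), bb[7] = 12 with direction
///// (-1,-1,0), so the kernel simply reverses each population on a solid node:
/////   fOUT[d] = fIN[bb[d]]   for all d = 0..18.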
/*__global__ void
fluid3d_obst_bounce_back_kernel(void *g, int aBank)
{
fluid_GPUgrids *grids = (fluid_GPUgrids *)g;
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int k = aBank * blockDim.z + threadIdx.z;
int d;
// check thread in boundary
if (i >= grids->width) return;
if (j >= grids->height) return;
if (k >= grids->depth) return;
size_t pitch = grids->rho.pitch;
size_t slicePitch = pitch * grids->height;
// NOT OBSTACLES, return
char *devPtr = (char *)grids->obst.ptr;
char *slice = devPtr + k * slicePitch;
float *row = (float *)(slice + j * pitch);
if (row[i] < 0.5) return;
// MICROSCOPIC BOUNDARY CONDITIONS: OBSTACLES (Half-Way bounce-back)
//
//// switch distribution function at solid nodes.
//// Call fluid3d_obst_stream_kernel() afterwards to bounce-back.
for (d = 0; d < 19; ++d)
{
devPtr = (char *)grids->fOUT[d].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] = get3d_value(grids->fIN[bb[d]].ptr, pitch, slicePitch, i, j, k);
}
}*/
__global__ void
fluid3d_velocity_density_kernel(void *g, int aBank){
fluid_GPUgrids *grids = (fluid_GPUgrids *)g;
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int k = aBank * blockDim.z + threadIdx.z;
int d;
  double dx = grids->dx, dt = grids->dt;
  double C = dx/dt/UMAX;
double ux = 0.0, uy = 0.0, uz = 0.0, rho = 0.0;
// check thread in boundary
if (i >= grids->width) return;
if (j >= grids->height) return;
if (k >= grids->depth) return;
size_t pitch = grids->rho.pitch;
size_t slicePitch = pitch * grids->height;
size_t pitchf = grids->obst.pitch;
size_t slicePitchf = pitchf * grids->height;
// OBSTACLES, return
char *devPtr = (char *)grids->obst.ptr;
char *slice = devPtr + k * slicePitchf;
float *rowf = (float *)(slice + j * pitchf);
if (rowf[i] > 0.5) return;
// double f[19];
//// calculate rho and ux, uy, uz
//double fx, fy, fz;
for (d = 0; d < 19; ++d) {
rho += get3d_value(grids->fIN[d].ptr, pitch, slicePitch, i, j, k);
ux += cx[d] * get3d_value(grids->fIN[d].ptr, pitch, slicePitch, i, j, k);
uy += cy[d] * get3d_value(grids->fIN[d].ptr, pitch, slicePitch, i, j, k);
uz += cz[d] * get3d_value(grids->fIN[d].ptr, pitch, slicePitch, i, j, k);
/* if (i == 40 && j == 20 && k == 60){
double a = get3d_value(grids->fIN[d].ptr, pitch, slicePitch, i, j, k);
printf("fIN[%d] = %.16e\n", d, a);
}*/
// f[d] = get3d_value(grids->fIN[d].ptr, pitch, slicePitch, i, j, k);
}
/* if (i == 40 && j == 200 && k == 40){
printf("fIN[5] = %.16e, fIN[6] = %.16e\n", f[5],f[6]);
}*/
/* if (i == 40 && j == 20 && k == 60){
printf("rho = %.16e, ux = %.16e, uy = %.16e, uz = %.16e\n", rho, ux * Cs, uy * Cs, uz * Cs);
}*/
//fx = UNIT_FACTOR * 0.5 * get3d_value(grids->Fx.ptr, pitch, slicePitch, i, j, k) * dt;
//fy = UNIT_FACTOR * 0.5 * get3d_value(grids->Fy.ptr, pitch, slicePitch, i, j, k) * dt;
//fz = UNIT_FACTOR * 0.5 * get3d_value(grids->Fz.ptr, pitch, slicePitch, i, j, k) * dt;
double F_over_RHO_U = F0/INIT_RHO/UMAX * UNIT_FACTOR;
ux = (ux * C + 0.5 * F_over_RHO_U * get3d_value(grids->Fx.ptr, pitch, slicePitch, i, j, k) * dt) /rho;
uy = (uy * C + 0.5 * F_over_RHO_U * get3d_value(grids->Fy.ptr, pitch, slicePitch, i, j, k) * dt) /rho;
uz = (uz * C + 0.5 * F_over_RHO_U * get3d_value(grids->Fz.ptr, pitch, slicePitch, i, j, k) * dt) /rho;
/*if (i == 40 && j == 200 && k == 7){
stepL ++;
printf("L:step = %d, ux = %.16e, uy = %.16e, uz = %.16e\n", stepL, ux, uy, uz);
// printf("fx = %.16e, fy = %.16e, fz = %.16e\n", fx, fy, fz);
}*/
/* if (i == 80 && j == 2 && k == 65){
stepR ++;
printf("R:step = %d, ux = %.16e, uy = %.16e, uz = %.16e\n", stepR, ux, uy, uz);
}*/
/*ux = ux * Cs /rho;
uy = uy * Cs /rho;
uz = uz * Cs /rho;*/
devPtr = (char *)grids->rho.ptr;
slice = devPtr + k * slicePitch;
double *row = (double *)(slice + j * pitch);
row[i] = rho;
devPtr = (char *)grids->ux.ptr;
slice = devPtr + k * slicePitch;
row = (double *)(slice + j * pitch);
row[i] = ux;
devPtr = (char *)grids->uy.ptr;
slice = devPtr + k * slicePitch;
row = (double *)(slice + j * pitch);
row[i] = uy;
devPtr = (char *)grids->uz.ptr;
slice = devPtr + k * slicePitch;
row = (double *)(slice + j * pitch);
row[i] = uz;
}
///// fluid3d_collision_kernel()
///// performs the collision step for all fluid nodes.
///// Results are saved to fOUT.
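///// The update below is a standard BGK collision with a forcing term:
/////   fOUT[d] = fIN[d] - (1/OMEGA) * (fIN[d] - fEQ[d]) + (forcing contribution),
///// with OMEGA playing the role of the relaxation time tau.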
__global__ void
fluid3d_collision_kernel(void *g, int aBank)
{
fluid_GPUgrids *grids = (fluid_GPUgrids *)g;
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int k = aBank * blockDim.z + threadIdx.z;
int d;
double C = grids->dx/grids->dt/UMAX;
double rho = 0.0, cu = 0.0;
double F[3], u[3], e[3], uF[3][3], ee[3][3];
double C2 = C*C;
double C4 = C2*C2;
// check thread in boundary
if (i >= grids->width) return;
if (j >= grids->height) return;
if (k >= grids->depth) return;
size_t pitch = grids->rho.pitch;
size_t slicePitch = pitch * grids->height;
//size_t pitchf = grids->obst.pitch;
//size_t slicePitchf = pitchf * grids->height;
// OBSTACLES, return
/*char *devPtr = (char *)grids->obst.ptr;
char *slice = devPtr + k * slicePitch;
float *row = (float *)(slice + j * pitch);
if (row[i] > 0.5) return;*/
rho = get3d_value(grids->rho.ptr, pitch, slicePitch, i, j, k);
u[0] = get3d_value(grids->ux.ptr, pitch, slicePitch, i, j, k);
u[1] = get3d_value(grids->uy.ptr, pitch, slicePitch, i, j, k);
u[2] = get3d_value(grids->uz.ptr, pitch, slicePitch, i, j, k);
/*if (i == 37 && j == 3 && k == 10){
printf("ux = %.16e, uy = %.16e, uz = %.16e\n", u[0], u[1], u[2]);
// printf("fx = %.16e, fy = %.16e, fz = %.16e\n", fx, fy, fz);
}*/
char *devPtr = (char *)grids->Fx.ptr;
char *slice = devPtr + k * slicePitch;
double *row = (double *)(slice + j * pitch);
F[0] = row[i];
devPtr = (char *)grids->Fy.ptr;
slice = devPtr + k * slicePitch;
row = (double *)(slice + j * pitch);
F[1] = row[i];
devPtr = (char *)grids->Fz.ptr;
slice = devPtr + k * slicePitch;
row = (double *)(slice + j * pitch);
F[2] = row[i];
for (int l = 0; l < 3; ++l){
for (int m = 0; m < 3; ++m){
uF[l][m] = u[l] * F[m] + F[l] * u[m];
}
}
for (d = 0; d < 19; ++d) {
e[0] = cx[d] * C;
e[1] = cy[d] * C;
e[2] = cz[d] * C;
double sum = 0;
cu = 3.0 * (e[0] * u[0] + e[1] * u[1] + e[2] * u[2]);
for (int l = 0; l < 3; ++l)
for (int m = 0; m < 3; ++m){
if (l == m) ee[l][m] = e[l] * e[m] - C2/3.0;
else ee[l][m] = e[l] * e[m];
}
for (int l = 0; l < 3; ++l){
for (int m = 0; m < 3; ++m){
sum += uF[l][m] * ee[m][l];
}
}
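    // Second-order equilibrium distribution (C is the lattice speed dx/dt/UMAX):
    //   fEQ = rho * t[d] * (1 + 3 e.u/C^2 + (9/2)(e.u)^2/C^4 - (3/2)|u|^2/C^2);
    // note that cu computed above already holds 3 * (e.u).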
double fEQ = rho * t[d] * (1.0 + cu/(C2) + 0.5 * cu * cu/(C4) - 1.5 * (u[0] * u[0] + u[1] * u[1] + u[2] * u[2])/(C2));
double fIN = get3d_value(grids->fIN[d].ptr, pitch, slicePitch, i, j, k);
/* if(i == 0 && j == 0 && k == 2){
printf("rho = %.16e, fEQ = %.16e, fIN[%d] = %.16e\n", rho, fEQ, d, fIN);
}*/
char *devPtr = (char *)grids->fOUT[d].ptr;
char *slice = devPtr + k * slicePitch;
double *row = (double *)(slice + j * pitch);
// double fdt = t[d] * (1.0 - 0.5/OMEGA) * (3.0 * ((cx[d] * Cs - ux) * Fx +
// (cy[d] * Cs - uy)* Fy + (cz[d] * Cs - uz) * Fz) +
// 9.0 * (cx[d] * ux + cy[d] * uy + cz[d] * uz) * (cx[d] * Fx + cy[d] * Fy +
// cz[d] * Fz))/ Cs2 * (grids->dt) * UNIT_FACTOR;// pico gram/micron^3
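    // Active forcing term: it carries the (1.0 - 0.5/OMEGA) prefactor characteristic
    // of Guo-type forcing schemes; sum, computed above, is the contraction of the
    // symmetric tensor (u F + F u) with the traceless tensor (e e - C^2/3 * I).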
double fdt = t[d] * (1.0 - 0.5/OMEGA) * (3.0 * (e[0] * F[0] + e[1] * F[1] + e[2] * F[2])/C2 +
4.5 * sum /C4) * (grids->dt);
// double fdt = 1.5 * t[d] * (Fx * cx[d] + Fy * cy[d] + Fz * cz[d]) * Cs * (grids->dt);
row[i] = fIN - (1.0/OMEGA) * (fIN - fEQ) + F0 * fdt/INIT_RHO/UMAX * UNIT_FACTOR;
/*if(i == 40 && j == 20 && k == 60 && d == 0){
printf("fdt = %.16e\n", fdt);
// printf("fIN[%d] = %.16e,fOUT[%d] = %.16e\n", d, fIN, d, row[i]);
// printf("Fx = %.16e, Fy = %.16e, Fz = %.16e\n", Fx,Fy,Fz);
}*/
}
}
///// fluid3d_stream_kernel() only performs streaming
///// for fluid nodes.
///// Periodicity is assumed in the x, y, and z directions.
///// When an inflow/outflow boundary condition is used in the z-direction, this makes
///// fIN incorrect for nodes in the inflow/outflow planes; the boundary kernels above fix them afterwards.
///// Results are saved to fIN.
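///// Example of the periodic wrap used below: a node at i = 0 receiving along d = 1
///// (cx[1] = +1) computes si = -1, which wraps to si = grids->width - 1, i.e. the
///// population is pulled from the opposite face of the domain.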
__global__ void
fluid3d_stream_kernel(void *g, int aBank)
{
fluid_GPUgrids *grids = (fluid_GPUgrids *)g;
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int k = aBank * blockDim.z + threadIdx.z;
// check thread in boundary
if (i >= grids->width) return;
if (j >= grids->height) return;
if (k >= grids->depth) return;
size_t pitch = grids->rho.pitch;
size_t slicePitch = pitch * grids->height;
  // OBSTACLE nodes need to receive from fluid nodes as well.
/***
char *devPtr = (char *)grids->obst.ptr;
char *slice = devPtr + k * slicePitch;
float *row = (float *)(slice + j * pitch);
if (row[i] > 0.5) return;
***/
int si, sj, sk, d;
  /*** current [i][j][k] receives from neighbors. ****/
for (d = 0; d < 19; ++d)
{
si = i - (int)cx[d]; sj = j - (int)cy[d]; sk = k - (int)cz[d];
    //// NOTE: Periodicity is assumed here.
    //// This does not matter for most cases considered. Specific
    //// boundary condition functions are called afterwards to set the proper
    //// boundary conditions.
if (si < 0) si = grids->width - 1;
if (sj < 0) sj = grids->height - 1;
if (sk < 0) sk = grids->depth - 1;
if (si == grids->width) si = 0;
if (sj == grids->height) sj = 0;
if (sk == grids->depth) sk = 0;
char *devPtr = (char *)grids->fIN[d].ptr;
char *slice = devPtr + k * slicePitch;
double *row = (double *)(slice + j * pitch);
row[i] = get3d_value(grids->fOUT[d].ptr, pitch, slicePitch, si, sj, sk);
}
}
__global__ void
fluid3d_obst_stream_kernel(void *g, int aBank)
{
fluid_GPUgrids *grids = (fluid_GPUgrids *)g;
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int k = aBank * blockDim.z + threadIdx.z;
// int lx = grids->width;
// int ly = grids->height;
// int lz = grids->depth;
// check thread in boundary
if (i >= grids->width) return;
if (j >= grids->height) return;
if (k >= grids->depth) return;
  size_t pitch = grids->rho.pitch;
  size_t slicePitch = pitch * grids->height;
  size_t pitchf = grids->obst.pitch;
  size_t slicePitchf = pitchf * grids->height;
int si, sj, sk, d;
  /*** [i][j][k] receives from neighbors. ****/
  // Go through all nodes and find fluid nodes that are linked to solid boundary nodes.
  //// IMPORTANT: check whether (si,sj,sk) is a solid boundary node.
  //// If not, skip it.
  //// This assumes that we use an inflow/outflow boundary condition in the depth direction
  //// and wall boundary conditions at the remaining boundaries, which are specified by
  //// grids->obst.
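  //// Example: a fluid node at (i,j,k) receiving along d = 1 (cx[1] = +1) has the
  //// source (si,sj,sk) = (i-1,j,k); if that source is solid (obst >= 0.5), the
  //// already-reversed population stored in fOUT[1] there streams back into fIN[1] here.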
for (d = 0; d < 19; ++d)
{
si = i - (int)cx[d]; sj = j - (int)cy[d]; sk = k - (int)cz[d];
if (si < 0) continue;
if (sj < 0) continue;
if (sk < 0) continue;
if (si >= grids->width) continue;
if (sj >= grids->height) continue;
if (sk >= grids->depth) continue;
    char *devPtr = (char *)grids->obst.ptr;
    char *slice = devPtr + sk * slicePitchf;
    float *row = (float *)(slice + sj * pitchf);
    if (row[si] < 0.5) continue;
    devPtr = (char *)grids->fIN[d].ptr;
    slice = devPtr + k * slicePitch;
    // fIN stores doubles (see fluid3d_stream_kernel), so the row must be accessed as double*.
    double *drow = (double *)(slice + j * pitch);
    // Using fOUT[bb[d]] would be wrong: the states have already been reversed in fluid3d_obst_bounce_back_kernel.
    // drow[i] = get3d_value(grids->fOUT[bb[d]].ptr, pitch, slicePitch, si, sj, sk);
    drow[i] = get3d_value(grids->fOUT[d].ptr, pitch, slicePitch, si, sj, sk);
}
}
// Calculate the distributed force on the LB grid
__global__ void
fluid3d_force_distribute_kernel(void *g, void *g_SEM, int aBank){
fluid_GPUgrids *grids = (fluid_GPUgrids *)g;
sem_GPUgrids *grids_SEM = (sem_GPUgrids *)g_SEM;
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int k = aBank * blockDim.z + threadIdx.z;
// check thread in boundary
if (i >= grids->width) return;
if (j >= grids->height) return;
if (k >= grids->depth) return;
size_t pitch = grids->rho.pitch;
size_t slicePitch = pitch * grids->height;
  double dx = grids->dx;
  double threshold = 2 * dx;
double delta;
double x, y, z;
int numOfCells = grids_SEM->numOfCells;
int* numOfElements = grids_SEM->numOfElements;
double *sem_X = grids_SEM->X;
double *sem_Y = grids_SEM->Y;
double *sem_Z = grids_SEM->Z;
double *sem_Fx = grids_SEM->F_X;
double *sem_Fy = grids_SEM->F_Y;
double *sem_Fz = grids_SEM->F_Z;
size_t pitch_sem = grids_SEM->pitch;
char *devPtr = (char *)grids->Fx.ptr;
char *slice = devPtr + k * slicePitch;
double *Fx = (double *)(slice + j * pitch);
devPtr = (char *)grids->Fy.ptr;
slice = devPtr + k * slicePitch;
double *Fy = (double *)(slice + j * pitch);
devPtr = (char *)grids->Fz.ptr;
slice = devPtr + k * slicePitch;
double *Fz = (double *)(slice + j * pitch);
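  // Spread each SEM element's force to this grid node with a smoothed discrete
  // delta of support 2*dx per axis: the weight below is the product of three 1D
  // cosine kernels phi(r) = (1 + cos(pi*r/(2*dx)))/4 for |r| < 2*dx (a Peskin-style
  // immersed-boundary kernel).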
for (int l = 0; l < numOfCells; ++l)
for (int m = 0; m < numOfElements[l]; ++m)
{
double *row = (double*)((char*)sem_X + m * pitch_sem);
x = row[l];
row = (double*)((char*)sem_Y + m * pitch_sem);
y = row[l];
row = (double*)((char*)sem_Z + m * pitch_sem);
z = row[l];
      if (fabs(x - i * dx) >= threshold) continue;
      if (fabs(y - j * dx) >= threshold) continue;
      if (fabs(z - k * dx) >= threshold) continue;
delta = (0.25 * (1.0 + cos(0.5*PI*(x - i * dx)/dx))) * (0.25 * (1.0 + cos(0.5*PI*(y - j * dx)/dx))) *
(0.25 * (1.0 + cos(0.5*PI*(z - k * dx)/dx)));
row = (double*)((char*)sem_Fx + m * pitch_sem);
Fx[i] += row[l] * delta;
row = (double*)((char*)sem_Fy + m * pitch_sem);
Fy[i] += row[l] * delta;
row = (double*)((char*)sem_Fz + m * pitch_sem);
Fz[i] += row[l] * delta;
}
  // Periodic distribution: wrap j across the y (height) boundary and spread again
if (j < 2 || j > (grids->height - 2)){
if (j < 2) j = j + grids->height;
else j = j - grids->height;
for (int l = 0; l < numOfCells; ++l)
for (int m = 0; m < numOfElements[l]; ++m)
{
double *row = (double*)((char*)sem_X + m * pitch_sem);
x = row[l];
row = (double*)((char*)sem_Y + m * pitch_sem);
y = row[l];
row = (double*)((char*)sem_Z + m * pitch_sem);
z = row[l];
      if (fabs(x - i * dx) >= threshold) continue;
      if (fabs(y - j * dx) >= threshold) continue;
      if (fabs(z - k * dx) >= threshold) continue;
delta = (0.25 * (1.0 + cos(0.5*PI*(x - i * dx)/dx))) * (0.25 * (1.0 + cos(0.5*PI*(y - j * dx)/dx))) *
(0.25 * (1.0 + cos(0.5*PI*(z - k * dx)/dx)));
row = (double*)((char*)sem_Fx + m * pitch_sem);
Fx[i] += row[l] * delta;
row = (double*)((char*)sem_Fy + m * pitch_sem);
Fy[i] += row[l] * delta;
row = (double*)((char*)sem_Fz + m * pitch_sem);
Fz[i] += row[l] * delta;
}
}
}//fluid3d_force_distribute_kernel
// Calculate the distributed velocity on the SEM elements
__global__ void
fluid3d_velocity_distribute_kernel(void *g, void *g_SEM, int aBank){
fluid_GPUgrids *grids = (fluid_GPUgrids *)g;
sem_GPUgrids *grids_SEM = (sem_GPUgrids *)g_SEM;
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int k = aBank * blockDim.z + threadIdx.z;
// check thread in boundary
if (i >= grids->width) return;
if (j >= grids->height) return;
if (k >= grids->depth) return;
size_t pitch = grids->rho.pitch;
size_t slicePitch = pitch * (grids->height);
double dx = grids->dx, x, y, z;
double threshold = 2 * dx;
double delta;
int numOfCells = grids_SEM->numOfCells;
int *numOfElements = grids_SEM->numOfElements;
double *sem_X = grids_SEM->X;
double *sem_Y = grids_SEM->Y;
double *sem_Z = grids_SEM->Z;
double *sem_Vx = grids_SEM->V_X;
double *sem_Vy = grids_SEM->V_Y;
double *sem_Vz = grids_SEM->V_Z;
size_t pitch_sem = grids_SEM->pitch;
char *devPtr = (char *)grids->ux.ptr;
char *slice = devPtr + k * slicePitch;
double *ux = (double *)(slice + j * pitch);
devPtr = (char *)grids->uy.ptr;
slice = devPtr + k * slicePitch;
double *uy = (double *)(slice + j * pitch);
devPtr = (char *)grids->uz.ptr;
slice = devPtr + k * slicePitch;
double *uz = (double *)(slice + j * pitch);
devPtr = (char *)grids->rho.ptr;
slice = devPtr + k * slicePitch;
//double *rho = (double *)(slice + j * pitch);
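  // Interpolate the grid velocity back onto each SEM element using the same
  // discrete delta as in fluid3d_force_distribute_kernel; atomicAdd is required
  // because many grid threads contribute to the same element entry concurrently.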
for (int l = 0; l < numOfCells; ++l)
for (int m = 0; m < numOfElements[l]; ++m)
{
double *row = (double*)((char*)sem_X + m * pitch_sem);
x = row[l];
row = (double*)((char*)sem_Y + m * pitch_sem);
y = row[l];
row = (double*)((char*)sem_Z + m * pitch_sem);
z = row[l];
      if (fabs(x - i * dx) >= threshold) continue;
      if (fabs(y - j * dx) >= threshold) continue;
      if (fabs(z - k * dx) >= threshold) continue;
delta = (0.25 * (1.0 + cos(0.5*PI*(x - i * dx)/dx))) * (0.25 * (1.0 + cos(0.5*PI*(y - j * dx)/dx))) *
(0.25 * (1.0 + cos(0.5*PI*(z - k * dx)/dx)));
/*if (i == 40 && j == 20 && k == 60 && m == 0){
printf("delta = %.16e, uy = %.16e\n", delta, uy[i]);
}*/
// row = (double*)((char*)sem_rho + m * pitch_sem);
// atomicAdd(&(row[l]), rho[i] * delta);
row = (double*)((char*)sem_Vx + m * pitch_sem);
atomicAdd(&(row[l]), ux[i] * delta);
/*if (m == 0){
printf("i = %d, j = %d, k = %d, Vx = %.16e\n", i, j, k,row[l]);
}*/
row = (double*)((char*)sem_Vy + m * pitch_sem);
atomicAdd(&(row[l]), uy[i] * delta);
// if (m == 0){
//printf("i = %d, j = %d, k = %d, uy = %.16e, delta = %e, um = %e\n",i,j,k,uy[i],delta,row[l]);
// printf("%e\n", uy[i] * delta);
// }
row = (double*)((char*)sem_Vz + m * pitch_sem);
atomicAdd(&(row[l]), uz[i] * delta);
//if (m == 0){
// printf("i = %d, j = %d, k = %d, uy = %.16e, um = %e\n",i,j,k,uy[i],row[l]);
// }
}
  // Periodic distribution: wrap j across the y (height) boundary and interpolate again
if (j < 2 || j > (grids->height - 2)){
if (j < 2) j = j + grids->height;
else j = j - grids->height;
for (int l = 0; l < numOfCells; ++l)
for (int m = 0; m < numOfElements[l]; ++m)
{
double *row = (double*)((char*)sem_X + m * pitch_sem);
x = row[l];
row = (double*)((char*)sem_Y + m * pitch_sem);
y = row[l];
row = (double*)((char*)sem_Z + m * pitch_sem);
z = row[l];
      if (fabs(x - i * dx) >= threshold) continue;
      if (fabs(y - j * dx) >= threshold) continue;
      if (fabs(z - k * dx) >= threshold) continue;
delta = (0.25 * (1.0 + cos(0.5*PI*(x - i * dx)/dx))) * (0.25 * (1.0 + cos(0.5*PI*(y - j * dx)/dx))) *
(0.25 * (1.0 + cos(0.5*PI*(z - k * dx)/dx)));
// row = (double*)((char*)sem_rho + m * pitch_sem);
// atomicAdd(&(row[l]), rho[i] * delta);
row = (double*)((char*)sem_Vx + m * pitch_sem);
atomicAdd(&(row[l]), ux[i] * delta);
row = (double*)((char*)sem_Vy + m * pitch_sem);
atomicAdd(&(row[l]), uy[i] * delta);
row = (double*)((char*)sem_Vz + m * pitch_sem);
atomicAdd(&(row[l]), uz[i] * delta);
}
}
}//fluid3d_velocity_distribute_kernel
#endif
| caa4778419780afa7a992cc3a7ee36c5209a506b.cu | /*
lb_kernel.cu
GPU kernel functions.
Author: Scott Christley <schristley@mac.com>
Copyright (C) 2010 Scott Christley
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met: 1. Redistributions of source code must retain the above
copyright notice, this list of conditions and the following
disclaimer. 2. Redistributions in binary form must reproduce the
above copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided with
the distribution. 3. The name of the author may not be used to
endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef LB_KERNEL_CU
#define LB_KERNEL_CU
#include <math.h>
#include "sem_kernel.cu"
__constant__ double t[] = {1.0/3.0, 1.0/18.0, 1.0/18.0, 1.0/18.0, 1.0/18.0, 1.0/18.0, 1.0/18.0,
1.0/36.0, 1.0/36.0, 1.0/36.0, 1.0/36.0,
1.0/36.0, 1.0/36.0, 1.0/36.0, 1.0/36.0,
1.0/36.0, 1.0/36.0, 1.0/36.0, 1.0/36.0};
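// D3Q19 lattice weights: 1/3 for the rest particle (d = 0), 1/18 for the six
// face-neighbor directions (d = 1..6), and 1/36 for the twelve edge-neighbor
// directions (d = 7..18).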
__constant__ double cx[] = {0, 1,-1, 0, 0, 0, 0, 1, 1, 1, 1,-1,-1,-1,-1, 0, 0, 0, 0};
__constant__ double cy[] = {0, 0, 0, 1,-1, 0, 0, 1,-1, 0, 0, 1,-1, 0, 0, 1, 1,-1,-1};
__constant__ double cz[] = {0, 0, 0, 0, 0, 1,-1, 0, 0, 1,-1, 0, 0, 1,-1, 1,-1, 1,-1};
__constant__ int bb[] = {0, 2, 1, 4, 3, 6, 5, 12, 11, 14, 13, 8, 7, 10, 9, 18, 17, 16, 15};
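// bb[d] is the index of the lattice direction opposite to d, used for bounce-back:
// e.g. bb[1] = 2 since direction 1 is (1,0,0) and direction 2 is (-1,0,0), and
// bb[7] = 12 since direction 7 is (1,1,0) and direction 12 is (-1,-1,0).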
//// TMP
// float newcx[] = {0, 1, 0, 0, -1, 0, 0, 1, -1, -1, 1, 1, -1, -1, 1, 0, 0, 0, 0};
__device__ int stepL = 0, stepR = 0;
__device__ float
get3d_value_float(void *devPtr, size_t pitch, size_t slicePitch, int i, int j, int k)
{
char *slice = (char *)devPtr + k * slicePitch;
float *row = (float *)(slice + j * pitch);
return row[i];
}
__device__ double
get3d_value(void *devPtr, size_t pitch, size_t slicePitch, int i, int j, int k)
{
char *slice = (char *)devPtr + k * slicePitch;
double *row = (double *)(slice + j * pitch);
return row[i];
}
/*__device__ double atomicAdd(double* address, double val) {
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}*/
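// Note: the CAS-based double-precision atomicAdd above is only needed on GPUs of
// compute capability below 6.0; newer architectures provide a native double
// atomicAdd, which is presumably why this fallback is commented out.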
/*__global__ void
fluid3d_in_out_flow_boundary_kernel(void *g, int aBank)
{
fluid_GPUgrids *grids = (fluid_GPUgrids *)g;
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int k = aBank * blockDim.z + threadIdx.z;
int d;
// check thread in boundary
if (i >= grids->width) return;
if (j >= grids->height) return;
if (k >= grids->depth) return;
size_t pitch = grids->rho.pitch;
size_t slicePitch = pitch * grids->height;
char *devPtr = (char *)grids->rho.ptr;
char *slice = devPtr + k * slicePitch;
float *row = (float *)(slice + j * pitch);
float tmp_ux, tmp_uy, tmp_uz, cu;
float Cs = grids->dx/grids->dt;
float Nyx = 0.0, Nyz = 0.0;
//// Inflow boundary condition at height = 0.
if ((j == 0) && (k > 0) && (k < (grids->depth - 1))
&& (i > 0) && (i < (grids->width - 1)) )
{
    // Implementation of on-site velocity boundary conditions for D3Q19 lattice Boltzmann simulations
    // M. Hecht, J. Harting. J. of Stat. Mech.: Theory and Experiment. 2010
    // % MACROSCOPIC (DIRICHLET) BOUNDARY CONDITIONS
    // % Inlet: Poiseuille profile
    /// Due to a singularity at nodes directly linked to solid boundary nodes, special treatment is needed.
    /// Strictly, one should follow "M. Hecht, J. Harting." and "Y.T. Feng, K. Han and D.R.J. Owen.
    /// Coupled lattice Boltzmann method and discrete element modeling of particle transport in
    /// turbulent fluid flows: Computational issue. Int. J. Numer. Meth. Engng. 72:1111-1134, 2007" to derive the boundary condition.
    /// Here, we try a simple fix: use the initial condition to recompute f.
devPtr = (char *)grids->ux.ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] = 0.0;
tmp_ux = 0.0;
devPtr = (char *)grids->uy.ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] = SHEAR_RATE* k * (grids->dx);
tmp_uy = SHEAR_RATE* k * (grids->dx);
devPtr = (char *)grids->uz.ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] = 0;
tmp_uz = 0;
if( (i == 1) || (i == grids->width-2) ||
(k == 1) || (k == grids->depth-2)
)
{
//// TMP fix 08092011. Simply reinitialize
devPtr = (char *)grids->rho.ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] = INIT_RHO;
for (d = 0; d < 19; ++d)
{
devPtr = (char *)grids->fIN[d].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
cu = 3.0 * (cx[d] * tmp_ux + cy[d] * tmp_uy + cz[d] * tmp_uz)*Cs;
row[i] = INIT_RHO*t[d]*( 1.0 + cu/(Cs*Cs) + 0.5*cu*cu/(Cs*Cs*Cs*Cs) -
1.5* (tmp_ux * tmp_ux + tmp_uy * tmp_uy + tmp_uz*tmp_uz)/(Cs*Cs));
}
}
else
{
devPtr = (char *)grids->rho.ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
///// NEW 08052011, with units.
      ///// Part of fIN entering the domain is not correct at this point because fluid3d_stream_kernel_2()
      ///// assumes periodic boundary conditions.
      ///// On the inflow side (y = 0) in the Y (height) direction, f3, f7, f11, f15, f16 need to be updated.
row[i] = 1.0 / (1.0 - get3d_value(grids->uy.ptr, pitch, slicePitch, i, j, k)/Cs)
* (get3d_value(grids->fIN[1].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[2].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[5].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[6].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[9].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[10].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[13].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[14].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[0].ptr, pitch, slicePitch, i, j, k)
+ 2.0 * (get3d_value(grids->fIN[4].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[8].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[12].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[17].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[18].ptr, pitch, slicePitch, i, j, k)));
/////END::: NEW 08052011, with units
// % MICROSCOPIC BOUNDARY CONDITIONS: INLET (Zou/He BC)
//
/////NEW 08052011, with units
Nyx = 0.5 * (get3d_value(grids->fIN[1].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[9].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[10].ptr, pitch, slicePitch, i, j, k)
- get3d_value(grids->fIN[2].ptr, pitch, slicePitch, i, j, k)
- get3d_value(grids->fIN[13].ptr, pitch, slicePitch, i, j, k)
- get3d_value(grids->fIN[14].ptr, pitch, slicePitch, i, j, k))
- 1.0 / 3.0 * get3d_value(grids->rho.ptr, pitch, slicePitch, i, j, k) *
get3d_value(grids->ux.ptr, pitch, slicePitch, i, j, k)/Cs;
Nyz = 0.5 * (get3d_value(grids->fIN[5].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[9].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[13].ptr, pitch, slicePitch, i, j, k)
- get3d_value(grids->fIN[6].ptr, pitch, slicePitch, i, j, k)
- get3d_value(grids->fIN[10].ptr, pitch, slicePitch, i, j, k)
- get3d_value(grids->fIN[14].ptr, pitch, slicePitch, i, j, k))
- 1.0 / 3.0 * get3d_value(grids->rho.ptr, pitch, slicePitch, i, j, k) *
get3d_value(grids->uz.ptr, pitch, slicePitch, i, j, k)/Cs;
devPtr = (char *)grids->fIN[3].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
/////NEW 08052011, with units
row[i] = get3d_value(grids->fIN[4].ptr, pitch, slicePitch, i, j, k)
+ 1.0 / 3.0 * get3d_value(grids->rho.ptr, pitch, slicePitch, i, j, k) *
get3d_value(grids->uy.ptr, pitch, slicePitch, i, j, k)/(Cs);
/////END::: NEW 08052011, with units
devPtr = (char *)grids->fIN[7].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
/////NEW 08052011, with units
row[i] = get3d_value(grids->fIN[12].ptr, pitch, slicePitch, i, j, k)
+ 1.0 / 6.0 * get3d_value(grids->rho.ptr, pitch, slicePitch, i, j, k)
* (get3d_value(grids->uy.ptr, pitch, slicePitch, i, j, k) + get3d_value(grids->ux.ptr, pitch, slicePitch, i, j, k))/(Cs)
- Nyx;
/////END::: NEW 08052011, with units
devPtr = (char *)grids->fIN[11].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
/////NEW 08052011, with units
row[i] = get3d_value(grids->fIN[9].ptr, pitch, slicePitch, i, j, k)
+ 1.0 / 6.0 * get3d_value(grids->rho.ptr, pitch, slicePitch, i, j, k)
* (get3d_value(grids->uy.ptr, pitch, slicePitch, i, j, k) - get3d_value(grids->ux.ptr, pitch, slicePitch, i, j, k))/(Cs)
+ Nyx;
/////END::: NEW 08052011, with units
devPtr = (char *)grids->fIN[15].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
/////NEW 08052011, with units
row[i] = get3d_value(grids->fIN[18].ptr, pitch, slicePitch, i, j, k)
+ 1.0 / 6.0 * get3d_value(grids->rho.ptr, pitch, slicePitch, i, j, k)
* (get3d_value(grids->uy.ptr, pitch, slicePitch, i, j, k) + get3d_value(grids->uz.ptr, pitch, slicePitch, i, j, k))/(Cs)
- Nyz;
/////END::: NEW 08052011, with units
devPtr = (char *)grids->fIN[16].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
/////NEW 08052011, with units
row[i] = get3d_value(grids->fIN[17].ptr, pitch, slicePitch, i, j, k)
+ 1.0 / 6.0 * get3d_value(grids->rho.ptr, pitch, slicePitch, i, j, k)
* (get3d_value(grids->uy.ptr, pitch, slicePitch, i, j, k) - get3d_value(grids->uz.ptr, pitch, slicePitch, i, j, k))/(Cs)
+ Nyz;
/////END::: NEW 08052011, with units
}
} /// end :::: if(k == 0)
//// Outflow boundary condition at depth = grid->depth-1.
if ((j == (grids->height - 1))
&& (k > 0) && (k < (grids->depth - 1))
&& (i > 0) && (i < (grids->width - 1))) {
// % MACROSCOPIC (DIRICHLET) BOUNDARY CONDITIONS
// % Outlet: Constant pressure
//
// Implementation of on-site velocity boundary conditions for D2Q19 lattice Boltzmann simulations
// M. Hecht, J. Harting. J. of Stat. Mech.: Theory and Experiment. 2010
devPtr = (char *)grids->ux.ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] = 0.0;
tmp_ux = 0.0;
devPtr = (char *)grids->uy.ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
    row[i] = SHEAR_RATE* k * (grids->dx);
    tmp_uy = SHEAR_RATE* k * (grids->dx);
devPtr = (char *)grids->uz.ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] = 0;
tmp_uz = 0;
if( (i == 1) || (i == grids->width-2) ||
(k == 1) || (k == grids->depth-2)
)
{
//// TMP fix 08092011. Simply reinitialize
devPtr = (char *)grids->rho.ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] = INIT_RHO;
for (d = 0; d < 19; ++d)
{
devPtr = (char *)grids->fIN[d].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
cu = 3.0 * (cx[d] * tmp_ux + cy[d] * tmp_uy + cz[d] * tmp_uz)*Cs;
row[i] = INIT_RHO*t[d]*( 1.0 + cu/(Cs*Cs) + 0.5*cu*cu/(Cs*Cs*Cs*Cs) -
1.5* (tmp_ux * tmp_ux + tmp_uy * tmp_uy + tmp_uz*tmp_uz)/(Cs*Cs));
}
}
else
{
devPtr = (char *)grids->rho.ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] = 1.0 / (1.0 + get3d_value(grids->uy.ptr, pitch, slicePitch, i, j, k)/Cs)
* (get3d_value(grids->fIN[1].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[2].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[5].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[6].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[9].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[10].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[13].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[14].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[0].ptr, pitch, slicePitch, i, j, k)
+ 2.0 * (get3d_value(grids->fIN[3].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[7].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[11].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[15].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[16].ptr, pitch, slicePitch, i, j, k)));
devPtr = (char *)grids->fIN[4].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] = get3d_value(grids->fIN[3].ptr, pitch, slicePitch, i, j, k)
- 1.0 / 3.0 * get3d_value(grids->rho.ptr, pitch, slicePitch, i, j, k) *
get3d_value(grids->uy.ptr, pitch, slicePitch, i, j, k)/(Cs);
/////////////////////
devPtr = (char *)grids->fIN[12].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
/////NEW 08052011, with units
row[i] = get3d_value(grids->fIN[7].ptr, pitch, slicePitch, i, j, k)
+ 1.0 / 6.0 * get3d_value(grids->rho.ptr, pitch, slicePitch, i, j, k)
* (- get3d_value(grids->uy.ptr, pitch, slicePitch, i, j, k) - get3d_value(grids->ux.ptr, pitch, slicePitch, i, j, k))/(Cs)
+ Nyx;
/////END::: NEW 08052011, with units
devPtr = (char *)grids->fIN[8].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
/////NEW 08052011, with units
row[i] = get3d_value(grids->fIN[11].ptr, pitch, slicePitch, i, j, k)
+ 1.0 / 6.0 * get3d_value(grids->rho.ptr, pitch, slicePitch, i, j, k)
* (- get3d_value(grids->uy.ptr, pitch, slicePitch, i, j, k) + get3d_value(grids->ux.ptr, pitch, slicePitch, i, j, k))/(Cs)
- Nyx;
/////END::: NEW 08052011, with units
devPtr = (char *)grids->fIN[18].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
/////NEW 08052011, with units
row[i] = get3d_value(grids->fIN[15].ptr, pitch, slicePitch, i, j, k)
+ 1.0 / 6.0 * get3d_value(grids->rho.ptr, pitch, slicePitch, i, j, k)
* (- get3d_value(grids->uy.ptr, pitch, slicePitch, i, j, k) - get3d_value(grids->uz.ptr, pitch, slicePitch, i, j, k))/(Cs)
+ Nyz;
/////END::: NEW 08052011, with units
devPtr = (char *)grids->fIN[17].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
/////NEW 08052011, with units
row[i] = get3d_value(grids->fIN[16].ptr, pitch, slicePitch, i, j, k)
+ 1.0 / 6.0 * get3d_value(grids->rho.ptr, pitch, slicePitch, i, j, k)
* (- get3d_value(grids->uy.ptr, pitch, slicePitch, i, j, k) + get3d_value(grids->uz.ptr, pitch, slicePitch, i, j, k))/(Cs)
- Nyz;
}
} /// END:::: if ((k == (grids->depth - 1))
}*/
__global__ void
fluid3d_noslip_boundary_kernel(void *g, int aBank)
{
fluid_GPUgrids *grids = (fluid_GPUgrids *)g;
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int k = aBank * blockDim.z + threadIdx.z;
// int d;
// check thread in boundary
if (i >= grids->width) return;
if (j >= grids->height) return;
if (k >= grids->depth) return;
size_t pitch = grids->rho.pitch;
size_t slicePitch = pitch * grids->height;
char *devPtr = (char *)grids->rho.ptr;
char *slice = devPtr + k * slicePitch;
double *row = (double *)(slice + j * pitch);
double C = grids->dx/grids->dt/UMAX;
double Nzx = 0.0, Nzy = 0.0;
//// Inflow boundary condition at height = 0.
if ((k == 0)/* && (i > 0) && (i < grids->width - 1) && (j > 0) && (j < grids->height - 1)*/)
{
    // Implementation of on-site velocity boundary conditions for D3Q19 lattice Boltzmann simulations
    // M. Hecht, J. Harting. J. of Stat. Mech.: Theory and Experiment. 2010
    // % MACROSCOPIC (DIRICHLET) BOUNDARY CONDITIONS
    /// Due to a singularity at nodes directly linked to solid boundary nodes, special treatment is needed.
    /// Strictly, one should follow "M. Hecht, J. Harting." and "Y.T. Feng, K. Han and D.R.J. Owen.
    /// Coupled lattice Boltzmann method and discrete element modeling of particle transport in
    /// turbulent fluid flows: Computational issue. Int. J. Numer. Meth. Engng. 72:1111-1134, 2007" to derive the boundary condition.
    /// Here, we try a simple fix: use the initial condition to recompute f.
devPtr = (char *)grids->ux.ptr;
slice = devPtr + k * slicePitch;
row = (double *)(slice + j * pitch);
row[i] = 0.0;
devPtr = (char *)grids->uy.ptr;
slice = devPtr + k * slicePitch;
row = (double *)(slice + j * pitch);
row[i] = 0.0;
devPtr = (char *)grids->uz.ptr;
slice = devPtr + k * slicePitch;
row = (double *)(slice + j * pitch);
row[i] = 0.0;
/* if( (j == 1) || (j == grids->height-2) ||
(i == 1) || (i == grids->width-2)
)
{
//// TMP fix 08092011. Simply reinitialize
devPtr = (char *)grids->rho.ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] = FLUID_RHO;
for (d = 0; d < 19; ++d)
{
devPtr = (char *)grids->fIN[d].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
cu = 3.0 * (cx[d] * tmp_ux + cy[d] * tmp_uy + cz[d] * tmp_uz)*Cs;
row[i] = FLUID_RHO*t[d]*( 1.0 + cu/(Cs*Cs) + 0.5*cu*cu/(Cs*Cs*Cs*Cs) -
1.5* (tmp_ux * tmp_ux + tmp_uy * tmp_uy + tmp_uz*tmp_uz)/(Cs*Cs));
}
}
else*/
// {
devPtr = (char *)grids->rho.ptr;
slice = devPtr + k * slicePitch;
row = (double *)(slice + j * pitch);
///// NEW 08052011, with units.
      ///// Part of fIN entering the domain is not correct at this point because fluid3d_stream_kernel_2()
      ///// assumes periodic boundary conditions.
      ///// On the no-slip side (z = 0) in the Z (depth) direction, f5, f9, f13, f15, f17 need to be updated.
row[i] = 1.0 / (1.0 - get3d_value(grids->uz.ptr, pitch, slicePitch, i, j, k)/C)
* (get3d_value(grids->fIN[1].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[2].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[3].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[4].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[7].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[8].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[11].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[12].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[0].ptr, pitch, slicePitch, i, j, k)
+ 2.0 * (get3d_value(grids->fIN[6].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[10].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[14].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[16].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[18].ptr, pitch, slicePitch, i, j, k)));
/////END::: NEW 08052011, with units
// % MICROSCOPIC BOUNDARY CONDITIONS: INLET (Zou/He BC)
//
/////NEW 08052011, with units
Nzx = 0.5 * (get3d_value(grids->fIN[1].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[7].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[8].ptr, pitch, slicePitch, i, j, k)
- get3d_value(grids->fIN[2].ptr, pitch, slicePitch, i, j, k)
- get3d_value(grids->fIN[11].ptr, pitch, slicePitch, i, j, k)
- get3d_value(grids->fIN[12].ptr, pitch, slicePitch, i, j, k))
- 1.0 / 3.0 * get3d_value(grids->rho.ptr, pitch, slicePitch, i, j, k) *
get3d_value(grids->ux.ptr, pitch, slicePitch, i, j, k)/C;
Nzy = 0.5 * (get3d_value(grids->fIN[3].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[7].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[11].ptr, pitch, slicePitch, i, j, k)
- get3d_value(grids->fIN[4].ptr, pitch, slicePitch, i, j, k)
- get3d_value(grids->fIN[8].ptr, pitch, slicePitch, i, j, k)
- get3d_value(grids->fIN[12].ptr, pitch, slicePitch, i, j, k))
- 1.0 / 3.0 * get3d_value(grids->rho.ptr, pitch, slicePitch, i, j, k) *
get3d_value(grids->uy.ptr, pitch, slicePitch, i, j, k)/C;
devPtr = (char *)grids->fIN[5].ptr;
slice = devPtr + k * slicePitch;
row = (double *)(slice + j * pitch);
/////NEW 08052011, with units
row[i] = get3d_value(grids->fIN[6].ptr, pitch, slicePitch, i, j, k)
+ 1.0 / 3.0 * get3d_value(grids->rho.ptr, pitch, slicePitch, i, j, k) *
get3d_value(grids->uz.ptr, pitch, slicePitch, i, j, k)/C;
/////END::: NEW 08052011, with units
devPtr = (char *)grids->fIN[9].ptr;
slice = devPtr + k * slicePitch;
row = (double *)(slice + j * pitch);
/////NEW 08052011, with units
row[i] = get3d_value(grids->fIN[14].ptr, pitch, slicePitch, i, j, k)
+ 1.0 / 6.0 * get3d_value(grids->rho.ptr, pitch, slicePitch, i, j, k)
* (get3d_value(grids->uz.ptr, pitch, slicePitch, i, j, k) + get3d_value(grids->ux.ptr, pitch, slicePitch, i, j, k))/C
- Nzx;
/////END::: NEW 08052011, with units
devPtr = (char *)grids->fIN[13].ptr;
slice = devPtr + k * slicePitch;
row = (double *)(slice + j * pitch);
/////NEW 08052011, with units
row[i] = get3d_value(grids->fIN[10].ptr, pitch, slicePitch, i, j, k)
+ 1.0 / 6.0 * get3d_value(grids->rho.ptr, pitch, slicePitch, i, j, k)
* (get3d_value(grids->uz.ptr, pitch, slicePitch, i, j, k) - get3d_value(grids->ux.ptr, pitch, slicePitch, i, j, k))/C
+ Nzx;
/////END::: NEW 08052011, with units
devPtr = (char *)grids->fIN[15].ptr;
slice = devPtr + k * slicePitch;
row = (double *)(slice + j * pitch);
/////NEW 08052011, with units
row[i] = get3d_value(grids->fIN[18].ptr, pitch, slicePitch, i, j, k)
+ 1.0 / 6.0 * get3d_value(grids->rho.ptr, pitch, slicePitch, i, j, k)
* (get3d_value(grids->uz.ptr, pitch, slicePitch, i, j, k) + get3d_value(grids->uy.ptr, pitch, slicePitch, i, j, k))/C
- Nzy;
/////END::: NEW 08052011, with units
devPtr = (char *)grids->fIN[17].ptr;
slice = devPtr + k * slicePitch;
row = (double *)(slice + j * pitch);
/////NEW 08052011, with units
row[i] = get3d_value(grids->fIN[16].ptr, pitch, slicePitch, i, j, k)
+ 1.0 / 6.0 * get3d_value(grids->rho.ptr, pitch, slicePitch, i, j, k)
* (get3d_value(grids->uz.ptr, pitch, slicePitch, i, j, k) - get3d_value(grids->uy.ptr, pitch, slicePitch, i, j, k))/C
+ Nzy;
/////END::: NEW 08052011, with units
//}
/* double rho = 0.0, ux = 0.0, uy = 0.0, uz = 0.0, dt = grids->dt, f[19];
for (int d = 0; d < 19; ++d) {
rho += get3d_value(grids->fIN[d].ptr, pitch, slicePitch, i, j, k);
ux += cx[d] * get3d_value(grids->fIN[d].ptr, pitch, slicePitch, i, j, k)*Cs;
uy += cy[d] * get3d_value(grids->fIN[d].ptr, pitch, slicePitch, i, j, k)*Cs;
uz += cz[d] * get3d_value(grids->fIN[d].ptr, pitch, slicePitch, i, j, k)*Cs;
if (i == 40 && j == 40 && d == 5){
double a = get3d_value(grids->fIN[d].ptr, pitch, slicePitch, i, j, k);
printf("bd fIN[%d] = %.16e\n", d, a);
}
// f[d] = get3d_value(grids->fIN[d].ptr, pitch, slicePitch, i, j, k);
}
if (i == 40 && j == 40){
printf("Boundary fIN[5] = %.16e, fIN[6] = %.16e\n", f[5],f[6]);
}
if (i == 40 && j == 40 && k == 1){
printf("rho = %.16e, ux = %.16e, uy = %.16e, uz = %.16e\n", rho, ux, uy, uz);
}
// float fdt = 0.5 * get3d_value(grids->Fx.ptr, pitch, slicePitch, i, j, k) * dt;
ux = (ux + 0.5 * get3d_value(grids->Fx.ptr, pitch, slicePitch, i, j, k) * dt) /rho;
uy = (uy + 0.5 * get3d_value(grids->Fy.ptr, pitch, slicePitch, i, j, k) * dt) /rho;
uz = (uz + 0.5 * get3d_value(grids->Fz.ptr, pitch, slicePitch, i, j, k) * dt) /rho;
if (i == 40 && j == 40){
printf("rho = %.16e, ux = %.16e, uy = %.16e, uz = %.16e\n", rho, ux, uy, uz);
}*/
} /// end :::: if(k == 0)
}
__global__ void
fluid3d_moving_plate_boundary_kernel(void *g, int aBank)
{
fluid_GPUgrids *grids = (fluid_GPUgrids *)g;
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int k = aBank * blockDim.z + threadIdx.z;
// int d;
// check thread in boundary
if (i >= grids->width) return;
if (j >= grids->height) return;
if (k >= grids->depth) return;
size_t pitch = grids->rho.pitch;
size_t slicePitch = pitch * grids->height;
char *devPtr = (char *)grids->rho.ptr;
char *slice = devPtr + k * slicePitch;
double *row = (double *)(slice + j * pitch);
float C = grids->dx/grids->dt/UMAX;
float Nzx = 0.0, Nzy = 0.0;
//// Moving-plate boundary condition at the top plane (k == grids->depth - 1).
if ((k == grids->depth - 1 )/* && (i > 0) && (i < grids->width - 1) && (j > 0) && (j < grids->height - 1)*/)
{
// Implementation of on-site velocity boundary conditions for D3Q19 lattice Boltzmann simulations
// M. Hecht, J. Harting. J. of Stat. Mech.: Theory and Experiment. 2010
// % MACROSCOPIC (DIRICHLET) BOUNDARY CONDITIONS
// % Inlet: Poiseuille profile
/// Nodes directly linked to solid boundary nodes introduce a singularity, so special treatment is needed.
/// Strictly, one should follow "M. Hecht, J. Harting." and "Y.T. Feng, K. Han and D.R.J. Owen.
/// Coupled lattice Boltzmann method and discrete element modeling of particle transport in
/// turbulent fluid flows: Computational issue. Int. J. Numer. Meth. Engng. 72:1111-1134, 2007" to derive the boundary condition.
/// Here, we try a simpler fix: recompute f from the initial condition.
devPtr = (char *)grids->ux.ptr;
slice = devPtr + k * slicePitch;
row = (double *)(slice + j * pitch);
row[i] = 0.0;
devPtr = (char *)grids->uy.ptr;
slice = devPtr + k * slicePitch;
row = (double *)(slice + j * pitch);
row[i] = SHEAR_RATE* k * (grids->dx)/UMAX;
devPtr = (char *)grids->uz.ptr;
slice = devPtr + k * slicePitch;
row = (double *)(slice + j * pitch);
row[i] = 0.0;
/* if( (j == 1) || (j == grids->height-2) ||
(i == 1) || (i == grids->width-2)
)
{
//// TMP fix 08092011. Simply reinitialize
devPtr = (char *)grids->rho.ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] = FLUID_RHO;
for (d = 0; d < 19; ++d)
{
devPtr = (char *)grids->fIN[d].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
cu = 3.0 * (cx[d] * tmp_ux + cy[d] * tmp_uy + cz[d] * tmp_uz)*Cs;
row[i] = FLUID_RHO*t[d]*( 1.0 + cu/(Cs*Cs) + 0.5*cu*cu/(Cs*Cs*Cs*Cs) -
1.5* (tmp_ux * tmp_ux + tmp_uy * tmp_uy + tmp_uz*tmp_uz)/(Cs*Cs));
}
}
else*/
{
devPtr = (char *)grids->rho.ptr;
slice = devPtr + k * slicePitch;
row = (double *)(slice + j * pitch);
///// NEW 08052011, with units.
///// part of fIN entering the domain is not correct at this point because fluid3d_stream_kernel_2()
///// assumes periodic boundary conditions.
///// On the moving-plate side (z = depth - 1) this means f6, f10, f14, f16 and f18 need to be updated.
row[i] = 1.0 / (1.0 + get3d_value(grids->uz.ptr, pitch, slicePitch, i, j, k)/C)
* (get3d_value(grids->fIN[1].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[2].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[3].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[4].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[7].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[11].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[12].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[8].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[0].ptr, pitch, slicePitch, i, j, k)
+ 2.0 * (get3d_value(grids->fIN[5].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[9].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[13].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[15].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[17].ptr, pitch, slicePitch, i, j, k)));
/////END::: NEW 08052011, with units
// % MICROSCOPIC BOUNDARY CONDITIONS: INLET (Zou/He BC)
//
/////NEW 08052011, with units
Nzx = 0.5 * (get3d_value(grids->fIN[1].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[7].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[8].ptr, pitch, slicePitch, i, j, k)
- get3d_value(grids->fIN[2].ptr, pitch, slicePitch, i, j, k)
- get3d_value(grids->fIN[11].ptr, pitch, slicePitch, i, j, k)
- get3d_value(grids->fIN[12].ptr, pitch, slicePitch, i, j, k))
- 1.0 / 3.0 * get3d_value(grids->rho.ptr, pitch, slicePitch, i, j, k) *
get3d_value(grids->ux.ptr, pitch, slicePitch, i, j, k)/C;
Nzy = 0.5 * (get3d_value(grids->fIN[3].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[7].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[11].ptr, pitch, slicePitch, i, j, k)
- get3d_value(grids->fIN[4].ptr, pitch, slicePitch, i, j, k)
- get3d_value(grids->fIN[8].ptr, pitch, slicePitch, i, j, k)
- get3d_value(grids->fIN[12].ptr, pitch, slicePitch, i, j, k))
- 1.0 / 3.0 * get3d_value(grids->rho.ptr, pitch, slicePitch, i, j, k) *
get3d_value(grids->uy.ptr, pitch, slicePitch, i, j, k)/C;
devPtr = (char *)grids->fIN[6].ptr;
slice = devPtr + k * slicePitch;
row = (double *)(slice + j * pitch);
/////NEW 08052011, with units
row[i] = get3d_value(grids->fIN[5].ptr, pitch, slicePitch, i, j, k)
- 1.0 / 3.0 * get3d_value(grids->rho.ptr, pitch, slicePitch, i, j, k) *
get3d_value(grids->uz.ptr, pitch, slicePitch, i, j, k)/C;
/////END::: NEW 08052011, with units
devPtr = (char *)grids->fIN[10].ptr;
slice = devPtr + k * slicePitch;
row = (double *)(slice + j * pitch);
/////NEW 08052011, with units
row[i] = get3d_value(grids->fIN[13].ptr, pitch, slicePitch, i, j, k)
+ 1.0 / 6.0 * get3d_value(grids->rho.ptr, pitch, slicePitch, i, j, k)
* (- get3d_value(grids->uz.ptr, pitch, slicePitch, i, j, k) + get3d_value(grids->ux.ptr, pitch, slicePitch, i, j, k))/C
- Nzx;
/////END::: NEW 08052011, with units
devPtr = (char *)grids->fIN[14].ptr;
slice = devPtr + k * slicePitch;
row = (double *)(slice + j * pitch);
/////NEW 08052011, with units
row[i] = get3d_value(grids->fIN[9].ptr, pitch, slicePitch, i, j, k)
+ 1.0 / 6.0 * get3d_value(grids->rho.ptr, pitch, slicePitch, i, j, k)
* (- get3d_value(grids->uz.ptr, pitch, slicePitch, i, j, k) - get3d_value(grids->ux.ptr, pitch, slicePitch, i, j, k))/C
+ Nzx;
/////END::: NEW 08052011, with units
devPtr = (char *)grids->fIN[16].ptr;
slice = devPtr + k * slicePitch;
row = (double *)(slice + j * pitch);
/////NEW 08052011, with units
row[i] = get3d_value(grids->fIN[17].ptr, pitch, slicePitch, i, j, k)
+ 1.0 / 6.0 * get3d_value(grids->rho.ptr, pitch, slicePitch, i, j, k)
* (- get3d_value(grids->uz.ptr, pitch, slicePitch, i, j, k) + get3d_value(grids->uy.ptr, pitch, slicePitch, i, j, k))/C
- Nzy;
/////END::: NEW 08052011, with units
devPtr = (char *)grids->fIN[18].ptr;
slice = devPtr + k * slicePitch;
row = (double *)(slice + j * pitch);
/////NEW 08052011, with units
row[i] = get3d_value(grids->fIN[15].ptr, pitch, slicePitch, i, j, k)
+ 1.0 / 6.0 * get3d_value(grids->rho.ptr, pitch, slicePitch, i, j, k)
* (- get3d_value(grids->uz.ptr, pitch, slicePitch, i, j, k) - get3d_value(grids->uy.ptr, pitch, slicePitch, i, j, k))/C
+ Nzy;
/////END::: NEW 08052011, with units
}
} /// end :::: if(k == grids->depth - 1)
}
/*__global__ void
fluid3d_edge_corner_boundary_kernel(void *g, int aBank){
fluid_GPUgrids *grids = (fluid_GPUgrids *)g;
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int k = aBank * blockDim.z + threadIdx.z;
int d;
// check thread in boundary
if (i >= grids->width) return;
if (j >= grids->height) return;
if (k >= grids->depth) return;
size_t pitch = grids->rho.pitch;
size_t slicePitch = pitch * grids->height;
char *devPtr;
char *slice;
float *row;
//const float SHEAR_RATE = 1e4;
float Nyzx = 0.0, Nxzy = 0.0;
if (k == grids->depth - 1){
devPtr = (char *)grids->ux.ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] = 0.0;
devPtr = (char *)grids->uy.ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] = SHEAR_RATE * k * (grids->dx);
devPtr = (char *)grids->uz.ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] = 0.0 ;
//Edges
if ((j == 0) && (i > 0) && (i < grids->width - 1)){
int bounce[19] = {0, 1, 2, 4, 4, 5, 5, 12, 8, 9, 13, 8, 12, 13, 9, 15, 17, 17, 18};
//{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,11, 12, 13, 14,15, 16, 17, 18}
for (d = 0; d < 19; ++d){
devPtr = (char *)grids->fIN[d].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] = get3d_value(grids->fIN[bounce[d]].ptr, pitch, slicePitch, i, j, k);
}
float f1 = get3d_value(grids->fIN[1].ptr, pitch, slicePitch, i, j, k);
float f2 = get3d_value(grids->fIN[2].ptr, pitch, slicePitch, i, j, k);
Nyzx = 0.25 * (f1 - f2);
devPtr = (char *)grids->fIN[7].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] += Nyzx;
devPtr = (char *)grids->fIN[11].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] -= Nyzx;
devPtr = (char *)grids->fIN[10].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] += Nyzx;
devPtr = (char *)grids->fIN[14].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] -= Nyzx;
devPtr = (char *)grids->fIN[0].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] = 12.0/22.0 * (get3d_value(grids->fIN[16].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[17].ptr, pitch, slicePitch, i, j, k));
}
if ((j == grids->height - 1) && (i > 0) && (i < grids->width - 1)){
int bounce[19] = {0, 1, 2, 3, 3, 5, 5, 7, 11, 9, 13, 11, 7, 13, 9, 15, 16, 17, 15};
for (d = 0; d < 19; ++d){
devPtr = (char *)grids->fIN[d].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] = get3d_value(grids->fIN[bounce[d]].ptr, pitch, slicePitch, i, j, k);
}
float f1 = get3d_value(grids->fIN[1].ptr, pitch, slicePitch, i, j, k);
float f2 = get3d_value(grids->fIN[2].ptr, pitch, slicePitch, i, j, k);
Nyzx = 0.25 * (f1 - f2);
devPtr = (char *)grids->fIN[8].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] += Nyzx;
devPtr = (char *)grids->fIN[12].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] -= Nyzx;
devPtr = (char *)grids->fIN[10].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] += Nyzx;
devPtr = (char *)grids->fIN[14].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] -= Nyzx;
devPtr = (char *)grids->fIN[0].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] = 12.0/22.0 * (get3d_value(grids->fIN[15].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[18].ptr, pitch, slicePitch, i, j, k));
}
if ((i == 0) && (j > 0) && (j < grids->height - 1)){
int bounce[19] = {0, 2, 2, 3, 4, 5, 5, 12, 11, 9, 13, 11, 12, 13, 14, 15, 17, 17, 15};
for (d = 0; d < 19; ++d){
devPtr = (char *)grids->fIN[d].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] = get3d_value(grids->fIN[bounce[d]].ptr, pitch, slicePitch, i, j, k);
}
float f1 = get3d_value(grids->fIN[3].ptr, pitch, slicePitch, i, j, k);
float f2 = get3d_value(grids->fIN[4].ptr, pitch, slicePitch, i, j, k);
Nxzy = 0.25 * (f1 - f2);
devPtr = (char *)grids->fIN[7].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] += Nxzy;
devPtr = (char *)grids->fIN[8].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] -= Nxzy;
devPtr = (char *)grids->fIN[16].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] += Nxzy;
devPtr = (char *)grids->fIN[18].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] -= Nxzy;
devPtr = (char *)grids->fIN[0].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] = 12.0/22.0 * (get3d_value(grids->fIN[9].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[14].ptr, pitch, slicePitch, i, j, k));
}
if ((i == grids->width - 1) && (j > 0) && (j < grids->height - 1)){
int bounce[19] = {0, 1, 1, 3, 4, 5, 5, 7, 8, 9, 10, 8, 7, 13, 9, 15, 17, 17, 15};
for (d = 0; d < 19; ++d){
devPtr = (char *)grids->fIN[d].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] = get3d_value(grids->fIN[bounce[d]].ptr, pitch, slicePitch, i, j, k);
}
float f1 = get3d_value(grids->fIN[3].ptr, pitch, slicePitch, i, j, k);
float f2 = get3d_value(grids->fIN[4].ptr, pitch, slicePitch, i, j, k);
Nxzy = 0.25 * (f1 - f2);
devPtr = (char *)grids->fIN[11].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] += Nxzy;
devPtr = (char *)grids->fIN[12].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] -= Nxzy;
devPtr = (char *)grids->fIN[16].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] += Nxzy;
devPtr = (char *)grids->fIN[18].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] -= Nxzy;
devPtr = (char *)grids->fIN[0].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] = 12.0/22.0 * (get3d_value(grids->fIN[10].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[13].ptr, pitch, slicePitch, i, j, k));
}
// corners
if ((i == 0) && (j == 0)){
int bounce[19] = {0, 2, 2, 4, 4, 5, 5, 12, 8, 9, 13, 11, 12, 13, 14, 15, 17, 17, 18};
for (d = 0; d < 19; ++d){
devPtr = (char *)grids->fIN[d].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] = get3d_value(grids->fIN[bounce[d]].ptr, pitch, slicePitch, i, j, k);
}
devPtr = (char *)grids->fIN[0].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] = 12.0/18.0 * (get3d_value(grids->fIN[8].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[11].ptr, pitch, slicePitch, i, j, k));
}
if ((i == 0) && (j == grids->height - 1)){
int bounce[19] = {0, 2, 2, 3, 3, 5, 5, 7, 11, 9, 13, 11, 12, 13, 14, 15, 16, 17, 15};
for (d = 0; d < 19; ++d){
devPtr = (char *)grids->fIN[d].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] = get3d_value(grids->fIN[bounce[d]].ptr, pitch, slicePitch, i, j, k);
}
devPtr = (char *)grids->fIN[0].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] = 12.0/18.0 * (get3d_value(grids->fIN[7].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[12].ptr, pitch, slicePitch, i, j, k));
}
if ((i == grids->width - 1) && (j == 0)){
int bounce[19] = {0, 1, 1, 4, 4, 5, 5, 7, 8, 9, 10, 8, 12, 13, 9, 15, 17, 17, 18};
for (d = 0; d < 19; ++d){
devPtr = (char *)grids->fIN[d].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] = get3d_value(grids->fIN[bounce[d]].ptr, pitch, slicePitch, i, j, k);
}
devPtr = (char *)grids->fIN[0].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] = 12.0/18.0 * (get3d_value(grids->fIN[7].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[12].ptr, pitch, slicePitch, i, j, k));
}
if ((i == grids->width - 1) && (j == grids->height - 1)){
int bounce[19] = {0, 1, 1, 3, 3, 5, 5, 7, 8, 9, 10, 11, 7, 13, 9, 15, 16, 17, 15};
for (d = 0; d < 19; ++d){
devPtr = (char *)grids->fIN[d].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] = get3d_value(grids->fIN[bounce[d]].ptr, pitch, slicePitch, i, j, k);
}
devPtr = (char *)grids->fIN[0].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] = 12.0/18.0 * (get3d_value(grids->fIN[8].ptr, pitch, slicePitch, i, j, k)
+ get3d_value(grids->fIN[11].ptr, pitch, slicePitch, i, j, k));
}
}//if (k == grids->depth - 1)
}*/
///// fluid3d_obst_bounce_back_kernel() does bounce-back (reverses states only)
///// for all solid nodes.
///// For solid nodes, the bounced-back states are saved in
///// both fIN and fOUT. fOUT is used in fluid3d_obst_stream_kernel()
///// to stream the states from the solid nodes back to the connected fluid nodes.
///// fluid3d_obst_bounce_back_kernel() and fluid3d_obst_stream_kernel()
///// together accomplish the no-slip wall boundary condition via the bounce-back rule.
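///// A minimal sketch of the rule (assuming a reversal table bb[] with
///// c[bb[d]] == -c[d] on the D3Q19 lattice, as used by the kernel below):
/////
/////   for (int d = 0; d < 19; ++d)
/////     fOUT[d](i,j,k) = fIN[bb[d]](i,j,k);   // reverse all states at a solid node
/////
///// Streaming these reversed states back into the adjacent fluid nodes is what
///// enforces the no-slip wall.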
/*__global__ void
fluid3d_obst_bounce_back_kernel(void *g, int aBank)
{
fluid_GPUgrids *grids = (fluid_GPUgrids *)g;
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int k = aBank * blockDim.z + threadIdx.z;
int d;
// check thread in boundary
if (i >= grids->width) return;
if (j >= grids->height) return;
if (k >= grids->depth) return;
size_t pitch = grids->rho.pitch;
size_t slicePitch = pitch * grids->height;
// NOT OBSTACLES, return
char *devPtr = (char *)grids->obst.ptr;
char *slice = devPtr + k * slicePitch;
float *row = (float *)(slice + j * pitch);
if (row[i] < 0.5) return;
// MICROSCOPIC BOUNDARY CONDITIONS: OBSTACLES (Half-Way bounce-back)
//
//// switch distribution function at solid nodes.
//// Call fluid3d_obst_stream_kernel() afterwards to bounce-back.
for (d = 0; d < 19; ++d)
{
devPtr = (char *)grids->fOUT[d].ptr;
slice = devPtr + k * slicePitch;
row = (float *)(slice + j * pitch);
row[i] = get3d_value(grids->fIN[bb[d]].ptr, pitch, slicePitch, i, j, k);
}
}*/
__global__ void
fluid3d_velocity_density_kernel(void *g, int aBank){
fluid_GPUgrids *grids = (fluid_GPUgrids *)g;
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int k = aBank * blockDim.z + threadIdx.z;
int d;
float dx = grids->dx, dt = grids->dt;
double C = dx/dt/UMAX;
double ux = 0.0, uy = 0.0, uz = 0.0, rho = 0.0;
// check thread in boundary
if (i >= grids->width) return;
if (j >= grids->height) return;
if (k >= grids->depth) return;
size_t pitch = grids->rho.pitch;
size_t slicePitch = pitch * grids->height;
size_t pitchf = grids->obst.pitch;
size_t slicePitchf = pitchf * grids->height;
// OBSTACLES, return
char *devPtr = (char *)grids->obst.ptr;
char *slice = devPtr + k * slicePitchf;
float *rowf = (float *)(slice + j * pitchf);
if (rowf[i] > 0.5) return;
// double f[19];
//// calculate rho and ux, uy, uz
//double fx, fy, fz;
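//// Zeroth/first moments: rho = sum_d f_d and rho*u = C * sum_d c_d f_d
//// (C = dx/dt/UMAX converts lattice units to the working units).
//// The half-step force added below is the trapezoidal velocity correction that
//// pairs with the (1 - 0.5/OMEGA) factor used in fluid3d_collision_kernel().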
for (d = 0; d < 19; ++d) {
rho += get3d_value(grids->fIN[d].ptr, pitch, slicePitch, i, j, k);
ux += cx[d] * get3d_value(grids->fIN[d].ptr, pitch, slicePitch, i, j, k);
uy += cy[d] * get3d_value(grids->fIN[d].ptr, pitch, slicePitch, i, j, k);
uz += cz[d] * get3d_value(grids->fIN[d].ptr, pitch, slicePitch, i, j, k);
/* if (i == 40 && j == 20 && k == 60){
double a = get3d_value(grids->fIN[d].ptr, pitch, slicePitch, i, j, k);
printf("fIN[%d] = %.16e\n", d, a);
}*/
// f[d] = get3d_value(grids->fIN[d].ptr, pitch, slicePitch, i, j, k);
}
/* if (i == 40 && j == 200 && k == 40){
printf("fIN[5] = %.16e, fIN[6] = %.16e\n", f[5],f[6]);
}*/
/* if (i == 40 && j == 20 && k == 60){
printf("rho = %.16e, ux = %.16e, uy = %.16e, uz = %.16e\n", rho, ux * Cs, uy * Cs, uz * Cs);
}*/
//fx = UNIT_FACTOR * 0.5 * get3d_value(grids->Fx.ptr, pitch, slicePitch, i, j, k) * dt;
//fy = UNIT_FACTOR * 0.5 * get3d_value(grids->Fy.ptr, pitch, slicePitch, i, j, k) * dt;
//fz = UNIT_FACTOR * 0.5 * get3d_value(grids->Fz.ptr, pitch, slicePitch, i, j, k) * dt;
double F_over_RHO_U = F0/INIT_RHO/UMAX * UNIT_FACTOR;
ux = (ux * C + 0.5 * F_over_RHO_U * get3d_value(grids->Fx.ptr, pitch, slicePitch, i, j, k) * dt) /rho;
uy = (uy * C + 0.5 * F_over_RHO_U * get3d_value(grids->Fy.ptr, pitch, slicePitch, i, j, k) * dt) /rho;
uz = (uz * C + 0.5 * F_over_RHO_U * get3d_value(grids->Fz.ptr, pitch, slicePitch, i, j, k) * dt) /rho;
/*if (i == 40 && j == 200 && k == 7){
stepL ++;
printf("L:step = %d, ux = %.16e, uy = %.16e, uz = %.16e\n", stepL, ux, uy, uz);
// printf("fx = %.16e, fy = %.16e, fz = %.16e\n", fx, fy, fz);
}*/
/* if (i == 80 && j == 2 && k == 65){
stepR ++;
printf("R:step = %d, ux = %.16e, uy = %.16e, uz = %.16e\n", stepR, ux, uy, uz);
}*/
/*ux = ux * Cs /rho;
uy = uy * Cs /rho;
uz = uz * Cs /rho;*/
devPtr = (char *)grids->rho.ptr;
slice = devPtr + k * slicePitch;
double *row = (double *)(slice + j * pitch);
row[i] = rho;
devPtr = (char *)grids->ux.ptr;
slice = devPtr + k * slicePitch;
row = (double *)(slice + j * pitch);
row[i] = ux;
devPtr = (char *)grids->uy.ptr;
slice = devPtr + k * slicePitch;
row = (double *)(slice + j * pitch);
row[i] = uy;
devPtr = (char *)grids->uz.ptr;
slice = devPtr + k * slicePitch;
row = (double *)(slice + j * pitch);
row[i] = uz;
}
///// fluid3d_collision_kernel()
///// does the collision step for all fluid nodes and
///// saves the result to fOUT.
__global__ void
fluid3d_collision_kernel(void *g, int aBank)
{
fluid_GPUgrids *grids = (fluid_GPUgrids *)g;
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int k = aBank * blockDim.z + threadIdx.z;
int d;
double C = grids->dx/grids->dt/UMAX;
double rho = 0.0, cu = 0.0;
double F[3], u[3], e[3], uF[3][3], ee[3][3];
double C2 = C*C;
double C4 = C2*C2;
// check thread in boundary
if (i >= grids->width) return;
if (j >= grids->height) return;
if (k >= grids->depth) return;
size_t pitch = grids->rho.pitch;
size_t slicePitch = pitch * grids->height;
//size_t pitchf = grids->obst.pitch;
//size_t slicePitchf = pitchf * grids->height;
// OBSTACLES, return
/*char *devPtr = (char *)grids->obst.ptr;
char *slice = devPtr + k * slicePitch;
float *row = (float *)(slice + j * pitch);
if (row[i] > 0.5) return;*/
rho = get3d_value(grids->rho.ptr, pitch, slicePitch, i, j, k);
u[0] = get3d_value(grids->ux.ptr, pitch, slicePitch, i, j, k);
u[1] = get3d_value(grids->uy.ptr, pitch, slicePitch, i, j, k);
u[2] = get3d_value(grids->uz.ptr, pitch, slicePitch, i, j, k);
/*if (i == 37 && j == 3 && k == 10){
printf("ux = %.16e, uy = %.16e, uz = %.16e\n", u[0], u[1], u[2]);
// printf("fx = %.16e, fy = %.16e, fz = %.16e\n", fx, fy, fz);
}*/
char *devPtr = (char *)grids->Fx.ptr;
char *slice = devPtr + k * slicePitch;
double *row = (double *)(slice + j * pitch);
F[0] = row[i];
devPtr = (char *)grids->Fy.ptr;
slice = devPtr + k * slicePitch;
row = (double *)(slice + j * pitch);
F[1] = row[i];
devPtr = (char *)grids->Fz.ptr;
slice = devPtr + k * slicePitch;
row = (double *)(slice + j * pitch);
F[2] = row[i];
for (int l = 0; l < 3; ++l){
for (int m = 0; m < 3; ++m){
uF[l][m] = u[l] * F[m] + F[l] * u[m];
}
}
for (d = 0; d < 19; ++d) {
e[0] = cx[d] * C;
e[1] = cy[d] * C;
e[2] = cz[d] * C;
double sum = 0;
cu = 3.0 * (e[0] * u[0] + e[1] * u[1] + e[2] * u[2]);
for (int l = 0; l < 3; ++l)
for (int m = 0; m < 3; ++m){
if (l == m) ee[l][m] = e[l] * e[m] - C2/3.0;
else ee[l][m] = e[l] * e[m];
}
for (int l = 0; l < 3; ++l){
for (int m = 0; m < 3; ++m){
sum += uF[l][m] * ee[m][l];
}
}
double fEQ = rho * t[d] * (1.0 + cu/(C2) + 0.5 * cu * cu/(C4) - 1.5 * (u[0] * u[0] + u[1] * u[1] + u[2] * u[2])/(C2));
double fIN = get3d_value(grids->fIN[d].ptr, pitch, slicePitch, i, j, k);
/* if(i == 0 && j == 0 && k == 2){
printf("rho = %.16e, fEQ = %.16e, fIN[%d] = %.16e\n", rho, fEQ, d, fIN);
}*/
char *devPtr = (char *)grids->fOUT[d].ptr;
char *slice = devPtr + k * slicePitch;
double *row = (double *)(slice + j * pitch);
// double fdt = t[d] * (1.0 - 0.5/OMEGA) * (3.0 * ((cx[d] * Cs - ux) * Fx +
// (cy[d] * Cs - uy)* Fy + (cz[d] * Cs - uz) * Fz) +
// 9.0 * (cx[d] * ux + cy[d] * uy + cz[d] * uz) * (cx[d] * Fx + cy[d] * Fy +
// cz[d] * Fz))/ Cs2 * (grids->dt) * UNIT_FACTOR;// pico gram/micron^3
double fdt = t[d] * (1.0 - 0.5/OMEGA) * (3.0 * (e[0] * F[0] + e[1] * F[1] + e[2] * F[2])/C2 +
4.5 * sum /C4) * (grids->dt);
// double fdt = 1.5 * t[d] * (Fx * cx[d] + Fy * cy[d] + Fz * cz[d]) * Cs * (grids->dt);
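// BGK collision with Guo-style forcing (descriptive note, matching the code below):
//   fOUT_d = fIN_d - (1/OMEGA) * (fIN_d - fEQ_d) + S_d,
//   S_d = t[d] * (1 - 0.5/OMEGA) * [3*(e.F)/C^2 + 4.5*(uF : ee)/C^4] * dt,
// where uF = u(x)F + F(x)u and ee[l][m] = e_l*e_m - (C^2/3)*delta_lm; the trailing
// F0/INIT_RHO/UMAX * UNIT_FACTOR rescales the force into the nondimensional units used here.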
row[i] = fIN - (1.0/OMEGA) * (fIN - fEQ) + F0 * fdt/INIT_RHO/UMAX * UNIT_FACTOR;
/*if(i == 40 && j == 20 && k == 60 && d == 0){
printf("fdt = %.16e\n", fdt);
// printf("fIN[%d] = %.16e,fOUT[%d] = %.16e\n", d, fIN, d, row[i]);
// printf("Fx = %.16e, Fy = %.16e, Fz = %.16e\n", Fx,Fy,Fz);
}*/
}
}
///// fluid3d_stream_kernel() only does streaming
///// for fluid nodes.
///// Also, periodicity is assumed in the x, y, and z directions.
///// When using an inflow/outflow boundary condition in the z-direction, this makes
///// fIN for nodes in the inflow/outflow plane incorrect.
///// Results are saved to fIN.
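///// Sketch: the per-axis wrap below is equivalent to the modular form
/////   si = (i - (int)cx[d] + width) % width;   // likewise for sj, sk
///// which holds because |cx[d]| <= 1 on the D3Q19 lattice.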
__global__ void
fluid3d_stream_kernel(void *g, int aBank)
{
fluid_GPUgrids *grids = (fluid_GPUgrids *)g;
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int k = aBank * blockDim.z + threadIdx.z;
// check thread in boundary
if (i >= grids->width) return;
if (j >= grids->height) return;
if (k >= grids->depth) return;
size_t pitch = grids->rho.pitch;
size_t slicePitch = pitch * grids->height;
// OBSTACLES need to receive from fluid nodes as well.
/***
char *devPtr = (char *)grids->obst.ptr;
char *slice = devPtr + k * slicePitch;
float *row = (float *)(slice + j * pitch);
if (row[i] > 0.5) return;
***/
int si, sj, sk, d;
/*** current [i][j][k] receives from neighbors. ****/
for (d = 0; d < 19; ++d)
{
si = i - (int)cx[d]; sj = j - (int)cy[d]; sk = k - (int)cz[d];
//// NOTE: Periodicity is assumed here.
//// This does not matter for most cases considered. Specific
//// boundary-condition functions will be called afterwards to set the proper
//// boundary conditions.
if (si < 0) si = grids->width - 1;
if (sj < 0) sj = grids->height - 1;
if (sk < 0) sk = grids->depth - 1;
if (si == grids->width) si = 0;
if (sj == grids->height) sj = 0;
if (sk == grids->depth) sk = 0;
char *devPtr = (char *)grids->fIN[d].ptr;
char *slice = devPtr + k * slicePitch;
double *row = (double *)(slice + j * pitch);
row[i] = get3d_value(grids->fOUT[d].ptr, pitch, slicePitch, si, sj, sk);
}
}
__global__ void
fluid3d_obst_stream_kernel(void *g, int aBank)
{
fluid_GPUgrids *grids = (fluid_GPUgrids *)g;
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int k = aBank * blockDim.z + threadIdx.z;
// int lx = grids->width;
// int ly = grids->height;
// int lz = grids->depth;
// check thread in boundary
if (i >= grids->width) return;
if (j >= grids->height) return;
if (k >= grids->depth) return;
size_t pitch = grids->rho.pitch;
size_t slicePitch = pitch * grids->height;
// obst is a float grid with its own pitch (see fluid3d_velocity_density_kernel).
size_t pitchf = grids->obst.pitch;
size_t slicePitchf = pitchf * grids->height;
int si, sj, sk, d;
/*** [i][j][k] receives from neighbors. ****/
// Go through all nodes. Find fluid nodes which are linked to solid boundary nodes.
//// IMPORTANT: check if (si,sj,sk) is a solid boundary node.
//// If not, skip it.
//// This assumes that we use an inflow/outflow boundary condition in the depth direction
//// and wall boundary conditions at the remaining boundaries, which are specified by
//// grids->obst.
for (d = 0; d < 19; ++d)
{
si = i - (int)cx[d]; sj = j - (int)cy[d]; sk = k - (int)cz[d];
if (si < 0) continue;
if (sj < 0) continue;
if (sk < 0) continue;
if (si >= grids->width) continue;
if (sj >= grids->height) continue;
if (sk >= grids->depth) continue;
char *devPtr = (char *)grids->obst.ptr;
char *slice = devPtr + sk * slicePitchf;
float *rowf = (float *)(slice + sj * pitchf);
if (rowf[si] < 0.5) continue;
devPtr = (char *)grids->fIN[d].ptr;
slice = devPtr + k * slicePitch;
// fIN holds doubles (as in fluid3d_stream_kernel), so view the row as double*.
double *row = (double *)(slice + j * pitch);
// Using fOUT[bb[d]] is wrong, has been reversed already in fluid3d_obst_bounce_back_kernel.
// row[i] = get3d_value(grids->fOUT[bb[d]].ptr, pitch, slicePitch, i, j, k);
row[i] = get3d_value(grids->fOUT[d].ptr, pitch, slicePitch, i, j, k);
}
}
// Distribute SEM element forces onto the LB grid
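// Immersed-boundary-style spreading (sketch of what the loops below compute):
//   F(x_ijk) += F_m * delta_h(x_ijk - X_m)   for every SEM element m,
// where delta_h is a product of per-axis cosine kernels
//   0.25 * (1 + cos(0.5*PI*(x - i*dx)/dx)),
// each supported on |x - i*dx| < 2*dx -- exactly the `threshold` test below.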
__global__ void
fluid3d_force_distribute_kernel(void *g, void *g_SEM, int aBank){
fluid_GPUgrids *grids = (fluid_GPUgrids *)g;
sem_GPUgrids *grids_SEM = (sem_GPUgrids *)g_SEM;
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int k = aBank * blockDim.z + threadIdx.z;
// check thread in boundary
if (i >= grids->width) return;
if (j >= grids->height) return;
if (k >= grids->depth) return;
size_t pitch = grids->rho.pitch;
size_t slicePitch = pitch * grids->height;
float dx = grids->dx;
double threshold = 2 * dx;
double delta;
double x, y, z;
int numOfCells = grids_SEM->numOfCells;
int* numOfElements = grids_SEM->numOfElements;
double *sem_X = grids_SEM->X;
double *sem_Y = grids_SEM->Y;
double *sem_Z = grids_SEM->Z;
double *sem_Fx = grids_SEM->F_X;
double *sem_Fy = grids_SEM->F_Y;
double *sem_Fz = grids_SEM->F_Z;
size_t pitch_sem = grids_SEM->pitch;
char *devPtr = (char *)grids->Fx.ptr;
char *slice = devPtr + k * slicePitch;
double *Fx = (double *)(slice + j * pitch);
devPtr = (char *)grids->Fy.ptr;
slice = devPtr + k * slicePitch;
double *Fy = (double *)(slice + j * pitch);
devPtr = (char *)grids->Fz.ptr;
slice = devPtr + k * slicePitch;
double *Fz = (double *)(slice + j * pitch);
for (int l = 0; l < numOfCells; ++l)
for (int m = 0; m < numOfElements[l]; ++m)
{
double *row = (double*)((char*)sem_X + m * pitch_sem);
x = row[l];
row = (double*)((char*)sem_Y + m * pitch_sem);
y = row[l];
row = (double*)((char*)sem_Z + m * pitch_sem);
z = row[l];
if (fabs(x - i * dx) >= threshold) continue;
if (fabs(y - j * dx) >= threshold) continue;
if (fabs(z - k * dx) >= threshold) continue;
delta = (0.25 * (1.0 + cos(0.5*PI*(x - i * dx)/dx))) * (0.25 * (1.0 + cos(0.5*PI*(y - j * dx)/dx))) *
(0.25 * (1.0 + cos(0.5*PI*(z - k * dx)/dx)));
row = (double*)((char*)sem_Fx + m * pitch_sem);
Fx[i] += row[l] * delta;
row = (double*)((char*)sem_Fy + m * pitch_sem);
Fy[i] += row[l] * delta;
row = (double*)((char*)sem_Fz + m * pitch_sem);
Fz[i] += row[l] * delta;
}
//Periodic distribute
if (j < 2 || j > (grids->height - 2)){
if (j < 2) j = j + grids->height;
else j = j - grids->height;
for (int l = 0; l < numOfCells; ++l)
for (int m = 0; m < numOfElements[l]; ++m)
{
double *row = (double*)((char*)sem_X + m * pitch_sem);
x = row[l];
row = (double*)((char*)sem_Y + m * pitch_sem);
y = row[l];
row = (double*)((char*)sem_Z + m * pitch_sem);
z = row[l];
if (fabs(x - i * dx) >= threshold) continue;
if (fabs(y - j * dx) >= threshold) continue;
if (fabs(z - k * dx) >= threshold) continue;
delta = (0.25 * (1.0 + cos(0.5*PI*(x - i * dx)/dx))) * (0.25 * (1.0 + cos(0.5*PI*(y - j * dx)/dx))) *
(0.25 * (1.0 + cos(0.5*PI*(z - k * dx)/dx)));
row = (double*)((char*)sem_Fx + m * pitch_sem);
Fx[i] += row[l] * delta;
row = (double*)((char*)sem_Fy + m * pitch_sem);
Fy[i] += row[l] * delta;
row = (double*)((char*)sem_Fz + m * pitch_sem);
Fz[i] += row[l] * delta;
}
}
}//fluid3d_force_distribute_kernel
// Interpolate LB grid velocities onto the SEM elements
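// Adjoint of the force spreading above: grid velocities are gathered back as
//   V_m += u(x_ijk) * delta_h(x_ijk - X_m),
// accumulated with atomicAdd because many grid threads update the same element m.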
__global__ void
fluid3d_velocity_distribute_kernel(void *g, void *g_SEM, int aBank){
fluid_GPUgrids *grids = (fluid_GPUgrids *)g;
sem_GPUgrids *grids_SEM = (sem_GPUgrids *)g_SEM;
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int k = aBank * blockDim.z + threadIdx.z;
// check thread in boundary
if (i >= grids->width) return;
if (j >= grids->height) return;
if (k >= grids->depth) return;
size_t pitch = grids->rho.pitch;
size_t slicePitch = pitch * (grids->height);
double dx = grids->dx, x, y, z;
double threshold = 2 * dx;
double delta;
int numOfCells = grids_SEM->numOfCells;
int *numOfElements = grids_SEM->numOfElements;
double *sem_X = grids_SEM->X;
double *sem_Y = grids_SEM->Y;
double *sem_Z = grids_SEM->Z;
double *sem_Vx = grids_SEM->V_X;
double *sem_Vy = grids_SEM->V_Y;
double *sem_Vz = grids_SEM->V_Z;
size_t pitch_sem = grids_SEM->pitch;
char *devPtr = (char *)grids->ux.ptr;
char *slice = devPtr + k * slicePitch;
double *ux = (double *)(slice + j * pitch);
devPtr = (char *)grids->uy.ptr;
slice = devPtr + k * slicePitch;
double *uy = (double *)(slice + j * pitch);
devPtr = (char *)grids->uz.ptr;
slice = devPtr + k * slicePitch;
double *uz = (double *)(slice + j * pitch);
devPtr = (char *)grids->rho.ptr;
slice = devPtr + k * slicePitch;
//double *rho = (double *)(slice + j * pitch);
for (int l = 0; l < numOfCells; ++l)
for (int m = 0; m < numOfElements[l]; ++m)
{
double *row = (double*)((char*)sem_X + m * pitch_sem);
x = row[l];
row = (double*)((char*)sem_Y + m * pitch_sem);
y = row[l];
row = (double*)((char*)sem_Z + m * pitch_sem);
z = row[l];
if (fabs(x - i * dx) >= threshold) continue;
if (fabs(y - j * dx) >= threshold) continue;
if (fabs(z - k * dx) >= threshold) continue;
delta = (0.25 * (1.0 + cos(0.5*PI*(x - i * dx)/dx))) * (0.25 * (1.0 + cos(0.5*PI*(y - j * dx)/dx))) *
(0.25 * (1.0 + cos(0.5*PI*(z - k * dx)/dx)));
/*if (i == 40 && j == 20 && k == 60 && m == 0){
printf("delta = %.16e, uy = %.16e\n", delta, uy[i]);
}*/
// row = (double*)((char*)sem_rho + m * pitch_sem);
// atomicAdd(&(row[l]), rho[i] * delta);
row = (double*)((char*)sem_Vx + m * pitch_sem);
atomicAdd(&(row[l]), ux[i] * delta);
/*if (m == 0){
printf("i = %d, j = %d, k = %d, Vx = %.16e\n", i, j, k,row[l]);
}*/
row = (double*)((char*)sem_Vy + m * pitch_sem);
atomicAdd(&(row[l]), uy[i] * delta);
// if (m == 0){
//printf("i = %d, j = %d, k = %d, uy = %.16e, delta = %e, um = %e\n",i,j,k,uy[i],delta,row[l]);
// printf("%e\n", uy[i] * delta);
// }
row = (double*)((char*)sem_Vz + m * pitch_sem);
atomicAdd(&(row[l]), uz[i] * delta);
//if (m == 0){
// printf("i = %d, j = %d, k = %d, uy = %.16e, um = %e\n",i,j,k,uy[i],row[l]);
// }
}
// Periodic distribute
if (j < 2 || j > (grids->height - 2)){
if (j < 2) j = j + grids->height;
else j = j - grids->height;
for (int l = 0; l < numOfCells; ++l)
for (int m = 0; m < numOfElements[l]; ++m)
{
double *row = (double*)((char*)sem_X + m * pitch_sem);
x = row[l];
row = (double*)((char*)sem_Y + m * pitch_sem);
y = row[l];
row = (double*)((char*)sem_Z + m * pitch_sem);
z = row[l];
if (fabs(x - i * dx) >= threshold) continue;
if (fabs(y - j * dx) >= threshold) continue;
if (fabs(z - k * dx) >= threshold) continue;
delta = (0.25 * (1.0 + cos(0.5*PI*(x - i * dx)/dx))) * (0.25 * (1.0 + cos(0.5*PI*(y - j * dx)/dx))) *
(0.25 * (1.0 + cos(0.5*PI*(z - k * dx)/dx)));
// row = (double*)((char*)sem_rho + m * pitch_sem);
// atomicAdd(&(row[l]), rho[i] * delta);
row = (double*)((char*)sem_Vx + m * pitch_sem);
atomicAdd(&(row[l]), ux[i] * delta);
row = (double*)((char*)sem_Vy + m * pitch_sem);
atomicAdd(&(row[l]), uy[i] * delta);
row = (double*)((char*)sem_Vz + m * pitch_sem);
atomicAdd(&(row[l]), uz[i] * delta);
}
}
}//fluid3d_velocity_distribute_kernel
#endif
|
f937943e4655352d4d850ae293d5fa241e835720.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//xfail:NOT_ALL_VERIFIED
//--blockDim=1024 --gridDim=1024
//0-sized
__device__ double C[1024][0][1024];
__global__ void foo(double *H) {
C[threadIdx.x][threadIdx.y][threadIdx.z] = H[threadIdx.x];
}
| f937943e4655352d4d850ae293d5fa241e835720.cu | //xfail:NOT_ALL_VERIFIED
//--blockDim=1024 --gridDim=1024
//0-sized
__device__ double C[1024][0][1024];
__global__ void foo(double *H) {
C[threadIdx.x][threadIdx.y][threadIdx.z] = H[threadIdx.x];
}
|
60a4688d36a42a75e0afa3ca90f7d098901782c4.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file reader_impl.cu
* @brief cuDF-IO CSV reader class implementation
*/
#include "reader_impl.hpp"
#include <io/comp/io_uncomp.h>
#include <io/utilities/parsing_utils.cuh>
#include <io/utilities/type_conversion.cuh>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/io/types.hpp>
#include <cudf/strings/replace.hpp>
#include <cudf/table/table.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/span.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <algorithm>
#include <iostream>
#include <numeric>
#include <tuple>
#include <unordered_map>
using std::string;
using std::vector;
using cudf::device_span;
using cudf::host_span;
using cudf::detail::make_device_uvector_async;
namespace cudf {
namespace io {
namespace detail {
namespace csv {
using namespace cudf::io::csv;
using namespace cudf::io;
/**
* @brief Estimates the maximum expected length of a row, based on the number
* of columns
*
* If the number of columns is not available, it will return a value large
* enough for most use cases
*
* @param[in] num_columns Number of columns in the CSV file (optional)
*
* @return Estimated maximum size of a row, in bytes
*/
constexpr size_t calculateMaxRowSize(int num_columns = 0) noexcept
{
constexpr size_t max_row_bytes = 16 * 1024; // 16KB
constexpr size_t column_bytes = 64;
constexpr size_t base_padding = 1024; // 1KB
if (num_columns == 0) {
// Use flat size if the number of columns is not known
return max_row_bytes;
} else {
// Expand the size based on the number of columns, if available
return base_padding + num_columns * column_bytes;
}
}
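// For instance, calculateMaxRowSize(10) == 1024 + 10 * 64 == 1664 bytes, while
// calculateMaxRowSize() falls back to the flat 16KB estimate.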
/**
* @brief Translates a dtype string and returns its dtype enumeration and any
* extended dtype flags that are supported by cuIO. Often, this is a column
* with the same underlying dtype as the basic types, but with a different
* parsing interpretation.
*
* @param[in] dtype String containing the basic or extended dtype
*
* @return Tuple of data_type and flags
*/
std::tuple<data_type, column_parse::flags> get_dtype_info(const std::string &dtype)
{
if (dtype == "hex" || dtype == "hex64") {
return std::make_tuple(data_type{cudf::type_id::INT64}, column_parse::as_hexadecimal);
}
if (dtype == "hex32") {
return std::make_tuple(data_type{cudf::type_id::INT32}, column_parse::as_hexadecimal);
}
return std::make_tuple(convert_string_to_dtype(dtype), column_parse::as_default);
}
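// Illustrative examples of the mapping above:
//   get_dtype_info("hex32") -> {INT32, as_hexadecimal}
//   get_dtype_info("hex") or get_dtype_info("hex64") -> {INT64, as_hexadecimal}
// Anything else defers to convert_string_to_dtype() with the as_default flag.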
/**
* @brief Removes the first and last quote in the string
*/
string removeQuotes(string str, char quotechar)
{
// Exclude first and last quotation char
const size_t first_quote = str.find(quotechar);
if (first_quote != string::npos) { str.erase(first_quote, 1); }
const size_t last_quote = str.rfind(quotechar);
if (last_quote != string::npos) { str.erase(last_quote, 1); }
return str;
}
/**
* @brief Parse the first row to set the column names in the raw_csv parameter.
* The first row can be either the header row, or the first data row
*/
std::vector<std::string> setColumnNames(std::vector<char> const &header,
parse_options_view const &opts,
int header_row,
std::string prefix)
{
std::vector<std::string> col_names;
// If there is only a single character then it would be the terminator
if (header.size() <= 1) { return col_names; }
std::vector<char> first_row = header;
int num_cols = 0;
bool quotation = false;
for (size_t pos = 0, prev = 0; pos < first_row.size(); ++pos) {
// Flip the quotation flag if current character is a quotechar
if (first_row[pos] == opts.quotechar) {
quotation = !quotation;
}
// Check if end of a column/row
else if (pos == first_row.size() - 1 || (!quotation && first_row[pos] == opts.terminator) ||
(!quotation && first_row[pos] == opts.delimiter)) {
// This is the header, add the column name
if (header_row >= 0) {
// Include the current character, in case the line is not terminated
int col_name_len = pos - prev + 1;
// Exclude the delimiter/terminator if present
if (first_row[pos] == opts.delimiter || first_row[pos] == opts.terminator) {
--col_name_len;
}
// Also exclude '\r' character at the end of the column name if it's
// part of the terminator
if (col_name_len > 0 && opts.terminator == '\n' && first_row[pos] == '\n' &&
first_row[pos - 1] == '\r') {
--col_name_len;
}
const string new_col_name(first_row.data() + prev, col_name_len);
col_names.push_back(removeQuotes(new_col_name, opts.quotechar));
// Stop parsing when we hit the line terminator; relevant when there is
// a blank line following the header. In this case, first_row includes
// multiple line terminators at the end, as the new recStart belongs to
// a line that comes after the blank line(s)
if (!quotation && first_row[pos] == opts.terminator) { break; }
} else {
// This is the first data row, add the automatically generated name
col_names.push_back(prefix + std::to_string(num_cols));
}
num_cols++;
// Skip adjacent delimiters if delim_whitespace is set
while (opts.multi_delimiter && pos < first_row.size() && first_row[pos] == opts.delimiter &&
first_row[pos + 1] == opts.delimiter) {
++pos;
}
prev = pos + 1;
}
}
return col_names;
}
template <typename C>
void erase_except_last(C &container, rmm::cuda_stream_view stream)
{
cudf::detail::device_single_thread(
[span = device_span<typename C::value_type>{container}] __device__() mutable {
span.front() = span.back();
},
stream);
container.resize(1, stream);
}
std::pair<rmm::device_uvector<char>, reader::impl::selected_rows_offsets>
reader::impl::select_data_and_row_offsets(rmm::cuda_stream_view stream)
{
auto range_offset = opts_.get_byte_range_offset();
auto range_size = opts_.get_byte_range_size();
auto skip_rows = opts_.get_skiprows();
auto skip_end_rows = opts_.get_skipfooter();
auto num_rows = opts_.get_nrows();
if (range_offset > 0 || range_size > 0) {
CUDF_EXPECTS(compression_type_ == "none",
"Reading compressed data using `byte range` is unsupported");
}
size_t map_range_size = 0;
if (range_size != 0) {
const auto num_columns = std::max(opts_.get_names().size(), opts_.get_dtypes().size());
map_range_size = range_size + calculateMaxRowSize(num_columns);
}
// Support delayed opening of the file if using a memory-mapped datasource
// This allows mapping only a subset of the file when using a byte range
if (source_ == nullptr) {
assert(!filepath_.empty());
source_ = datasource::create(filepath_, range_offset, map_range_size);
}
// Transfer source data to GPU
if (!source_->is_empty()) {
auto data_size = (map_range_size != 0) ? map_range_size : source_->size();
auto buffer = source_->host_read(range_offset, data_size);
auto h_data = host_span<char const>( //
reinterpret_cast<const char *>(buffer->data()),
buffer->size());
std::vector<char> h_uncomp_data_owner;
if (compression_type_ != "none") {
h_uncomp_data_owner = get_uncompressed_data(h_data, compression_type_);
h_data = h_uncomp_data_owner;
}
// None of the parameters for row selection is used, we are parsing the entire file
const bool load_whole_file = range_offset == 0 && range_size == 0 && skip_rows <= 0 &&
skip_end_rows <= 0 && num_rows == -1;
// With byte range, find the start of the first data row
size_t const data_start_offset = (range_offset != 0) ? find_first_row_start(h_data) : 0;
// TODO: Allow parsing the header outside the mapped range
CUDF_EXPECTS((range_offset == 0 || opts_.get_header() < 0),
"byte_range offset with header not supported");
// Gather row offsets
auto data_row_offsets =
load_data_and_gather_row_offsets(h_data,
data_start_offset,
(range_size) ? range_size : h_data.size(),
(skip_rows > 0) ? skip_rows : 0,
num_rows,
load_whole_file,
stream);
auto &row_offsets = data_row_offsets.second;
// Exclude the rows that are to be skipped from the end
if (skip_end_rows > 0 && static_cast<size_t>(skip_end_rows) < row_offsets.size()) {
row_offsets.shrink(row_offsets.size() - skip_end_rows);
}
return data_row_offsets;
}
return {rmm::device_uvector<char>{0, stream}, selected_rows_offsets{stream}};
}
table_with_metadata reader::impl::read(rmm::cuda_stream_view stream)
{
auto const data_row_offsets = select_data_and_row_offsets(stream);
auto const &data = data_row_offsets.first;
auto const &row_offsets = data_row_offsets.second;
// Exclude the end-of-data row from number of rows with actual data
num_records_ = std::max(row_offsets.size(), 1ul) - 1;
// Check if the user gave us a list of column names
if (not opts_.get_names().empty()) {
column_flags_.resize(opts_.get_names().size(), column_parse::enabled);
col_names_ = opts_.get_names();
} else {
col_names_ = setColumnNames(header_, opts.view(), opts_.get_header(), opts_.get_prefix());
num_actual_cols_ = num_active_cols_ = col_names_.size();
column_flags_.resize(num_actual_cols_, column_parse::enabled);
// Rename empty column names to "Unnamed: col_index"
for (size_t col_idx = 0; col_idx < col_names_.size(); ++col_idx) {
if (col_names_[col_idx].empty()) {
col_names_[col_idx] = string("Unnamed: ") + std::to_string(col_idx);
}
}
// Looking for duplicates
std::unordered_map<string, int> col_names_histogram;
for (auto &col_name : col_names_) {
// Operator [] inserts a default-initialized value if the given key is not
// present
if (++col_names_histogram[col_name] > 1) {
if (opts_.is_enabled_mangle_dupe_cols()) {
// Rename duplicates of column X as X.1, X.2, ...; First appearance
// stays as X
col_name += "." + std::to_string(col_names_histogram[col_name] - 1);
} else {
// All duplicate columns will be ignored; First appearance is parsed
const auto idx = &col_name - col_names_.data();
column_flags_[idx] = column_parse::disabled;
}
}
}
// Update the number of columns to be processed, if some might have been
// removed
if (!opts_.is_enabled_mangle_dupe_cols()) { num_active_cols_ = col_names_histogram.size(); }
}
// User can specify which columns should be parsed
if (!opts_.get_use_cols_indexes().empty() || !opts_.get_use_cols_names().empty()) {
std::fill(column_flags_.begin(), column_flags_.end(), column_parse::disabled);
for (const auto index : opts_.get_use_cols_indexes()) {
column_flags_[index] = column_parse::enabled;
}
num_active_cols_ = opts_.get_use_cols_indexes().size();
for (const auto &name : opts_.get_use_cols_names()) {
const auto it = std::find(col_names_.begin(), col_names_.end(), name);
if (it != col_names_.end()) {
column_flags_[it - col_names_.begin()] = column_parse::enabled;
num_active_cols_++;
}
}
}
// User can specify which columns should be inferred as datetime
if (!opts_.get_infer_date_indexes().empty() || !opts_.get_infer_date_names().empty()) {
for (const auto index : opts_.get_infer_date_indexes()) {
column_flags_[index] |= column_parse::as_datetime;
}
for (const auto &name : opts_.get_infer_date_names()) {
auto it = std::find(col_names_.begin(), col_names_.end(), name);
if (it != col_names_.end()) {
column_flags_[it - col_names_.begin()] |= column_parse::as_datetime;
}
}
}
// Return empty table rather than exception if nothing to load
if (num_active_cols_ == 0) { return {std::make_unique<table>(), {}}; }
auto metadata = table_metadata{};
auto out_columns = std::vector<std::unique_ptr<cudf::column>>();
auto column_types = gather_column_types(data, row_offsets, stream);
out_columns.reserve(column_types.size());
if (num_records_ != 0) {
auto out_buffers = decode_data(data, row_offsets, column_types, stream);
for (size_t i = 0; i < column_types.size(); ++i) {
metadata.column_names.emplace_back(out_buffers[i].name);
if (column_types[i].id() == type_id::STRING && opts.quotechar != '\0' &&
opts.doublequote == true) {
// PANDAS' default behavior of enabling doublequote for two consecutive
// quotechars in quoted fields results in reduction to a single quotechar
// TODO: Would be much more efficient to perform this operation in-place
// during the conversion stage
const std::string quotechar(1, opts.quotechar);
const std::string dblquotechar(2, opts.quotechar);
std::unique_ptr<column> col = cudf::make_strings_column(*out_buffers[i]._strings, stream);
out_columns.emplace_back(
cudf::strings::replace(col->view(), dblquotechar, quotechar, -1, mr_));
} else {
out_columns.emplace_back(make_column(out_buffers[i], nullptr, stream, mr_));
}
}
} else {
// Create empty columns
for (size_t i = 0; i < column_types.size(); ++i) {
out_columns.emplace_back(make_empty_column(column_types[i]));
}
// Handle empty metadata
for (int col = 0; col < num_actual_cols_; ++col) {
if (column_flags_[col] & column_parse::enabled) {
metadata.column_names.emplace_back(col_names_[col]);
}
}
}
return {std::make_unique<table>(std::move(out_columns)), std::move(metadata)};
}
size_t reader::impl::find_first_row_start(host_span<char const> data)
{
// For now, look for the first terminator (assume the first terminator isn't within a quote)
// TODO: Attempt to infer this from the data
size_t pos = 0;
while (pos < data.size() && data[pos] != opts.terminator) { ++pos; }
return std::min(pos + 1, data.size());
}
std::pair<rmm::device_uvector<char>, reader::impl::selected_rows_offsets>
reader::impl::load_data_and_gather_row_offsets(host_span<char const> data,
size_t range_begin,
size_t range_end,
size_t skip_rows,
int64_t num_rows,
bool load_whole_file,
rmm::cuda_stream_view stream)
{
constexpr size_t max_chunk_bytes = 64 * 1024 * 1024; // 64MB
size_t buffer_size = std::min(max_chunk_bytes, data.size());
size_t max_blocks =
std::max<size_t>((buffer_size / cudf::io::csv::gpu::rowofs_block_bytes) + 1, 2);
hostdevice_vector<uint64_t> row_ctx(max_blocks);
size_t buffer_pos = std::min(range_begin - std::min(range_begin, sizeof(char)), data.size());
size_t pos = std::min(range_begin, data.size());
size_t header_rows = (opts_.get_header() >= 0) ? opts_.get_header() + 1 : 0;
uint64_t ctx = 0;
// For compatibility with the previous parser, a row is considered in-range if the
// previous row terminator is within the given range
range_end += (range_end < data.size());
// Reserve memory by allocating and then resetting the size
rmm::device_uvector<char> d_data{
(load_whole_file) ? data.size() : std::min(buffer_size * 2, data.size()), stream};
d_data.resize(0, stream);
rmm::device_uvector<uint64_t> all_row_offsets{0, stream};
do {
size_t target_pos = std::min(pos + max_chunk_bytes, data.size());
size_t chunk_size = target_pos - pos;
auto const previous_data_size = d_data.size();
d_data.resize(target_pos - buffer_pos, stream);
CUDA_TRY(hipMemcpyAsync(d_data.begin() + previous_data_size,
data.begin() + buffer_pos + previous_data_size,
target_pos - buffer_pos - previous_data_size,
hipMemcpyDefault,
stream.value()));
// Pass 1: Count the potential number of rows in each character block for each
// possible parser state at the beginning of the block.
uint32_t num_blocks = cudf::io::csv::gpu::gather_row_offsets(opts.view(),
row_ctx.device_ptr(),
device_span<uint64_t>(),
d_data,
chunk_size,
pos,
buffer_pos,
data.size(),
range_begin,
range_end,
skip_rows,
stream);
CUDA_TRY(hipMemcpyAsync(row_ctx.host_ptr(),
row_ctx.device_ptr(),
num_blocks * sizeof(uint64_t),
hipMemcpyDeviceToHost,
stream.value()));
stream.synchronize();
// Sum up the rows in each character block, selecting the row count that
// corresponds to the current input context. Also stores the now known input
// context per character block that will be needed by the second pass.
for (uint32_t i = 0; i < num_blocks; i++) {
uint64_t ctx_next = cudf::io::csv::gpu::select_row_context(ctx, row_ctx[i]);
row_ctx[i] = ctx;
ctx = ctx_next;
}
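// ctx packs the parser state in its two low bits and the running row count
// above them, hence the `ctx >> 2` extraction below.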
size_t total_rows = ctx >> 2;
if (total_rows > skip_rows) {
// At least one row in range in this batch
all_row_offsets.resize(total_rows - skip_rows, stream);
CUDA_TRY(hipMemcpyAsync(row_ctx.device_ptr(),
row_ctx.host_ptr(),
num_blocks * sizeof(uint64_t),
hipMemcpyHostToDevice,
stream.value()));
// Pass 2: Output row offsets
cudf::io::csv::gpu::gather_row_offsets(opts.view(),
row_ctx.device_ptr(),
all_row_offsets,
d_data,
chunk_size,
pos,
buffer_pos,
data.size(),
range_begin,
range_end,
skip_rows,
stream);
// With byte range, we want to keep only one row out of the specified range
if (range_end < data.size()) {
CUDA_TRY(hipMemcpyAsync(row_ctx.host_ptr(),
row_ctx.device_ptr(),
num_blocks * sizeof(uint64_t),
hipMemcpyDeviceToHost,
stream.value()));
stream.synchronize();
size_t rows_out_of_range = 0;
for (uint32_t i = 0; i < num_blocks; i++) { rows_out_of_range += row_ctx[i]; }
if (rows_out_of_range != 0) {
// Keep one row out of range (used to infer length of previous row)
auto new_row_offsets_size =
all_row_offsets.size() - std::min(rows_out_of_range - 1, all_row_offsets.size());
all_row_offsets.resize(new_row_offsets_size, stream);
// Implies we reached the end of the range
break;
}
}
// num_rows does not include blank rows
if (num_rows >= 0) {
if (all_row_offsets.size() > header_rows + static_cast<size_t>(num_rows)) {
size_t num_blanks =
cudf::io::csv::gpu::count_blank_rows(opts.view(), d_data, all_row_offsets, stream);
if (all_row_offsets.size() - num_blanks > header_rows + static_cast<size_t>(num_rows)) {
// Got the desired number of rows
break;
}
}
}
} else {
// Discard data (all rows below skip_rows), keeping one character for history
size_t discard_bytes = std::max(d_data.size(), sizeof(char)) - sizeof(char);
if (discard_bytes != 0) {
erase_except_last(d_data, stream);
buffer_pos += discard_bytes;
}
}
pos = target_pos;
} while (pos < data.size());
auto const non_blank_row_offsets =
io::csv::gpu::remove_blank_rows(opts.view(), d_data, all_row_offsets, stream);
auto row_offsets = selected_rows_offsets{std::move(all_row_offsets), non_blank_row_offsets};
// Remove header rows and extract header
const size_t header_row_index = std::max<size_t>(header_rows, 1) - 1;
if (header_row_index + 1 < row_offsets.size()) {
CUDA_TRY(hipMemcpyAsync(row_ctx.host_ptr(),
row_offsets.data() + header_row_index,
2 * sizeof(uint64_t),
hipMemcpyDeviceToHost,
stream.value()));
stream.synchronize();
const auto header_start = buffer_pos + row_ctx[0];
const auto header_end = buffer_pos + row_ctx[1];
CUDF_EXPECTS(header_start <= header_end && header_end <= data.size(),
"Invalid csv header location");
header_.assign(data.begin() + header_start, data.begin() + header_end);
if (header_rows > 0) { row_offsets.erase_first_n(header_rows); }
}
// Apply num_rows limit
if (num_rows >= 0 && static_cast<size_t>(num_rows) < row_offsets.size() - 1) {
row_offsets.shrink(num_rows + 1);
}
return {std::move(d_data), std::move(row_offsets)};
}
std::vector<data_type> reader::impl::gather_column_types(device_span<char const> data,
device_span<uint64_t const> row_offsets,
rmm::cuda_stream_view stream)
{
std::vector<data_type> dtypes;
if (opts_.get_dtypes().empty()) {
if (num_records_ == 0) {
dtypes.resize(num_active_cols_, data_type{type_id::EMPTY});
} else {
auto column_stats =
cudf::io::csv::gpu::detect_column_types(opts.view(),
data,
make_device_uvector_async(column_flags_, stream),
row_offsets,
num_active_cols_,
stream);
stream.synchronize();
for (int col = 0; col < num_active_cols_; col++) {
unsigned long long int_count_total = column_stats[col].big_int_count +
column_stats[col].negative_small_int_count +
column_stats[col].positive_small_int_count;
if (column_stats[col].null_count == num_records_) {
// Entire column is NULL; allocate the smallest amount of memory
dtypes.emplace_back(cudf::type_id::INT8);
} else if (column_stats[col].string_count > 0L) {
dtypes.emplace_back(cudf::type_id::STRING);
} else if (column_stats[col].datetime_count > 0L) {
dtypes.emplace_back(cudf::type_id::TIMESTAMP_NANOSECONDS);
} else if (column_stats[col].bool_count > 0L) {
dtypes.emplace_back(cudf::type_id::BOOL8);
} else if (column_stats[col].float_count > 0L ||
(column_stats[col].float_count == 0L && int_count_total > 0L &&
column_stats[col].null_count > 0L)) {
// The second condition has been added to conform to
// PANDAS, which states that a column of integers with
// a single NULL record needs to be treated as floats.
dtypes.emplace_back(cudf::type_id::FLOAT64);
} else if (column_stats[col].big_int_count == 0) {
dtypes.emplace_back(cudf::type_id::INT64);
} else if (column_stats[col].big_int_count != 0 &&
column_stats[col].negative_small_int_count != 0) {
dtypes.emplace_back(cudf::type_id::STRING);
} else {
// Integers are stored as 64-bit to conform to PANDAS
dtypes.emplace_back(cudf::type_id::UINT64);
}
}
}
} else {
const bool is_dict =
std::all_of(opts_.get_dtypes().begin(), opts_.get_dtypes().end(), [](const auto &s) {
return s.find(':') != std::string::npos;
});
if (!is_dict) {
if (opts_.get_dtypes().size() == 1) {
// If it's a single dtype, assign that dtype to all active columns
data_type dtype_;
column_parse::flags col_flags_;
std::tie(dtype_, col_flags_) = get_dtype_info(opts_.get_dtypes()[0]);
dtypes.resize(num_active_cols_, dtype_);
for (int col = 0; col < num_actual_cols_; col++) { column_flags_[col] |= col_flags_; }
CUDF_EXPECTS(dtypes.back().id() != cudf::type_id::EMPTY, "Unsupported data type");
} else {
// If it's a list, assign dtypes to active columns in the given order
CUDF_EXPECTS(static_cast<int>(opts_.get_dtypes().size()) >= num_actual_cols_,
"Must specify data types for all columns");
auto dtype_ = std::back_inserter(dtypes);
for (int col = 0; col < num_actual_cols_; col++) {
if (column_flags_[col] & column_parse::enabled) {
column_parse::flags col_flags_;
std::tie(dtype_, col_flags_) = get_dtype_info(opts_.get_dtypes()[col]);
column_flags_[col] |= col_flags_;
CUDF_EXPECTS(dtypes.back().id() != cudf::type_id::EMPTY, "Unsupported data type");
}
}
}
} else {
// Translate vector of `name : dtype` strings to map
// NOTE: Incoming pairs can be out-of-order from column names in dataset
std::unordered_map<std::string, std::string> col_type_map;
for (const auto &pair : opts_.get_dtypes()) {
const auto pos = pair.find_last_of(':');
const auto name = pair.substr(0, pos);
const auto dtype = pair.substr(pos + 1, pair.size());
col_type_map[name] = dtype;
}
auto dtype_ = std::back_inserter(dtypes);
for (int col = 0; col < num_actual_cols_; col++) {
if (column_flags_[col] & column_parse::enabled) {
CUDF_EXPECTS(col_type_map.find(col_names_[col]) != col_type_map.end(),
"Must specify data types for all active columns");
column_parse::flags col_flags_;
std::tie(dtype_, col_flags_) = get_dtype_info(col_type_map[col_names_[col]]);
column_flags_[col] |= col_flags_;
CUDF_EXPECTS(dtypes.back().id() != cudf::type_id::EMPTY, "Unsupported data type");
}
}
}
}
if (opts_.get_timestamp_type().id() != cudf::type_id::EMPTY) {
for (auto &type : dtypes) {
if (cudf::is_timestamp(type)) { type = opts_.get_timestamp_type(); }
}
}
for (size_t i = 0; i < dtypes.size(); i++) {
// Replace EMPTY dtype with STRING
if (dtypes[i].id() == type_id::EMPTY) { dtypes[i] = data_type{type_id::STRING}; }
}
return dtypes;
}
std::vector<column_buffer> reader::impl::decode_data(device_span<char const> data,
device_span<uint64_t const> row_offsets,
host_span<data_type const> column_types,
rmm::cuda_stream_view stream)
{
// Alloc output; columns' data memory is still expected for empty dataframe
std::vector<column_buffer> out_buffers;
out_buffers.reserve(column_types.size());
for (int col = 0, active_col = 0; col < num_actual_cols_; ++col) {
if (column_flags_[col] & column_parse::enabled) {
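// String columns are materialized into their final column later (see read()),
// so their intermediate buffer can use the default resource instead of mr_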
const bool is_final_allocation = column_types[active_col].id() != type_id::STRING;
auto out_buffer =
column_buffer(column_types[active_col],
num_records_,
true,
stream,
is_final_allocation ? mr_ : rmm::mr::get_current_device_resource());
out_buffer.name = col_names_[col];
out_buffer.null_count() = UNKNOWN_NULL_COUNT;
out_buffers.emplace_back(std::move(out_buffer));
active_col++;
}
}
thrust::host_vector<void *> h_data(num_active_cols_);
thrust::host_vector<bitmask_type *> h_valid(num_active_cols_);
for (int i = 0; i < num_active_cols_; ++i) {
h_data[i] = out_buffers[i].data();
h_valid[i] = out_buffers[i].null_mask();
}
cudf::io::csv::gpu::decode_row_column_data(opts.view(),
data,
make_device_uvector_async(column_flags_, stream),
row_offsets,
make_device_uvector_async(column_types, stream),
make_device_uvector_async(h_data, stream),
make_device_uvector_async(h_valid, stream),
stream);
return out_buffers;
}
/**
* @brief Create a serialized trie for N/A value matching, based on the options.
*/
cudf::detail::trie create_na_trie(char quotechar,
csv_reader_options const &reader_opts,
rmm::cuda_stream_view stream)
{
// Default values to recognize as null values
static std::vector<std::string> const default_na_values{"",
"#N/A",
"#N/A N/A",
"#NA",
"-1.#IND",
"-1.#QNAN",
"-NaN",
"-nan",
"1.#IND",
"1.#QNAN",
"<NA>",
"N/A",
"NA",
"NULL",
"NaN",
"n/a",
"nan",
"null"};
if (!reader_opts.is_enabled_na_filter()) { return cudf::detail::trie(0, stream); }
std::vector<std::string> na_values = reader_opts.get_na_values();
if (reader_opts.is_enabled_keep_default_na()) {
na_values.insert(na_values.end(), default_na_values.begin(), default_na_values.end());
}
// Pandas treats empty strings as N/A if empty fields are treated as N/A
if (std::find(na_values.begin(), na_values.end(), "") != na_values.end()) {
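// Also match a pair of adjacent quotechars, i.e. an empty quoted field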
na_values.push_back(std::string(2, quotechar));
}
return cudf::detail::create_serialized_trie(na_values, stream);
}
parse_options make_parse_options(csv_reader_options const &reader_opts,
rmm::cuda_stream_view stream)
{
auto parse_opts = parse_options{};
if (reader_opts.is_enabled_delim_whitespace()) {
parse_opts.delimiter = ' ';
parse_opts.multi_delimiter = true;
} else {
parse_opts.delimiter = reader_opts.get_delimiter();
parse_opts.multi_delimiter = false;
}
parse_opts.terminator = reader_opts.get_lineterminator();
if (reader_opts.get_quotechar() != '\0' && reader_opts.get_quoting() != quote_style::NONE) {
parse_opts.quotechar = reader_opts.get_quotechar();
parse_opts.keepquotes = false;
parse_opts.doublequote = reader_opts.is_enabled_doublequote();
} else {
parse_opts.quotechar = '\0';
parse_opts.keepquotes = true;
parse_opts.doublequote = false;
}
parse_opts.skipblanklines = reader_opts.is_enabled_skip_blank_lines();
parse_opts.comment = reader_opts.get_comment();
parse_opts.dayfirst = reader_opts.is_enabled_dayfirst();
parse_opts.decimal = reader_opts.get_decimal();
parse_opts.thousands = reader_opts.get_thousands();
CUDF_EXPECTS(parse_opts.decimal != parse_opts.delimiter,
"Decimal point cannot be the same as the delimiter");
CUDF_EXPECTS(parse_opts.thousands != parse_opts.delimiter,
"Thousands separator cannot be the same as the delimiter");
// Handle user-defined true values, whereby field data is substituted with a
// boolean true or numeric `1` value
if (reader_opts.get_true_values().size() != 0) {
parse_opts.trie_true =
cudf::detail::create_serialized_trie(reader_opts.get_true_values(), stream);
}
// Handle user-defined false values, whereby field data is substituted with a
// boolean false or numeric `0` value
if (reader_opts.get_false_values().size() != 0) {
parse_opts.trie_false =
cudf::detail::create_serialized_trie(reader_opts.get_false_values(), stream);
}
// Handle user-defined N/A values, whereby field data is treated as null
parse_opts.trie_na = create_na_trie(parse_opts.quotechar, reader_opts, stream);
return parse_opts;
}
reader::impl::impl(std::unique_ptr<datasource> source,
std::string filepath,
csv_reader_options const &options,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource *mr)
: mr_(mr), source_(std::move(source)), filepath_(filepath), opts_(options)
{
num_actual_cols_ = opts_.get_names().size();
num_active_cols_ = num_actual_cols_;
compression_type_ =
infer_compression_type(opts_.get_compression(),
filepath,
{{"gz", "gzip"}, {"zip", "zip"}, {"bz2", "bz2"}, {"xz", "xz"}});
opts = make_parse_options(options, stream);
}
// Forward to implementation
reader::reader(std::vector<std::string> const &filepaths,
csv_reader_options const &options,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource *mr)
{
CUDF_EXPECTS(filepaths.size() == 1, "Only a single source is currently supported.");
// Delay actual instantiation of data source until read to allow for
// partial memory mapping of file using byte ranges
_impl = std::make_unique<impl>(nullptr, filepaths[0], options, stream, mr);
}
// Forward to implementation
reader::reader(std::vector<std::unique_ptr<cudf::io::datasource>> &&sources,
csv_reader_options const &options,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource *mr)
{
CUDF_EXPECTS(sources.size() == 1, "Only a single source is currently supported.");
_impl = std::make_unique<impl>(std::move(sources[0]), "", options, stream, mr);
}
// Destructor within this translation unit
reader::~reader() = default;
// Forward to implementation
table_with_metadata reader::read(rmm::cuda_stream_view stream) { return _impl->read(stream); }
} // namespace csv
} // namespace detail
} // namespace io
} // namespace cudf
| 60a4688d36a42a75e0afa3ca90f7d098901782c4.cu | /*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file reader_impl.cu
* @brief cuDF-IO CSV reader class implementation
*/
#include "reader_impl.hpp"
#include <io/comp/io_uncomp.h>
#include <io/utilities/parsing_utils.cuh>
#include <io/utilities/type_conversion.cuh>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/io/types.hpp>
#include <cudf/strings/replace.hpp>
#include <cudf/table/table.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/span.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <algorithm>
#include <iostream>
#include <numeric>
#include <tuple>
#include <unordered_map>
using std::string;
using std::vector;
using cudf::device_span;
using cudf::host_span;
using cudf::detail::make_device_uvector_async;
namespace cudf {
namespace io {
namespace detail {
namespace csv {
using namespace cudf::io::csv;
using namespace cudf::io;
/**
* @brief Estimates the maximum expected length of a row, based on the number
* of columns
*
* If the number of columns is not available, it will return a value large
* enough for most use cases
*
* @param[in] num_columns Number of columns in the CSV file (optional)
*
* @return Estimated maximum size of a row, in bytes
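*
* e.g. with 10 columns the estimate is 1024 + 10 * 64 = 1664 bytes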
*/
constexpr size_t calculateMaxRowSize(int num_columns = 0) noexcept
{
constexpr size_t max_row_bytes = 16 * 1024; // 16KB
constexpr size_t column_bytes = 64;
constexpr size_t base_padding = 1024; // 1KB
if (num_columns == 0) {
// Use flat size if the number of columns is not known
return max_row_bytes;
} else {
// Expand the size based on the number of columns, if available
return base_padding + num_columns * column_bytes;
}
}
/**
* @brief Translates a dtype string and returns its dtype enumeration and any
* extended dtype flags that are supported by cuIO. Often, this is a column
* with the same underlying dtype as the basic types, but with different parsing
* interpretations.
*
* @param[in] dtype String containing the basic or extended dtype
*
* @return Tuple of data_type and flags
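*
* For example, "hex32" yields (INT32, as_hexadecimal) and "hex"/"hex64" yield
* (INT64, as_hexadecimal); any other string is forwarded to
* convert_string_to_dtype with the default flags.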
*/
std::tuple<data_type, column_parse::flags> get_dtype_info(const std::string &dtype)
{
if (dtype == "hex" || dtype == "hex64") {
return std::make_tuple(data_type{cudf::type_id::INT64}, column_parse::as_hexadecimal);
}
if (dtype == "hex32") {
return std::make_tuple(data_type{cudf::type_id::INT32}, column_parse::as_hexadecimal);
}
return std::make_tuple(convert_string_to_dtype(dtype), column_parse::as_default);
}
/**
* @brief Removes the first and last quotes in the string
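*
* e.g. removeQuotes("\"abc\"", '"') yields abc (both quotes stripped)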
*/
string removeQuotes(string str, char quotechar)
{
// Exclude first and last quotation char
const size_t first_quote = str.find(quotechar);
if (first_quote != string::npos) { str.erase(first_quote, 1); }
const size_t last_quote = str.rfind(quotechar);
if (last_quote != string::npos) { str.erase(last_quote, 1); }
return str;
}
/**
* @brief Parse the first row to set the column names in the raw_csv parameter.
* The first row can be either the header row, or the first data row
*/
std::vector<std::string> setColumnNames(std::vector<char> const &header,
parse_options_view const &opts,
int header_row,
std::string prefix)
{
std::vector<std::string> col_names;
// If there is only a single character then it would be the terminator
if (header.size() <= 1) { return col_names; }
std::vector<char> first_row = header;
int num_cols = 0;
bool quotation = false;
for (size_t pos = 0, prev = 0; pos < first_row.size(); ++pos) {
// Flip the quotation flag if current character is a quotechar
if (first_row[pos] == opts.quotechar) {
quotation = !quotation;
}
// Check if end of a column/row
else if (pos == first_row.size() - 1 || (!quotation && first_row[pos] == opts.terminator) ||
(!quotation && first_row[pos] == opts.delimiter)) {
// This is the header, add the column name
if (header_row >= 0) {
// Include the current character, in case the line is not terminated
int col_name_len = pos - prev + 1;
// Exclude the delimiter/terminator, if present
if (first_row[pos] == opts.delimiter || first_row[pos] == opts.terminator) {
--col_name_len;
}
// Also exclude '\r' character at the end of the column name if it's
// part of the terminator
if (col_name_len > 0 && opts.terminator == '\n' && first_row[pos] == '\n' &&
first_row[pos - 1] == '\r') {
--col_name_len;
}
const string new_col_name(first_row.data() + prev, col_name_len);
col_names.push_back(removeQuotes(new_col_name, opts.quotechar));
// Stop parsing when we hit the line terminator; relevant when there is
// a blank line following the header. In this case, first_row includes
// multiple line terminators at the end, as the new recStart belongs to
// a line that comes after the blank line(s)
if (!quotation && first_row[pos] == opts.terminator) { break; }
} else {
// This is the first data row, add the automatically generated name
col_names.push_back(prefix + std::to_string(num_cols));
}
num_cols++;
// Skip adjacent delimiters if delim_whitespace is set
while (opts.multi_delimiter && pos < first_row.size() && first_row[pos] == opts.delimiter &&
first_row[pos + 1] == opts.delimiter) {
++pos;
}
prev = pos + 1;
}
}
return col_names;
}
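/**
* @brief Shrinks a device container down to a single element: its last value.
*
* A single-thread kernel copies the last element over the first, then the
* container is resized to hold only that element.
*/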
template <typename C>
void erase_except_last(C &container, rmm::cuda_stream_view stream)
{
cudf::detail::device_single_thread(
[span = device_span<typename C::value_type>{container}] __device__() mutable {
span.front() = span.back();
},
stream);
container.resize(1, stream);
}
std::pair<rmm::device_uvector<char>, reader::impl::selected_rows_offsets>
reader::impl::select_data_and_row_offsets(rmm::cuda_stream_view stream)
{
auto range_offset = opts_.get_byte_range_offset();
auto range_size = opts_.get_byte_range_size();
auto skip_rows = opts_.get_skiprows();
auto skip_end_rows = opts_.get_skipfooter();
auto num_rows = opts_.get_nrows();
if (range_offset > 0 || range_size > 0) {
CUDF_EXPECTS(compression_type_ == "none",
"Reading compressed data using `byte range` is unsupported");
}
size_t map_range_size = 0;
if (range_size != 0) {
const auto num_columns = std::max(opts_.get_names().size(), opts_.get_dtypes().size());
map_range_size = range_size + calculateMaxRowSize(num_columns);
}
// Support delayed opening of the file if using memory mapping datasource
// This allows only mapping of a subset of the file if using byte range
if (source_ == nullptr) {
assert(!filepath_.empty());
source_ = datasource::create(filepath_, range_offset, map_range_size);
}
// Transfer source data to GPU
if (!source_->is_empty()) {
auto data_size = (map_range_size != 0) ? map_range_size : source_->size();
auto buffer = source_->host_read(range_offset, data_size);
auto h_data = host_span<char const>( //
reinterpret_cast<const char *>(buffer->data()),
buffer->size());
std::vector<char> h_uncomp_data_owner;
if (compression_type_ != "none") {
h_uncomp_data_owner = get_uncompressed_data(h_data, compression_type_);
h_data = h_uncomp_data_owner;
}
// None of the parameters for row selection is used; we are parsing the entire file
const bool load_whole_file = range_offset == 0 && range_size == 0 && skip_rows <= 0 &&
skip_end_rows <= 0 && num_rows == -1;
// With byte range, find the start of the first data row
size_t const data_start_offset = (range_offset != 0) ? find_first_row_start(h_data) : 0;
// TODO: Allow parsing the header outside the mapped range
CUDF_EXPECTS((range_offset == 0 || opts_.get_header() < 0),
"byte_range offset with header not supported");
// Gather row offsets
auto data_row_offsets =
load_data_and_gather_row_offsets(h_data,
data_start_offset,
(range_size) ? range_size : h_data.size(),
(skip_rows > 0) ? skip_rows : 0,
num_rows,
load_whole_file,
stream);
auto &row_offsets = data_row_offsets.second;
// Exclude the rows that are to be skipped from the end
if (skip_end_rows > 0 && static_cast<size_t>(skip_end_rows) < row_offsets.size()) {
row_offsets.shrink(row_offsets.size() - skip_end_rows);
}
return data_row_offsets;
}
return {rmm::device_uvector<char>{0, stream}, selected_rows_offsets{stream}};
}
table_with_metadata reader::impl::read(rmm::cuda_stream_view stream)
{
auto const data_row_offsets = select_data_and_row_offsets(stream);
auto const &data = data_row_offsets.first;
auto const &row_offsets = data_row_offsets.second;
// Exclude the end-of-data row from number of rows with actual data
num_records_ = std::max(row_offsets.size(), 1ul) - 1;
// Check if the user gave us a list of column names
if (not opts_.get_names().empty()) {
column_flags_.resize(opts_.get_names().size(), column_parse::enabled);
col_names_ = opts_.get_names();
} else {
col_names_ = setColumnNames(header_, opts.view(), opts_.get_header(), opts_.get_prefix());
num_actual_cols_ = num_active_cols_ = col_names_.size();
column_flags_.resize(num_actual_cols_, column_parse::enabled);
// Rename empty column names to "Unnamed: col_index"
for (size_t col_idx = 0; col_idx < col_names_.size(); ++col_idx) {
if (col_names_[col_idx].empty()) {
col_names_[col_idx] = string("Unnamed: ") + std::to_string(col_idx);
}
}
// Looking for duplicates
std::unordered_map<string, int> col_names_histogram;
for (auto &col_name : col_names_) {
// Operator [] inserts a default-initialized value if the given key is not
// present
if (++col_names_histogram[col_name] > 1) {
if (opts_.is_enabled_mangle_dupe_cols()) {
// Rename duplicates of column X as X.1, X.2, ...; First appearance
// stays as X
col_name += "." + std::to_string(col_names_histogram[col_name] - 1);
} else {
// All duplicate columns will be ignored; First appearance is parsed
const auto idx = &col_name - col_names_.data();
column_flags_[idx] = column_parse::disabled;
}
}
}
// Update the number of columns to be processed, if some might have been
// removed
if (!opts_.is_enabled_mangle_dupe_cols()) { num_active_cols_ = col_names_histogram.size(); }
}
// User can specify which columns should be parsed
if (!opts_.get_use_cols_indexes().empty() || !opts_.get_use_cols_names().empty()) {
std::fill(column_flags_.begin(), column_flags_.end(), column_parse::disabled);
for (const auto index : opts_.get_use_cols_indexes()) {
column_flags_[index] = column_parse::enabled;
}
num_active_cols_ = opts_.get_use_cols_indexes().size();
for (const auto &name : opts_.get_use_cols_names()) {
const auto it = std::find(col_names_.begin(), col_names_.end(), name);
if (it != col_names_.end()) {
column_flags_[it - col_names_.begin()] = column_parse::enabled;
num_active_cols_++;
}
}
}
// User can specify which columns should be inferred as datetime
if (!opts_.get_infer_date_indexes().empty() || !opts_.get_infer_date_names().empty()) {
for (const auto index : opts_.get_infer_date_indexes()) {
column_flags_[index] |= column_parse::as_datetime;
}
for (const auto &name : opts_.get_infer_date_names()) {
auto it = std::find(col_names_.begin(), col_names_.end(), name);
if (it != col_names_.end()) {
column_flags_[it - col_names_.begin()] |= column_parse::as_datetime;
}
}
}
// Return empty table rather than exception if nothing to load
if (num_active_cols_ == 0) { return {std::make_unique<table>(), {}}; }
auto metadata = table_metadata{};
auto out_columns = std::vector<std::unique_ptr<cudf::column>>();
auto column_types = gather_column_types(data, row_offsets, stream);
out_columns.reserve(column_types.size());
if (num_records_ != 0) {
auto out_buffers = decode_data(data, row_offsets, column_types, stream);
for (size_t i = 0; i < column_types.size(); ++i) {
metadata.column_names.emplace_back(out_buffers[i].name);
if (column_types[i].id() == type_id::STRING && opts.quotechar != '\0' &&
opts.doublequote == true) {
// PANDAS' default behavior of enabling doublequote for two consecutive
// quotechars in quoted fields results in reduction to a single quotechar
// TODO: Would be much more efficient to perform this operation in-place
// during the conversion stage
const std::string quotechar(1, opts.quotechar);
const std::string dblquotechar(2, opts.quotechar);
std::unique_ptr<column> col = cudf::make_strings_column(*out_buffers[i]._strings, stream);
out_columns.emplace_back(
cudf::strings::replace(col->view(), dblquotechar, quotechar, -1, mr_));
} else {
out_columns.emplace_back(make_column(out_buffers[i], nullptr, stream, mr_));
}
}
} else {
// Create empty columns
for (size_t i = 0; i < column_types.size(); ++i) {
out_columns.emplace_back(make_empty_column(column_types[i]));
}
// Handle empty metadata
for (int col = 0; col < num_actual_cols_; ++col) {
if (column_flags_[col] & column_parse::enabled) {
metadata.column_names.emplace_back(col_names_[col]);
}
}
}
return {std::make_unique<table>(std::move(out_columns)), std::move(metadata)};
}
size_t reader::impl::find_first_row_start(host_span<char const> data)
{
// For now, look for the first terminator (assume the first terminator isn't within a quote)
// TODO: Attempt to infer this from the data
size_t pos = 0;
while (pos < data.size() && data[pos] != opts.terminator) { ++pos; }
return std::min(pos + 1, data.size());
}
std::pair<rmm::device_uvector<char>, reader::impl::selected_rows_offsets>
reader::impl::load_data_and_gather_row_offsets(host_span<char const> data,
size_t range_begin,
size_t range_end,
size_t skip_rows,
int64_t num_rows,
bool load_whole_file,
rmm::cuda_stream_view stream)
{
constexpr size_t max_chunk_bytes = 64 * 1024 * 1024; // 64MB
size_t buffer_size = std::min(max_chunk_bytes, data.size());
size_t max_blocks =
std::max<size_t>((buffer_size / cudf::io::csv::gpu::rowofs_block_bytes) + 1, 2);
hostdevice_vector<uint64_t> row_ctx(max_blocks);
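// Start the buffer one byte before the byte range (when possible) so that the
// terminator of the previous row remains available as parsing history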
size_t buffer_pos = std::min(range_begin - std::min(range_begin, sizeof(char)), data.size());
size_t pos = std::min(range_begin, data.size());
size_t header_rows = (opts_.get_header() >= 0) ? opts_.get_header() + 1 : 0;
uint64_t ctx = 0;
// For compatibility with the previous parser, a row is considered in-range if the
// previous row terminator is within the given range
range_end += (range_end < data.size());
// Reserve memory by allocating and then resetting the size
rmm::device_uvector<char> d_data{
(load_whole_file) ? data.size() : std::min(buffer_size * 2, data.size()), stream};
d_data.resize(0, stream);
rmm::device_uvector<uint64_t> all_row_offsets{0, stream};
do {
size_t target_pos = std::min(pos + max_chunk_bytes, data.size());
size_t chunk_size = target_pos - pos;
auto const previous_data_size = d_data.size();
d_data.resize(target_pos - buffer_pos, stream);
CUDA_TRY(cudaMemcpyAsync(d_data.begin() + previous_data_size,
data.begin() + buffer_pos + previous_data_size,
target_pos - buffer_pos - previous_data_size,
cudaMemcpyDefault,
stream.value()));
// Pass 1: Count the potential number of rows in each character block for each
// possible parser state at the beginning of the block.
uint32_t num_blocks = cudf::io::csv::gpu::gather_row_offsets(opts.view(),
row_ctx.device_ptr(),
device_span<uint64_t>(),
d_data,
chunk_size,
pos,
buffer_pos,
data.size(),
range_begin,
range_end,
skip_rows,
stream);
CUDA_TRY(cudaMemcpyAsync(row_ctx.host_ptr(),
row_ctx.device_ptr(),
num_blocks * sizeof(uint64_t),
cudaMemcpyDeviceToHost,
stream.value()));
stream.synchronize();
// Sum up the rows in each character block, selecting the row count that
// corresponds to the current input context. Also stores the now known input
// context per character block that will be needed by the second pass.
for (uint32_t i = 0; i < num_blocks; i++) {
uint64_t ctx_next = cudf::io::csv::gpu::select_row_context(ctx, row_ctx[i]);
row_ctx[i] = ctx;
ctx = ctx_next;
}
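// After the scan, `ctx` aggregates the whole chunk: the row count is packed in
// the upper bits, with the parser state in the low two bits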
size_t total_rows = ctx >> 2;
if (total_rows > skip_rows) {
// At least one row in range in this batch
all_row_offsets.resize(total_rows - skip_rows, stream);
CUDA_TRY(cudaMemcpyAsync(row_ctx.device_ptr(),
row_ctx.host_ptr(),
num_blocks * sizeof(uint64_t),
cudaMemcpyHostToDevice,
stream.value()));
// Pass 2: Output row offsets
cudf::io::csv::gpu::gather_row_offsets(opts.view(),
row_ctx.device_ptr(),
all_row_offsets,
d_data,
chunk_size,
pos,
buffer_pos,
data.size(),
range_begin,
range_end,
skip_rows,
stream);
// With byte range, we want to keep only one row out of the specified range
if (range_end < data.size()) {
CUDA_TRY(cudaMemcpyAsync(row_ctx.host_ptr(),
row_ctx.device_ptr(),
num_blocks * sizeof(uint64_t),
cudaMemcpyDeviceToHost,
stream.value()));
stream.synchronize();
size_t rows_out_of_range = 0;
for (uint32_t i = 0; i < num_blocks; i++) { rows_out_of_range += row_ctx[i]; }
if (rows_out_of_range != 0) {
// Keep one row out of range (used to infer length of previous row)
auto new_row_offsets_size =
all_row_offsets.size() - std::min(rows_out_of_range - 1, all_row_offsets.size());
all_row_offsets.resize(new_row_offsets_size, stream);
// Implies we reached the end of the range
break;
}
}
// num_rows does not include blank rows
if (num_rows >= 0) {
if (all_row_offsets.size() > header_rows + static_cast<size_t>(num_rows)) {
size_t num_blanks =
cudf::io::csv::gpu::count_blank_rows(opts.view(), d_data, all_row_offsets, stream);
if (all_row_offsets.size() - num_blanks > header_rows + static_cast<size_t>(num_rows)) {
// Got the desired number of rows
break;
}
}
}
} else {
// Discard data (all rows below skip_rows), keeping one character for history
size_t discard_bytes = std::max(d_data.size(), sizeof(char)) - sizeof(char);
if (discard_bytes != 0) {
erase_except_last(d_data, stream);
buffer_pos += discard_bytes;
}
}
pos = target_pos;
} while (pos < data.size());
auto const non_blank_row_offsets =
io::csv::gpu::remove_blank_rows(opts.view(), d_data, all_row_offsets, stream);
auto row_offsets = selected_rows_offsets{std::move(all_row_offsets), non_blank_row_offsets};
// Remove header rows and extract header
const size_t header_row_index = std::max<size_t>(header_rows, 1) - 1;
if (header_row_index + 1 < row_offsets.size()) {
CUDA_TRY(cudaMemcpyAsync(row_ctx.host_ptr(),
row_offsets.data() + header_row_index,
2 * sizeof(uint64_t),
cudaMemcpyDeviceToHost,
stream.value()));
stream.synchronize();
const auto header_start = buffer_pos + row_ctx[0];
const auto header_end = buffer_pos + row_ctx[1];
CUDF_EXPECTS(header_start <= header_end && header_end <= data.size(),
"Invalid csv header location");
header_.assign(data.begin() + header_start, data.begin() + header_end);
if (header_rows > 0) { row_offsets.erase_first_n(header_rows); }
}
// Apply num_rows limit
if (num_rows >= 0 && static_cast<size_t>(num_rows) < row_offsets.size() - 1) {
row_offsets.shrink(num_rows + 1);
}
return {std::move(d_data), std::move(row_offsets)};
}
std::vector<data_type> reader::impl::gather_column_types(device_span<char const> data,
device_span<uint64_t const> row_offsets,
rmm::cuda_stream_view stream)
{
std::vector<data_type> dtypes;
if (opts_.get_dtypes().empty()) {
if (num_records_ == 0) {
dtypes.resize(num_active_cols_, data_type{type_id::EMPTY});
} else {
auto column_stats =
cudf::io::csv::gpu::detect_column_types(opts.view(),
data,
make_device_uvector_async(column_flags_, stream),
row_offsets,
num_active_cols_,
stream);
stream.synchronize();
for (int col = 0; col < num_active_cols_; col++) {
unsigned long long int_count_total = column_stats[col].big_int_count +
column_stats[col].negative_small_int_count +
column_stats[col].positive_small_int_count;
if (column_stats[col].null_count == num_records_) {
// Entire column is NULL; allocate the smallest amount of memory
dtypes.emplace_back(cudf::type_id::INT8);
} else if (column_stats[col].string_count > 0L) {
dtypes.emplace_back(cudf::type_id::STRING);
} else if (column_stats[col].datetime_count > 0L) {
dtypes.emplace_back(cudf::type_id::TIMESTAMP_NANOSECONDS);
} else if (column_stats[col].bool_count > 0L) {
dtypes.emplace_back(cudf::type_id::BOOL8);
} else if (column_stats[col].float_count > 0L ||
(column_stats[col].float_count == 0L && int_count_total > 0L &&
column_stats[col].null_count > 0L)) {
// The second condition has been added to conform to
// PANDAS which states that a column of integers with
// a single NULL record needs to be treated as floats.
dtypes.emplace_back(cudf::type_id::FLOAT64);
} else if (column_stats[col].big_int_count == 0) {
dtypes.emplace_back(cudf::type_id::INT64);
} else if (column_stats[col].big_int_count != 0 &&
column_stats[col].negative_small_int_count != 0) {
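// Values beyond the int64 range mixed with negative values cannot fit in any
// single integer type, so fall back to strings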
dtypes.emplace_back(cudf::type_id::STRING);
} else {
// Integers are stored as 64-bit to conform to PANDAS
dtypes.emplace_back(cudf::type_id::UINT64);
}
}
}
} else {
const bool is_dict =
std::all_of(opts_.get_dtypes().begin(), opts_.get_dtypes().end(), [](const auto &s) {
return s.find(':') != std::string::npos;
});
if (!is_dict) {
if (opts_.get_dtypes().size() == 1) {
// If it's a single dtype, assign that dtype to all active columns
data_type dtype_;
column_parse::flags col_flags_;
std::tie(dtype_, col_flags_) = get_dtype_info(opts_.get_dtypes()[0]);
dtypes.resize(num_active_cols_, dtype_);
for (int col = 0; col < num_actual_cols_; col++) { column_flags_[col] |= col_flags_; }
CUDF_EXPECTS(dtypes.back().id() != cudf::type_id::EMPTY, "Unsupported data type");
} else {
// If it's a list, assign dtypes to active columns in the given order
CUDF_EXPECTS(static_cast<int>(opts_.get_dtypes().size()) >= num_actual_cols_,
"Must specify data types for all columns");
auto dtype_ = std::back_inserter(dtypes);
for (int col = 0; col < num_actual_cols_; col++) {
if (column_flags_[col] & column_parse::enabled) {
column_parse::flags col_flags_;
std::tie(dtype_, col_flags_) = get_dtype_info(opts_.get_dtypes()[col]);
column_flags_[col] |= col_flags_;
CUDF_EXPECTS(dtypes.back().id() != cudf::type_id::EMPTY, "Unsupported data type");
}
}
}
} else {
// Translate vector of `name : dtype` strings to map
// NOTE: Incoming pairs can be out-of-order from column names in dataset
std::unordered_map<std::string, std::string> col_type_map;
for (const auto &pair : opts_.get_dtypes()) {
const auto pos = pair.find_last_of(':');
const auto name = pair.substr(0, pos);
const auto dtype = pair.substr(pos + 1, pair.size());
col_type_map[name] = dtype;
}
auto dtype_ = std::back_inserter(dtypes);
for (int col = 0; col < num_actual_cols_; col++) {
if (column_flags_[col] & column_parse::enabled) {
CUDF_EXPECTS(col_type_map.find(col_names_[col]) != col_type_map.end(),
"Must specify data types for all active columns");
column_parse::flags col_flags_;
std::tie(dtype_, col_flags_) = get_dtype_info(col_type_map[col_names_[col]]);
column_flags_[col] |= col_flags_;
CUDF_EXPECTS(dtypes.back().id() != cudf::type_id::EMPTY, "Unsupported data type");
}
}
}
}
if (opts_.get_timestamp_type().id() != cudf::type_id::EMPTY) {
for (auto &type : dtypes) {
if (cudf::is_timestamp(type)) { type = opts_.get_timestamp_type(); }
}
}
for (size_t i = 0; i < dtypes.size(); i++) {
// Replace EMPTY dtype with STRING
if (dtypes[i].id() == type_id::EMPTY) { dtypes[i] = data_type{type_id::STRING}; }
}
return dtypes;
}
std::vector<column_buffer> reader::impl::decode_data(device_span<char const> data,
device_span<uint64_t const> row_offsets,
host_span<data_type const> column_types,
rmm::cuda_stream_view stream)
{
// Alloc output; columns' data memory is still expected for empty dataframe
std::vector<column_buffer> out_buffers;
out_buffers.reserve(column_types.size());
for (int col = 0, active_col = 0; col < num_actual_cols_; ++col) {
if (column_flags_[col] & column_parse::enabled) {
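// String columns are materialized into their final column later (see read()),
// so their intermediate buffer can use the default resource instead of mr_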
const bool is_final_allocation = column_types[active_col].id() != type_id::STRING;
auto out_buffer =
column_buffer(column_types[active_col],
num_records_,
true,
stream,
is_final_allocation ? mr_ : rmm::mr::get_current_device_resource());
out_buffer.name = col_names_[col];
out_buffer.null_count() = UNKNOWN_NULL_COUNT;
out_buffers.emplace_back(std::move(out_buffer));
active_col++;
}
}
thrust::host_vector<void *> h_data(num_active_cols_);
thrust::host_vector<bitmask_type *> h_valid(num_active_cols_);
for (int i = 0; i < num_active_cols_; ++i) {
h_data[i] = out_buffers[i].data();
h_valid[i] = out_buffers[i].null_mask();
}
cudf::io::csv::gpu::decode_row_column_data(opts.view(),
data,
make_device_uvector_async(column_flags_, stream),
row_offsets,
make_device_uvector_async(column_types, stream),
make_device_uvector_async(h_data, stream),
make_device_uvector_async(h_valid, stream),
stream);
return out_buffers;
}
/**
* @brief Create a serialized trie for N/A value matching, based on the options.
*/
cudf::detail::trie create_na_trie(char quotechar,
csv_reader_options const &reader_opts,
rmm::cuda_stream_view stream)
{
// Default values to recognize as null values
static std::vector<std::string> const default_na_values{"",
"#N/A",
"#N/A N/A",
"#NA",
"-1.#IND",
"-1.#QNAN",
"-NaN",
"-nan",
"1.#IND",
"1.#QNAN",
"<NA>",
"N/A",
"NA",
"NULL",
"NaN",
"n/a",
"nan",
"null"};
if (!reader_opts.is_enabled_na_filter()) { return cudf::detail::trie(0, stream); }
std::vector<std::string> na_values = reader_opts.get_na_values();
if (reader_opts.is_enabled_keep_default_na()) {
na_values.insert(na_values.end(), default_na_values.begin(), default_na_values.end());
}
// Pandas treats empty strings as N/A if empty fields are treated as N/A
if (std::find(na_values.begin(), na_values.end(), "") != na_values.end()) {
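// Also match a pair of adjacent quotechars, i.e. an empty quoted field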
na_values.push_back(std::string(2, quotechar));
}
return cudf::detail::create_serialized_trie(na_values, stream);
}
parse_options make_parse_options(csv_reader_options const &reader_opts,
rmm::cuda_stream_view stream)
{
auto parse_opts = parse_options{};
if (reader_opts.is_enabled_delim_whitespace()) {
parse_opts.delimiter = ' ';
parse_opts.multi_delimiter = true;
} else {
parse_opts.delimiter = reader_opts.get_delimiter();
parse_opts.multi_delimiter = false;
}
parse_opts.terminator = reader_opts.get_lineterminator();
if (reader_opts.get_quotechar() != '\0' && reader_opts.get_quoting() != quote_style::NONE) {
parse_opts.quotechar = reader_opts.get_quotechar();
parse_opts.keepquotes = false;
parse_opts.doublequote = reader_opts.is_enabled_doublequote();
} else {
parse_opts.quotechar = '\0';
parse_opts.keepquotes = true;
parse_opts.doublequote = false;
}
parse_opts.skipblanklines = reader_opts.is_enabled_skip_blank_lines();
parse_opts.comment = reader_opts.get_comment();
parse_opts.dayfirst = reader_opts.is_enabled_dayfirst();
parse_opts.decimal = reader_opts.get_decimal();
parse_opts.thousands = reader_opts.get_thousands();
CUDF_EXPECTS(parse_opts.decimal != parse_opts.delimiter,
"Decimal point cannot be the same as the delimiter");
CUDF_EXPECTS(parse_opts.thousands != parse_opts.delimiter,
"Thousands separator cannot be the same as the delimiter");
// Handle user-defined true values, whereby field data is substituted with a
// boolean true or numeric `1` value
if (reader_opts.get_true_values().size() != 0) {
parse_opts.trie_true =
cudf::detail::create_serialized_trie(reader_opts.get_true_values(), stream);
}
// Handle user-defined false values, whereby field data is substituted with a
// boolean false or numeric `0` value
if (reader_opts.get_false_values().size() != 0) {
parse_opts.trie_false =
cudf::detail::create_serialized_trie(reader_opts.get_false_values(), stream);
}
// Handle user-defined N/A values, whereby field data is treated as null
parse_opts.trie_na = create_na_trie(parse_opts.quotechar, reader_opts, stream);
return parse_opts;
}
reader::impl::impl(std::unique_ptr<datasource> source,
std::string filepath,
csv_reader_options const &options,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource *mr)
: mr_(mr), source_(std::move(source)), filepath_(filepath), opts_(options)
{
num_actual_cols_ = opts_.get_names().size();
num_active_cols_ = num_actual_cols_;
compression_type_ =
infer_compression_type(opts_.get_compression(),
filepath,
{{"gz", "gzip"}, {"zip", "zip"}, {"bz2", "bz2"}, {"xz", "xz"}});
opts = make_parse_options(options, stream);
}
// Forward to implementation
reader::reader(std::vector<std::string> const &filepaths,
csv_reader_options const &options,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource *mr)
{
CUDF_EXPECTS(filepaths.size() == 1, "Only a single source is currently supported.");
// Delay actual instantiation of data source until read to allow for
// partial memory mapping of file using byte ranges
_impl = std::make_unique<impl>(nullptr, filepaths[0], options, stream, mr);
}
// Forward to implementation
reader::reader(std::vector<std::unique_ptr<cudf::io::datasource>> &&sources,
csv_reader_options const &options,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource *mr)
{
CUDF_EXPECTS(sources.size() == 1, "Only a single source is currently supported.");
_impl = std::make_unique<impl>(std::move(sources[0]), "", options, stream, mr);
}
// Destructor within this translation unit
reader::~reader() = default;
// Forward to implementation
table_with_metadata reader::read(rmm::cuda_stream_view stream) { return _impl->read(stream); }
} // namespace csv
} // namespace detail
} // namespace io
} // namespace cudf
|
6a6f81547b12b8fd1971af6aded7f7da02d63c8d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define _USE_MATH_DEFINES
#include <ATen/native/Activation.h>
#include <cmath>
#include <thrust/tuple.h>
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/CUDAGeneratorImpl.h>
#include <ATen/Dispatch.h>
#include <ATen/NativeFunctions.h>
#include <ATen/TensorUtils.h>
#include <ATen/core/Array.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/hip/detail/OffsetCalculator.cuh>
#include <ATen/hip/detail/KernelUtils.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/hip/DistributionTemplates.h>
#include <c10/hip/HIPMathCompat.h>
namespace at {
namespace native {
// -----------------------------------
// prelu forward
// -----------------------------------
template <typename scalar_t>
void prelu_cuda_kernel_share_weights(
const Tensor& input,
Tensor& result,
const scalar_t* weight_data)
{
auto iter = TensorIterator::unary_op(result, input);
at::native::gpu_kernel(iter,
[weight_data] GPU_LAMBDA (scalar_t input_val) {
return (input_val > 0) ? input_val : *weight_data * input_val;
});
}
template <typename scalar_t>
__global__ void prelu_cuda_kernel_multi_weights(
scalar_t* result_data,
const scalar_t* input_data,
const scalar_t* weight_data,
int64_t input_stride0,
int64_t input_stride1,
int64_t input_numel) {
int64_t linearId = blockIdx.x * blockDim.x + threadIdx.x;
if (linearId >= input_numel) return;
// multiply values at each channel with weight[channel_index]
int64_t channel = (linearId % input_stride0) / input_stride1;
scalar_t input_data_val = input_data[linearId];
result_data[linearId] = (input_data_val > 0) ? input_data_val : weight_data[channel] * input_data_val;
}
Tensor prelu_cuda(const Tensor& self, const Tensor& weight_) {
TORCH_CHECK(self.is_cuda());
TORCH_CHECK(weight_.is_cuda());
auto input = self.contiguous();
auto weight = weight_.contiguous();
TORCH_CHECK(input.is_contiguous());
TORCH_CHECK(weight.is_contiguous());
int64_t weight_num = weight.numel();
Tensor result = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
auto strides = input.strides();
// case1: shared weight for all channels
if (weight_num == 1) {
AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, input.scalar_type(), "prelu_cuda", [&] {
prelu_cuda_kernel_share_weights<scalar_t>(
input,
result,
weight.data_ptr<scalar_t>());
});
}
else { // case2: multiple weights, one for each channel
int64_t input_ndim = input.dim();
TORCH_CHECK(input_ndim > 0, "Not allow zero-dim input tensor.");
int64_t channel_size = 1; // channel_size default to 1
int64_t input_stride0 = 1, input_stride1 = 1;
if (input_ndim > 1) {
channel_size = input.size(1); // channel is the 2nd dim of input
input_stride0 = strides[0];
input_stride1 = strides[1];
}
TORCH_CHECK(channel_size == weight_num,
"Mismatch of parameter numbers and input channel size. Found parameter numbers = ", weight_num,
" and channel size = ", channel_size, ".");
// config to run cuda kernel
int64_t input_numel = input.numel();
const dim3 block = dim3(::min(static_cast<int64_t>(cuda::getApplyBlock().x), input_numel));
dim3 grid;
int curDevice = -1;
hipGetDevice(&curDevice);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(curDevice);
TORCH_CHECK(cuda::getApplyGrid(input_numel, grid, curDevice), "prelu: input too large or too many dimensions");
AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, input.scalar_type(), "prelu_cuda", [&] {
hipLaunchKernelGGL(( prelu_cuda_kernel_multi_weights<scalar_t>)
, dim3(grid), dim3(block), 0, stream,
result.data_ptr<scalar_t>(),
input.data_ptr<scalar_t>(),
weight.data_ptr<scalar_t>(),
input_stride0,
input_stride1,
input_numel);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
}
return result;
}
// -----------------------------------
// prelu backward
// -----------------------------------
template <typename scalar_t>
void prelu_cuda_backward_kernel_share_weights(
const Tensor& input,
const Tensor& grad_out,
Tensor& input_grad,
Tensor& weight_grad_collector,
const scalar_t* weight_data) {
at::TensorIterator iter = TensorIteratorConfig()
.add_output(input_grad)
.add_output(weight_grad_collector)
.add_input(input)
.add_input(grad_out)
.build();
// N.B. `std::tuple` does not support `::operator=` on device code.
gpu_kernel_multiple_outputs(iter, [=] GPU_LAMBDA (scalar_t input, scalar_t grad_out) -> thrust::tuple<scalar_t, scalar_t> {
scalar_t input_grad = input > 0 ? grad_out : (*weight_data) * grad_out;
scalar_t weight_grad_collector = input > 0 ? scalar_t(0) : input * grad_out;
return {input_grad, weight_grad_collector};
});
}
template <typename scalar_t>
__global__ void prelu_cuda_backward_kernel_multi_weights(
const scalar_t* input_data,
const scalar_t* weight_data,
const scalar_t* grad_out_data,
scalar_t* input_grad_data,
scalar_t* weight_grad_collector,
int64_t input_stride0,
int64_t input_stride1,
int64_t input_numel) {
int64_t linearId = blockIdx.x * blockDim.x + threadIdx.x;
if (linearId >= input_numel) return;
int64_t channel = (linearId % input_stride0) / input_stride1;
scalar_t input_data_val = input_data[linearId];
scalar_t grad_out_data_val = grad_out_data[linearId];
input_grad_data[linearId] = (input_data_val > 0) ? grad_out_data_val : weight_data[channel] * grad_out_data_val;
weight_grad_collector[linearId] = (input_data_val > 0) ? scalar_t(0) : input_data_val * grad_out_data_val;
}
std::tuple<Tensor, Tensor> prelu_backward_cuda(const Tensor& grad_out_, const Tensor& self, const Tensor& weight_) {
TORCH_CHECK(grad_out_.is_cuda());
TORCH_CHECK(self.is_cuda());
TORCH_CHECK(weight_.is_cuda());
auto input = self.contiguous();
auto grad_out = grad_out_.contiguous();
auto weight = weight_.contiguous();
TORCH_CHECK(input.is_contiguous());
TORCH_CHECK(weight.is_contiguous());
TORCH_CHECK(grad_out.is_contiguous());
int64_t weight_num = weight.numel();
auto strides = input.strides();
auto dims = input.dim();
Tensor input_grad = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
Tensor weight_grad = at::empty_like(weight, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
Tensor weight_grad_collector = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
// case1: shared parameter for all channels
if (weight_num == 1) {
AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, input.scalar_type(), "prelu_backward_cuda", [&] {
prelu_cuda_backward_kernel_share_weights<scalar_t>(
input,
grad_out,
input_grad,
weight_grad_collector,
weight.data_ptr<scalar_t>());
});
weight_grad.fill_(weight_grad_collector.sum());
}
else { // case2: multiple parameters, one for each channel
int64_t input_ndim = input.dim();
TORCH_CHECK(input_ndim > 0, "Not allow zero-dim input tensor.");
int64_t channel_size = 1; // channel_size default to 1
int64_t input_stride0 = 1, input_stride1 = 1;
if (input_ndim > 1) {
channel_size = input.size(1); // channel is the 2nd dim of input
input_stride0 = strides[0];
input_stride1 = strides[1];
}
TORCH_CHECK(channel_size == weight_num,
"Mismatch of parameter numbers and input channel size. Found parameter numbers = ", weight_num,
" and channel size = ", channel_size, ".");
// config to run cuda kernel
int64_t input_numel = input.numel();
const dim3 block = dim3(::min(static_cast<int64_t>(cuda::getApplyBlock().x), input_numel));
dim3 grid;
int curDevice = -1;
hipGetDevice(&curDevice);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(curDevice);
TORCH_CHECK(cuda::getApplyGrid(input_numel, grid, curDevice), "prelu_backward_cuda: input too large or too many dimensions");
AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, input.scalar_type(), "prelu_backward_cuda", [&] {
hipLaunchKernelGGL(( prelu_cuda_backward_kernel_multi_weights<scalar_t>)
, dim3(grid), dim3(block), 0, stream,
input.data_ptr<scalar_t>(),
weight.data_ptr<scalar_t>(),
grad_out.data_ptr<scalar_t>(),
input_grad.data_ptr<scalar_t>(),
weight_grad_collector.data_ptr<scalar_t>(),
input_stride0,
input_stride1,
input_numel);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
// update weight_grad
std::vector<int64_t> reduce_dims;
reduce_dims.push_back(0);
if (dims > 2) {
for(int64_t i = 2; i < dims; i++) reduce_dims.push_back(i);
}
weight_grad = weight_grad_collector.sum(reduce_dims);
}
return std::tuple<Tensor, Tensor>{input_grad, weight_grad};
}
// -----------------------------------
// rrelu
// -----------------------------------
template <typename scalar_t, int unroll_factor, typename F>
#if __CUDA_ARCH__ >= 350 || defined __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_2(256, 4)
#endif
__global__ void rrelu_with_noise_cuda_kernel(
int numel,
PhiloxCudaState philox_args,
scalar_t* output,
scalar_t* input,
scalar_t* noise,
double lower,
double upper,
const F& random_func) {
auto seeds = at::cuda::philox::unpack(philox_args);
int idx = blockIdx.x * blockDim.x + threadIdx.x;
hiprandStatePhilox4_32_10_t state;
hiprand_init(std::get<0>(seeds),
idx,
std::get<1>(seeds),
&state);
int grid_stride = blockDim.x * gridDim.x * unroll_factor;
int rounded_size = ((numel - 1) / grid_stride + 1) * grid_stride;
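// Round the loop bound up to a whole number of grid strides so that every
// thread executes the same number of iterations, keeping the __syncthreads()
// at the bottom of the loop safe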
double range = upper - lower;
for (int linear_index = idx; linear_index < rounded_size; linear_index += grid_stride) {
auto rand = random_func(&state);
// ensure that (&rand.x)[ii] is safe
static_assert(sizeof(rand)/sizeof(rand.x) == unroll_factor, "");
#pragma unroll
for (int ii = 0; ii < unroll_factor; ii++) {
int li = linear_index + blockDim.x * gridDim.x * ii;
if (li >= numel) {
continue;
}
scalar_t r = static_cast<scalar_t>((&rand.x)[ii]);
r = r * range + lower;
if (input[li] <= 0) {
output[li] = input[li] * r;
noise[li] = r;
} else {
output[li] = input[li];
noise[li] = static_cast<scalar_t>(1); // unit slope, so backward (grad * noise) passes positives through
}
}
__syncthreads();
}
}
template <typename scalar_t>
inline void _rrelu_with_noise_cuda_train(
Tensor& output,
const Tensor& input_,
const Tensor& noise_,
const Scalar& lower_,
const Scalar& upper_,
c10::optional<Generator> generator) {
auto input = input_.contiguous();
auto noise = noise_.contiguous();
Tensor tmp_output = output.contiguous();
int64_t numel = input.numel();
auto execution_policy = calc_execution_policy(numel);
auto counter_offset = std::get<0>(execution_policy);
auto grid = std::get<1>(execution_policy);
auto block = std::get<2>(execution_policy);
auto gen = get_generator_or_default<CUDAGeneratorImpl>(
generator, cuda::detail::getDefaultCUDAGenerator());
PhiloxCudaState rng_engine_inputs;
{
// See Note [Acquire lock when using random generators]
std::lock_guard<std::mutex> lock(gen->mutex_);
rng_engine_inputs = gen->philox_cuda_state(counter_offset);
}
scalar_t* input_data = input.data_ptr<scalar_t>();
scalar_t* noise_data = noise.data_ptr<scalar_t>();
scalar_t* output_data = tmp_output.data_ptr<scalar_t>();
double lower = lower_.to<double>();
double upper = upper_.to<double>();
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
if (std::is_same<scalar_t, double>::value) {
hipLaunchKernelGGL(( rrelu_with_noise_cuda_kernel<scalar_t, 2>), dim3(grid), dim3(block), 0, stream,
numel,
rng_engine_inputs,
output_data,
input_data,
noise_data,
lower,
upper,
[] __device__ (hiprandStatePhilox4_32_10_t* state) {
return hiprand_uniform2_double(state);
});
C10_HIP_KERNEL_LAUNCH_CHECK();
} else {
// half and float
hipLaunchKernelGGL(( rrelu_with_noise_cuda_kernel<scalar_t, 4>), dim3(grid), dim3(block), 0, stream,
numel,
rng_engine_inputs,
output_data,
input_data,
noise_data,
lower, upper,
[] __device__ (hiprandStatePhilox4_32_10_t* state) {
return hiprand_uniform4(state);
});
C10_HIP_KERNEL_LAUNCH_CHECK();
}
if (!output.is_contiguous()) {
output.copy_(tmp_output);
}
}
Tensor& rrelu_with_noise_out_cuda(const Tensor& self,
const Tensor& noise,
const Scalar& lower,
const Scalar& upper,
bool training,
c10::optional<Generator> generator,
Tensor& output) {
TensorArg self_arg{self, "self", 1}, noise_arg{noise, "noise", 2},
output_arg{output, "output", 3};
checkAllSameGPU("rrelu_with_noise_out_cuda", {self_arg, noise_arg, output_arg});
if (training) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
self.scalar_type(), "rrelu_with_noise_out_cuda", [&] {
_rrelu_with_noise_cuda_train<scalar_t>(
output, self, noise, lower, upper, generator);
});
}
else {
auto lower_tensor = lower.to<double>();
auto upper_tensor = upper.to<double>();
Scalar negative_slope = (lower_tensor + upper_tensor) / 2;
at::leaky_relu_out(output, self, negative_slope);
}
return output;
}
Tensor rrelu_with_noise_cuda(
const Tensor& self,
const Tensor& noise,
const Scalar& lower,
const Scalar& upper,
bool training,
c10::optional<Generator> generator) {
Tensor output = at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
return at::native::rrelu_with_noise_out_cuda(self, noise, lower, upper, training, generator, output);
}
Tensor& rrelu_with_noise_cuda_(
Tensor& self,
const Tensor& noise,
const Scalar& lower,
const Scalar& upper,
bool training,
c10::optional<Generator> generator) {
return at::native::rrelu_with_noise_out_cuda(
self, noise, lower, upper, training, generator, self);
}
// -----------------------------------
// hardshrink
// -----------------------------------
void hardshrink_kernel(TensorIteratorBase& iter, const Scalar& value) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "hardshrink_cuda", [&]() {
auto lambd = value.to<scalar_t>();
gpu_kernel(iter, [lambd]GPU_LAMBDA(scalar_t a) -> scalar_t {
return (a >= -lambd && a <= lambd) ? scalar_t(0) : a;
});
});
}
void softshrink_kernel(TensorIteratorBase& iter, const Scalar& value) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "softshrink_cuda", [&]() {
auto lambd = value.to<scalar_t>();
gpu_kernel(iter, [lambd]GPU_LAMBDA(scalar_t a) -> scalar_t {
return a > lambd ? a - lambd : (a < -lambd ? a + lambd : scalar_t(0));
});
});
}
void shrink_backward_kernel(TensorIteratorBase& iter, const Scalar& value) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "shrink_backward_cuda", [&]() {
auto lambd = value.to<scalar_t>();
gpu_kernel(iter, [lambd]GPU_LAMBDA(scalar_t grad_val, scalar_t self_val) -> scalar_t {
return (self_val >= -lambd && self_val <= lambd) ? scalar_t(0) : grad_val;
});
});
}
void hardtanh_backward_kernel(TensorIterator& iter, const Scalar& min, const Scalar& max) {
AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, iter.dtype(), "hardtanh_backward_cuda", [&]() {
auto min_val = min.to<scalar_t>();
auto max_val = max.to<scalar_t>();
gpu_kernel(iter, [min_val, max_val]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return (b <= min_val) || (b >= max_val) ? scalar_t(0) : a;
});
});
}
void softplus_kernel(TensorIteratorBase& iter, const Scalar& beta_, const Scalar& threshold_) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "softplus_cuda", [&]() {
auto beta = beta_.to<scalar_t>();
auto threshold = threshold_.to<scalar_t>();
gpu_kernel(iter, [beta, threshold]GPU_LAMBDA(scalar_t a) -> scalar_t {
return (a * beta) > threshold ? a : static_cast<scalar_t>(::log1p(::exp(a * beta))) / beta;
});
});
}
void softplus_backward_kernel(TensorIteratorBase& iter, const Scalar& beta_, const Scalar& threshold_) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "softplus_backward_cuda", [&]() {
auto beta = beta_.to<scalar_t>();
auto threshold = threshold_.to<scalar_t>();
gpu_kernel(iter, [beta, threshold]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
scalar_t z = ::exp(b * beta);
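// Below the threshold the softplus derivative is sigmoid(b * beta) = z / (z + 1)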
return (b * beta) > threshold ? a : a * z / (z + scalar_t(1.));
});
});
}
template <typename scalar_t>
void threshold_kernel_impl(TensorIteratorBase& iter, scalar_t threshold, scalar_t value) {
gpu_kernel_with_scalars(iter, [=]GPU_LAMBDA(scalar_t x, scalar_t other) -> scalar_t {
return x <= threshold ? value : other;
});
}
static void threshold_kernel_cuda(TensorIteratorBase& iter, const Scalar& threshold, const Scalar& value) {
AT_DISPATCH_ALL_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "threshold_cuda", [&] {
threshold_kernel_impl<scalar_t>(iter, threshold.to<scalar_t>(), value.to<scalar_t>());
});
}
void elu_kernel(TensorIteratorBase& iter, const Scalar& alpha, const Scalar& scale, const Scalar& input_scale) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "elu_cuda", [&]() {
auto negcoef = alpha.to<scalar_t>() * scale.to<scalar_t>();
auto poscoef = scale.to<scalar_t>();
auto negiptcoef = input_scale.to<scalar_t>();
gpu_kernel(iter, [negcoef, poscoef, negiptcoef]GPU_LAMBDA(scalar_t a) -> scalar_t {
return a > scalar_t(0) ? a * poscoef : (static_cast<scalar_t>(::exp(a * negiptcoef)) - scalar_t(1.)) * negcoef;
});
});
}
void elu_backward_kernel(TensorIteratorBase& iter, const Scalar& alpha, const Scalar& scale, const Scalar& input_scale, bool is_result) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "elu_backward_cuda", [&]() {
auto negcoef = alpha.to<scalar_t>() * scale.to<scalar_t>();
auto poscoef = scale.to<scalar_t>();
auto negiptcoef = input_scale.to<scalar_t>();
gpu_kernel(iter, [negcoef, poscoef, negiptcoef, is_result]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
if (is_result) {
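// `b` is the saved forward result here, b = negcoef * (exp(x * negiptcoef) - 1),
// so the derivative negcoef * negiptcoef * exp(x * negiptcoef) simplifies to
// negiptcoef * (b + negcoef)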
return b <= scalar_t(0) ? a * negiptcoef * (b + negcoef) : a * poscoef;
} else {
return b <= scalar_t(0) ? a * negiptcoef * negcoef * (static_cast<scalar_t>(::exp(b * negiptcoef))) : a * poscoef;
}
});
});
}
namespace {
void GeluCUDAKernelImpl(TensorIteratorBase& it) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, it.dtype(), "GeluCUDAKernelImpl", [&]() {
using T_ACC = acc_type<scalar_t, true>;
gpu_kernel(it, [] GPU_LAMBDA(scalar_t x) -> scalar_t {
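// GELU(x) = x * Phi(x), where Phi is the standard normal CDF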
return static_cast<T_ACC>(x) *
c10::hip::compat::normcdf(static_cast<T_ACC>(x));
});
});
}
void GeluBackwardCUDAKernelImpl(TensorIteratorBase& it) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16,
it.dtype(), "GeluBackwardCUDAKernelImpl", [&]() {
using T_ACC = acc_type<scalar_t, true>;
gpu_kernel(it, [] GPU_LAMBDA(scalar_t dy, scalar_t x) -> scalar_t {
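// d/dx [x * Phi(x)] = Phi(x) + x * phi(x); kBeta below is the normal pdf
// normalization: (2/sqrt(pi)) * (1/sqrt(2)) * 0.5 = 1/sqrt(2*pi)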
constexpr T_ACC kBeta = M_2_SQRTPI * M_SQRT1_2 * T_ACC(0.5);
const T_ACC cdf = c10::hip::compat::normcdf(static_cast<T_ACC>(x));
const T_ACC pdf =
c10::hip::compat::exp(
T_ACC(-0.5) * static_cast<T_ACC>(x) * static_cast<T_ACC>(x)) *
kBeta;
return static_cast<T_ACC>(dy) * (cdf + static_cast<T_ACC>(x) * pdf);
});
});
}
void leaky_relu_kernel(TensorIteratorBase& iter, const Scalar& negval_) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "leaky_relu_cuda", [&]() {
auto negval = negval_.to<scalar_t>();
gpu_kernel(iter, [negval]GPU_LAMBDA(scalar_t a) -> scalar_t {
return a > scalar_t(0) ? a : a * negval;
});
});
}
void leaky_relu_backward_kernel(TensorIteratorBase& iter, const Scalar& negval_) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "leaky_relu_backward_cuda", [&]() {
auto negval = negval_.to<scalar_t>();
gpu_kernel(iter, [negval]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return a > scalar_t(0) ? b : b * negval;
});
});
}
void hardswish_kernel(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "hardswish_cuda", [&]() {
using T_ACC = acc_type<scalar_t, true>;
const T_ACC zero(0.0f);
const T_ACC one_sixth(1.0f / 6.0f);
const T_ACC three(3.0f);
const T_ACC six(6.0f);
gpu_kernel(iter, [zero, one_sixth, three, six]GPU_LAMBDA(scalar_t self_val) -> scalar_t {
T_ACC x = static_cast<T_ACC>(self_val);
return x * ::min(::max(x + three, zero), six) * one_sixth;
});
});
}
void hardswish_backward_kernel(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "hardswish_backward_cuda", [&]() {
using T_ACC = acc_type<scalar_t, true>;
const T_ACC zero(0.0f);
const T_ACC three(3.0f);
const T_ACC neg_three(-3.0f);
const T_ACC one_half(0.5f);
gpu_kernel(
iter,
[zero, three, neg_three, one_half]GPU_LAMBDA(scalar_t grad_val_, scalar_t self_val_) -> scalar_t {
T_ACC grad_val = static_cast<T_ACC>(grad_val_);
T_ACC self_val = static_cast<T_ACC>(self_val_);
if (self_val < neg_three) {
return zero;
} else if (self_val <= three) {
return grad_val * ((self_val / three) + one_half);
} else {
return grad_val;
}
});
});
}
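// hardsigmoid(x) = clamp((x + 3) / 6, 0, 1): the same clamp as hardswish,
// without the final multiply by x.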
void hardsigmoid_kernel(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "hardsigmoid_cuda", [&]() {
using T_ACC = acc_type<scalar_t, true>;
const T_ACC zero(0.0f);
const T_ACC one_sixth(1.0f / 6.0f);
const T_ACC three(3.0f);
const T_ACC six(6.0f);
gpu_kernel(iter, [zero, one_sixth, three, six]GPU_LAMBDA(scalar_t self_val) -> scalar_t {
T_ACC x = static_cast<T_ACC>(self_val);
return ::min(::max(x + three, zero), six) * one_sixth;
});
});
}
void hardsigmoid_backward_kernel(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "hardsigmoid_backward_cuda", [&]() {
using T_ACC = acc_type<scalar_t, true>;
const T_ACC zero(0.0f);
const T_ACC three(3.0f);
const T_ACC neg_three(-3.0f);
const T_ACC one_sixth(1.0f / 6.0f);
gpu_kernel(
iter,
[zero, three, neg_three, one_sixth]GPU_LAMBDA(scalar_t grad_val_, scalar_t self_val_) -> scalar_t {
T_ACC grad_val = static_cast<T_ACC>(grad_val_);
T_ACC self_val = static_cast<T_ACC>(self_val_);
return (self_val > neg_three && self_val < three)
? grad_val * one_sixth
: zero;
});
});
}
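// SiLU (swish): x * sigmoid(x), written as x / (1 + exp(-x)) in the
// accumulation type.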
void silu_kernel(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
iter.dtype(),
"silu_cuda",
[&]() {
gpu_kernel(
iter,
[] GPU_LAMBDA(scalar_t x) -> scalar_t {
using T_ACC = acc_type<scalar_t, true>;
const T_ACC x_acc = static_cast<T_ACC>(x);
return x_acc / (T_ACC(1) + c10::hip::compat::exp(-x_acc));
});
});
}
void silu_backward_kernel(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
iter.dtype(),
"silu_backward_cuda",
[&]() {
gpu_kernel(
iter,
[] GPU_LAMBDA(scalar_t dy, scalar_t x) -> scalar_t {
using T_ACC = acc_type<scalar_t, true>;
const T_ACC dy_acc = static_cast<T_ACC>(dy);
const T_ACC x_acc = static_cast<T_ACC>(x);
const T_ACC s_acc =
T_ACC(1) / (T_ACC(1) + c10::hip::compat::exp(-x_acc));
return dy_acc * s_acc * (T_ACC(1) + x_acc * (T_ACC(1) - s_acc));
});
});
}
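// Mish: x * tanh(softplus(x)); log1p(exp(x)) computes softplus accurately
// for large negative x, where 1 + exp(x) would round to 1.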
void mish_kernel(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
iter.dtype(),
"mish_cuda",
[&]() {
gpu_kernel(
iter,
[] GPU_LAMBDA(scalar_t x) -> scalar_t {
using T_ACC = acc_type<scalar_t, true>;
const T_ACC x_acc = static_cast<T_ACC>(x);
return x_acc * c10::hip::compat::tanh(c10::hip::compat::log1p(c10::hip::compat::exp(x_acc)));
});
});
}
void mish_backward_kernel(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
iter.dtype(),
"mish_backward_cuda",
[&]() {
gpu_kernel(
iter,
[] GPU_LAMBDA(scalar_t dy, scalar_t x) -> scalar_t {
using T_ACC = acc_type<scalar_t, true>;
const T_ACC dy_acc = static_cast<T_ACC>(dy);
const T_ACC x_acc = static_cast<T_ACC>(x);
const T_ACC s_acc =
T_ACC(1) / (T_ACC(1) + c10::hip::compat::exp(-x_acc));
const T_ACC t_acc =
c10::hip::compat::tanh(c10::hip::compat::log1p(c10::hip::compat::exp(x_acc)));
return dy_acc * (t_acc + x_acc * s_acc * (T_ACC(1) - t_acc * t_acc));
});
});
}
} // namespace
TORCH_IMPL_FUNC(gelu_out_cuda) (
const Tensor& self, const Tensor& result
) {
GeluCUDAKernelImpl(*this);
}
TORCH_IMPL_FUNC(gelu_backward_out_cuda) (
const Tensor& grad, const Tensor& self, const Tensor& grad_input
) {
GeluBackwardCUDAKernelImpl(*this);
}
REGISTER_DISPATCH(hardtanh_backward_stub, &hardtanh_backward_kernel);
REGISTER_DISPATCH(hardshrink_stub, &hardshrink_kernel);
REGISTER_DISPATCH(softshrink_stub, &softshrink_kernel);
REGISTER_DISPATCH(shrink_backward_stub, &shrink_backward_kernel);
REGISTER_DISPATCH(elu_stub, &elu_kernel);
REGISTER_DISPATCH(elu_backward_stub, &elu_backward_kernel);
REGISTER_DISPATCH(leaky_relu_stub, &leaky_relu_kernel);
REGISTER_DISPATCH(leaky_relu_backward_stub, &leaky_relu_backward_kernel);
REGISTER_DISPATCH(hardswish_stub, &hardswish_kernel);
REGISTER_DISPATCH(hardswish_backward_stub, &hardswish_backward_kernel);
REGISTER_DISPATCH(hardsigmoid_stub, &hardsigmoid_kernel);
REGISTER_DISPATCH(hardsigmoid_backward_stub, &hardsigmoid_backward_kernel);
REGISTER_DISPATCH(softplus_stub, &softplus_kernel);
REGISTER_DISPATCH(softplus_backward_stub, &softplus_backward_kernel);
REGISTER_DISPATCH(silu_stub, &silu_kernel);
REGISTER_DISPATCH(silu_backward_stub, &silu_backward_kernel);
REGISTER_DISPATCH(mish_stub, &mish_kernel);
REGISTER_DISPATCH(mish_backward_stub, &mish_backward_kernel);
REGISTER_DISPATCH(threshold_stub, &threshold_kernel_cuda);
} // namespace native
} // namespace at
| 6a6f81547b12b8fd1971af6aded7f7da02d63c8d.cu | #define _USE_MATH_DEFINES
#include <ATen/native/Activation.h>
#include <cmath>
#include <thrust/tuple.h>
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/CUDAGeneratorImpl.h>
#include <ATen/Dispatch.h>
#include <ATen/NativeFunctions.h>
#include <ATen/TensorUtils.h>
#include <ATen/core/Array.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/cuda/detail/OffsetCalculator.cuh>
#include <ATen/cuda/detail/KernelUtils.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/DistributionTemplates.h>
#include <c10/cuda/CUDAMathCompat.h>
namespace at {
namespace native {
// -----------------------------------
// prelu forward
// -----------------------------------
template <typename scalar_t>
void prelu_cuda_kernel_share_weights(
const Tensor& input,
Tensor& result,
const scalar_t* weight_data)
{
auto iter = TensorIterator::unary_op(result, input);
at::native::gpu_kernel(iter,
[weight_data] GPU_LAMBDA (scalar_t input_val) {
return (input_val > 0) ? input_val : *weight_data * input_val;
});
}
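// One weight per channel. With a contiguous input, strides[0] is the
// per-sample extent and strides[1] the per-channel extent, so
// channel = (linearId % stride0) / stride1 recovers the channel index.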
template <typename scalar_t>
__global__ void prelu_cuda_kernel_multi_weights(
scalar_t* result_data,
const scalar_t* input_data,
const scalar_t* weight_data,
int64_t input_stride0,
int64_t input_stride1,
int64_t input_numel) {
int64_t linearId = blockIdx.x * blockDim.x + threadIdx.x;
if (linearId >= input_numel) return;
// multiply values at each channel with weight[channel_index]
int64_t channel = (linearId % input_stride0) / input_stride1;
scalar_t input_data_val = input_data[linearId];
result_data[linearId] = (input_data_val > 0) ? input_data_val : weight_data[channel] * input_data_val;
}
Tensor prelu_cuda(const Tensor& self, const Tensor& weight_) {
TORCH_CHECK(self.is_cuda());
TORCH_CHECK(weight_.is_cuda());
auto input = self.contiguous();
auto weight = weight_.contiguous();
TORCH_CHECK(input.is_contiguous());
TORCH_CHECK(weight.is_contiguous());
int64_t weight_num = weight.numel();
Tensor result = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
auto strides = input.strides();
// case1: shared weight for all channels
if (weight_num == 1) {
AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, input.scalar_type(), "prelu_cuda", [&] {
prelu_cuda_kernel_share_weights<scalar_t>(
input,
result,
weight.data_ptr<scalar_t>());
});
}
else { // case2: multiple weights, one for each channel
int64_t input_ndim = input.dim();
TORCH_CHECK(input_ndim > 0, "Zero-dim input tensor is not allowed.");
int64_t channel_size = 1; // channel_size default to 1
int64_t input_stride0 = 1, input_stride1 = 1;
if (input_ndim > 1) {
channel_size = input.size(1); // channel is the 2nd dim of input
input_stride0 = strides[0];
input_stride1 = strides[1];
}
TORCH_CHECK(channel_size == weight_num,
"Mismatch of parameter numbers and input channel size. Found parameter numbers = ", weight_num,
" and channel size = ", channel_size, ".");
// config to run cuda kernel
int64_t input_numel = input.numel();
const dim3 block = dim3(std::min(static_cast<int64_t>(cuda::getApplyBlock().x), input_numel));
dim3 grid;
int curDevice = -1;
cudaGetDevice(&curDevice);
cudaStream_t stream = at::cuda::getCurrentCUDAStream(curDevice);
TORCH_CHECK(cuda::getApplyGrid(input_numel, grid, curDevice), "prelu: input too large or too many dimensions");
AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, input.scalar_type(), "prelu_cuda", [&] {
prelu_cuda_kernel_multi_weights<scalar_t>
<<<grid, block, 0, stream>>>(
result.data_ptr<scalar_t>(),
input.data_ptr<scalar_t>(),
weight.data_ptr<scalar_t>(),
input_stride0,
input_stride1,
input_numel);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
}
return result;
}
// -----------------------------------
// prelu backward
// -----------------------------------
template <typename scalar_t>
void prelu_cuda_backward_kernel_share_weights(
const Tensor& input,
const Tensor& grad_out,
Tensor& input_grad,
Tensor& weight_grad_collector,
const scalar_t* weight_data) {
at::TensorIterator iter = TensorIteratorConfig()
.add_output(input_grad)
.add_output(weight_grad_collector)
.add_input(input)
.add_input(grad_out)
.build();
// N.B. `std::tuple` does not support `operator=` in device code, so
// `thrust::tuple` is used to return the two outputs instead.
gpu_kernel_multiple_outputs(iter, [=] GPU_LAMBDA (scalar_t input, scalar_t grad_out) -> thrust::tuple<scalar_t, scalar_t> {
scalar_t input_grad = input > 0 ? grad_out : (*weight_data) * grad_out;
scalar_t weight_grad_collector = input > 0 ? scalar_t(0) : input * grad_out;
return {input_grad, weight_grad_collector};
});
}
template <typename scalar_t>
__global__ void prelu_cuda_backward_kernel_multi_weights(
const scalar_t* input_data,
const scalar_t* weight_data,
const scalar_t* grad_out_data,
scalar_t* input_grad_data,
scalar_t* weight_grad_collector,
int64_t input_stride0,
int64_t input_stride1,
int64_t input_numel) {
int64_t linearId = blockIdx.x * blockDim.x + threadIdx.x;
if (linearId >= input_numel) return;
int64_t channel = (linearId % input_stride0) / input_stride1;
scalar_t input_data_val = input_data[linearId];
scalar_t grad_out_data_val = grad_out_data[linearId];
input_grad_data[linearId] = (input_data_val > 0) ? grad_out_data_val : weight_data[channel] * grad_out_data_val;
weight_grad_collector[linearId] = (input_data_val > 0) ? scalar_t(0) : input_data_val * grad_out_data_val;
}
std::tuple<Tensor, Tensor> prelu_backward_cuda(const Tensor& grad_out_, const Tensor& self, const Tensor& weight_) {
TORCH_CHECK(grad_out_.is_cuda());
TORCH_CHECK(self.is_cuda());
TORCH_CHECK(weight_.is_cuda());
auto input = self.contiguous();
auto grad_out = grad_out_.contiguous();
auto weight = weight_.contiguous();
TORCH_CHECK(input.is_contiguous());
TORCH_CHECK(weight.is_contiguous());
TORCH_CHECK(grad_out.is_contiguous());
int64_t weight_num = weight.numel();
auto strides = input.strides();
auto dims = input.dim();
Tensor input_grad = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
Tensor weight_grad = at::empty_like(weight, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
Tensor weight_grad_collector = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
// case1: shared parameter for all channels
if (weight_num == 1) {
AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, input.scalar_type(), "prelu_backward_cuda", [&] {
prelu_cuda_backward_kernel_share_weights<scalar_t>(
input,
grad_out,
input_grad,
weight_grad_collector,
weight.data_ptr<scalar_t>());
});
weight_grad.fill_(weight_grad_collector.sum());
}
else { // case2: multiple parameters, one for each channel
int64_t input_ndim = input.dim();
TORCH_CHECK(input_ndim > 0, "Zero-dim input tensor is not allowed.");
int64_t channel_size = 1; // channel_size default to 1
int64_t input_stride0 = 1, input_stride1 = 1;
if (input_ndim > 1) {
channel_size = input.size(1); // channel is the 2nd dim of input
input_stride0 = strides[0];
input_stride1 = strides[1];
}
TORCH_CHECK(channel_size == weight_num,
"Mismatch of parameter numbers and input channel size. Found parameter numbers = ", weight_num,
" and channel size = ", channel_size, ".");
// config to run cuda kernel
int64_t input_numel = input.numel();
const dim3 block = dim3(std::min(static_cast<int64_t>(cuda::getApplyBlock().x), input_numel));
dim3 grid;
int curDevice = -1;
cudaGetDevice(&curDevice);
cudaStream_t stream = at::cuda::getCurrentCUDAStream(curDevice);
TORCH_CHECK(cuda::getApplyGrid(input_numel, grid, curDevice), "prelu_backward_cuda: input too large or too many dimensions");
AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, input.scalar_type(), "prelu_backward_cuda", [&] {
prelu_cuda_backward_kernel_multi_weights<scalar_t>
<<<grid, block, 0, stream>>>(
input.data_ptr<scalar_t>(),
weight.data_ptr<scalar_t>(),
grad_out.data_ptr<scalar_t>(),
input_grad.data_ptr<scalar_t>(),
weight_grad_collector.data_ptr<scalar_t>(),
input_stride0,
input_stride1,
input_numel);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
// update weight_grad
std::vector<int64_t> reduce_dims;
reduce_dims.push_back(0);
if (dims > 2) {
for(int64_t i = 2; i < dims; i++) reduce_dims.push_back(i);
}
weight_grad = weight_grad_collector.sum(reduce_dims);
}
return std::tuple<Tensor, Tensor>{input_grad, weight_grad};
}
// -----------------------------------
// rrelu
// -----------------------------------
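// In training mode every element draws its own negative slope uniformly from
// [lower, upper] and stores it in `noise` for the backward pass; in eval mode
// rrelu_with_noise_out_cuda below falls back to leaky_relu with the mean slope.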
template <typename scalar_t, int unroll_factor, typename F>
#if __CUDA_ARCH__ >= 350 || defined __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_2(256, 4)
#endif
__global__ void rrelu_with_noise_cuda_kernel(
int numel,
PhiloxCudaState philox_args,
scalar_t* output,
scalar_t* input,
scalar_t* noise,
double lower,
double upper,
const F& random_func) {
auto seeds = at::cuda::philox::unpack(philox_args);
int idx = blockIdx.x * blockDim.x + threadIdx.x;
curandStatePhilox4_32_10_t state;
curand_init(std::get<0>(seeds),
idx,
std::get<1>(seeds),
&state);
int grid_stride = blockDim.x * gridDim.x * unroll_factor;
int rounded_size = ((numel - 1) / grid_stride + 1) * grid_stride;
double range = upper - lower;
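// Iterate up to rounded_size (a multiple of grid_stride) so every thread
// performs the same number of curand calls and all threads in a block reach
// the __syncthreads() below together.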
for (int linear_index = idx; linear_index < rounded_size; linear_index += grid_stride) {
auto rand = random_func(&state);
// the vectorized RNG result must have exactly unroll_factor components,
// so that indexing (&rand.x)[ii] below stays in bounds
static_assert(sizeof(rand)/sizeof(rand.x) == unroll_factor,
"RNG vector width must match unroll_factor");
#pragma unroll
for (int ii = 0; ii < unroll_factor; ii++) {
int li = linear_index + blockDim.x * gridDim.x * ii;
if (li >= numel) {
continue;
}
scalar_t r = static_cast<scalar_t>((&rand.x)[ii]);
r = r * range + lower;
if (input[li] <= 0) {
output[li] = input[li] * r;
noise[li] = r;
} else {
output[li] = input[li];
noise[li] = static_cast<scalar_t>(0);
}
}
__syncthreads();
}
}
template <typename scalar_t>
inline void _rrelu_with_noise_cuda_train(
Tensor& output,
const Tensor& input_,
const Tensor& noise_,
const Scalar& lower_,
const Scalar& upper_,
c10::optional<Generator> generator) {
auto input = input_.contiguous();
auto noise = noise_.contiguous();
Tensor tmp_output = output.contiguous();
int64_t numel = input.numel();
auto execution_policy = calc_execution_policy(numel);
auto counter_offset = std::get<0>(execution_policy);
auto grid = std::get<1>(execution_policy);
auto block = std::get<2>(execution_policy);
auto gen = get_generator_or_default<CUDAGeneratorImpl>(
generator, cuda::detail::getDefaultCUDAGenerator());
PhiloxCudaState rng_engine_inputs;
{
// See Note [Acquire lock when using random generators]
std::lock_guard<std::mutex> lock(gen->mutex_);
rng_engine_inputs = gen->philox_cuda_state(counter_offset);
}
scalar_t* input_data = input.data_ptr<scalar_t>();
scalar_t* noise_data = noise.data_ptr<scalar_t>();
scalar_t* output_data = tmp_output.data_ptr<scalar_t>();
double lower = lower_.to<double>();
double upper = upper_.to<double>();
auto stream = at::cuda::getCurrentCUDAStream();
if (std::is_same<scalar_t, double>::value) {
rrelu_with_noise_cuda_kernel<scalar_t, 2><<<grid, block, 0, stream>>>(
numel,
rng_engine_inputs,
output_data,
input_data,
noise_data,
lower,
upper,
[] __device__ (curandStatePhilox4_32_10_t* state) {
return curand_uniform2_double(state);
});
C10_CUDA_KERNEL_LAUNCH_CHECK();
} else {
// half and float
rrelu_with_noise_cuda_kernel<scalar_t, 4><<<grid, block, 0, stream>>>(
numel,
rng_engine_inputs,
output_data,
input_data,
noise_data,
lower, upper,
[] __device__ (curandStatePhilox4_32_10_t* state) {
return curand_uniform4(state);
});
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
if (!output.is_contiguous()) {
output.copy_(tmp_output);
}
}
Tensor& rrelu_with_noise_out_cuda(const Tensor& self,
const Tensor& noise,
const Scalar& lower,
const Scalar& upper,
bool training,
c10::optional<Generator> generator,
Tensor& output) {
TensorArg self_arg{self, "self", 1}, noise_arg{noise, "noise", 2},
output_arg{output, "output", 3};
checkAllSameGPU("rrelu_with_noise_out_cuda", {self_arg, noise_arg, output_arg});
if (training) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
self.scalar_type(), "rrelu_with_noise_out_cuda", [&] {
_rrelu_with_noise_cuda_train<scalar_t>(
output, self, noise, lower, upper, generator);
});
}
else {
auto lower_tensor = lower.to<double>();
auto upper_tensor = upper.to<double>();
Scalar negative_slope = (lower_tensor + upper_tensor) / 2;
at::leaky_relu_out(output, self, negative_slope);
}
return output;
}
Tensor rrelu_with_noise_cuda(
const Tensor& self,
const Tensor& noise,
const Scalar& lower,
const Scalar& upper,
bool training,
c10::optional<Generator> generator) {
Tensor output = at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
return at::native::rrelu_with_noise_out_cuda(self, noise, lower, upper, training, generator, output);
}
Tensor& rrelu_with_noise_cuda_(
Tensor& self,
const Tensor& noise,
const Scalar& lower,
const Scalar& upper,
bool training,
c10::optional<Generator> generator) {
return at::native::rrelu_with_noise_out_cuda(
self, noise, lower, upper, training, generator, self);
}
// -----------------------------------
// hardshrink
// -----------------------------------
void hardshrink_kernel(TensorIteratorBase& iter, const Scalar& value) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "hardshrink_cuda", [&]() {
auto lambd = value.to<scalar_t>();
gpu_kernel(iter, [lambd]GPU_LAMBDA(scalar_t a) -> scalar_t {
return (a >= -lambd && a <= lambd) ? scalar_t(0) : a;
});
});
}
void softshrink_kernel(TensorIteratorBase& iter, const Scalar& value) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "softshrink_cuda", [&]() {
auto lambd = value.to<scalar_t>();
gpu_kernel(iter, [lambd]GPU_LAMBDA(scalar_t a) -> scalar_t {
return a > lambd ? a - lambd : (a < -lambd ? a + lambd : scalar_t(0));
});
});
}
void shrink_backward_kernel(TensorIteratorBase& iter, const Scalar& value) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "shrink_backward_cuda", [&]() {
auto lambd = value.to<scalar_t>();
gpu_kernel(iter, [lambd]GPU_LAMBDA(scalar_t grad_val, scalar_t self_val) -> scalar_t {
return (self_val >= -lambd && self_val <= lambd) ? scalar_t(0) : grad_val;
});
});
}
void hardtanh_backward_kernel(TensorIterator& iter, const Scalar& min, const Scalar& max) {
AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, iter.dtype(), "hardtanh_backward_cuda", [&]() {
auto min_val = min.to<scalar_t>();
auto max_val = max.to<scalar_t>();
gpu_kernel(iter, [min_val, max_val]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return (b <= min_val) || (b >= max_val) ? scalar_t(0) : a;
});
});
}
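// softplus(x) = log(1 + exp(beta * x)) / beta; once beta * x exceeds the
// threshold the function is linear to machine precision, so x is returned
// as-is to avoid overflowing exp.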
void softplus_kernel(TensorIteratorBase& iter, const Scalar& beta_, const Scalar& threshold_) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "softplus_cuda", [&]() {
auto beta = beta_.to<scalar_t>();
auto threshold = threshold_.to<scalar_t>();
gpu_kernel(iter, [beta, threshold]GPU_LAMBDA(scalar_t a) -> scalar_t {
return (a * beta) > threshold ? a : static_cast<scalar_t>(::log1p(std::exp(a * beta))) / beta;
});
});
}
void softplus_backward_kernel(TensorIteratorBase& iter, const Scalar& beta_, const Scalar& threshold_) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "softplus_backward_cuda", [&]() {
auto beta = beta_.to<scalar_t>();
auto threshold = threshold_.to<scalar_t>();
gpu_kernel(iter, [beta, threshold]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
scalar_t z = std::exp(b * beta);
return (b * beta) > threshold ? a : a * z / (z + scalar_t(1.));
});
});
}
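// Shared helper for the threshold forward and backward stubs: x is self,
// while `other` carries self in the forward pass and the incoming gradient
// (with value = 0) in the backward pass.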
template <typename scalar_t>
void threshold_kernel_impl(TensorIteratorBase& iter, scalar_t threshold, scalar_t value) {
gpu_kernel_with_scalars(iter, [=]GPU_LAMBDA(scalar_t x, scalar_t other) -> scalar_t {
return x <= threshold ? value : other;
});
}
static void threshold_kernel_cuda(TensorIteratorBase& iter, const Scalar& threshold, const Scalar& value) {
AT_DISPATCH_ALL_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "threshold_cuda", [&] {
threshold_kernel_impl<scalar_t>(iter, threshold.to<scalar_t>(), value.to<scalar_t>());
});
}
void elu_kernel(TensorIteratorBase& iter, const Scalar& alpha, const Scalar& scale, const Scalar& input_scale) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "elu_cuda", [&]() {
auto negcoef = alpha.to<scalar_t>() * scale.to<scalar_t>();
auto poscoef = scale.to<scalar_t>();
auto negiptcoef = input_scale.to<scalar_t>();
gpu_kernel(iter, [negcoef, poscoef, negiptcoef]GPU_LAMBDA(scalar_t a) -> scalar_t {
return a > scalar_t(0) ? a * poscoef : (static_cast<scalar_t>(std::exp(a * negiptcoef)) - scalar_t(1.)) * negcoef;
});
});
}
void elu_backward_kernel(TensorIteratorBase& iter, const Scalar& alpha, const Scalar& scale, const Scalar& input_scale, bool is_result) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "elu_backward_cuda", [&]() {
auto negcoef = alpha.to<scalar_t>() * scale.to<scalar_t>();
auto poscoef = scale.to<scalar_t>();
auto negiptcoef = input_scale.to<scalar_t>();
gpu_kernel(iter, [negcoef, poscoef, negiptcoef, is_result]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
if (is_result) {
return b <= scalar_t(0) ? a * negiptcoef * (b + negcoef) : a * poscoef;
} else {
return b <= scalar_t(0) ? a * negiptcoef * negcoef * (static_cast<scalar_t>(std::exp(b * negiptcoef))) : a * poscoef;
}
});
});
}
namespace {
void GeluCUDAKernelImpl(TensorIteratorBase& it) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, it.dtype(), "GeluCUDAKernelImpl", [&]() {
using T_ACC = acc_type<scalar_t, true>;
gpu_kernel(it, [] GPU_LAMBDA(scalar_t x) -> scalar_t {
return static_cast<T_ACC>(x) *
c10::cuda::compat::normcdf(static_cast<T_ACC>(x));
});
});
}
void GeluBackwardCUDAKernelImpl(TensorIteratorBase& it) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16,
it.dtype(), "GeluBackwardCUDAKernelImpl", [&]() {
using T_ACC = acc_type<scalar_t, true>;
gpu_kernel(it, [] GPU_LAMBDA(scalar_t dy, scalar_t x) -> scalar_t {
constexpr T_ACC kBeta = M_2_SQRTPI * M_SQRT1_2 * T_ACC(0.5);
const T_ACC cdf = c10::cuda::compat::normcdf(static_cast<T_ACC>(x));
const T_ACC pdf =
c10::cuda::compat::exp(
T_ACC(-0.5) * static_cast<T_ACC>(x) * static_cast<T_ACC>(x)) *
kBeta;
return static_cast<T_ACC>(dy) * (cdf + static_cast<T_ACC>(x) * pdf);
});
});
}
void leaky_relu_kernel(TensorIteratorBase& iter, const Scalar& negval_) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "leaky_relu_cuda", [&]() {
auto negval = negval_.to<scalar_t>();
gpu_kernel(iter, [negval]GPU_LAMBDA(scalar_t a) -> scalar_t {
return a > scalar_t(0) ? a : a * negval;
});
});
}
void leaky_relu_backward_kernel(TensorIteratorBase& iter, const Scalar& negval_) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "leaky_relu_backward_cuda", [&]() {
auto negval = negval_.to<scalar_t>();
gpu_kernel(iter, [negval]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return a > scalar_t(0) ? b : b * negval;
});
});
}
void hardswish_kernel(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "hardswish_cuda", [&]() {
using T_ACC = acc_type<scalar_t, true>;
const T_ACC zero(0.0f);
const T_ACC one_sixth(1.0f / 6.0f);
const T_ACC three(3.0f);
const T_ACC six(6.0f);
gpu_kernel(iter, [zero, one_sixth, three, six]GPU_LAMBDA(scalar_t self_val) -> scalar_t {
T_ACC x = static_cast<T_ACC>(self_val);
return x * std::min(std::max(x + three, zero), six) * one_sixth;
});
});
}
void hardswish_backward_kernel(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "hardswish_backward_cuda", [&]() {
using T_ACC = acc_type<scalar_t, true>;
const T_ACC zero(0.0f);
const T_ACC three(3.0f);
const T_ACC neg_three(-3.0f);
const T_ACC one_half(0.5f);
gpu_kernel(
iter,
[zero, three, neg_three, one_half]GPU_LAMBDA(scalar_t grad_val_, scalar_t self_val_) -> scalar_t {
T_ACC grad_val = static_cast<T_ACC>(grad_val_);
T_ACC self_val = static_cast<T_ACC>(self_val_);
if (self_val < neg_three) {
return zero;
} else if (self_val <= three) {
return grad_val * ((self_val / three) + one_half);
} else {
return grad_val;
}
});
});
}
void hardsigmoid_kernel(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "hardsigmoid_cuda", [&]() {
using T_ACC = acc_type<scalar_t, true>;
const T_ACC zero(0.0f);
const T_ACC one_sixth(1.0f / 6.0f);
const T_ACC three(3.0f);
const T_ACC six(6.0f);
gpu_kernel(iter, [zero, one_sixth, three, six]GPU_LAMBDA(scalar_t self_val) -> scalar_t {
T_ACC x = static_cast<T_ACC>(self_val);
return std::min(std::max(x + three, zero), six) * one_sixth;
});
});
}
void hardsigmoid_backward_kernel(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "hardsigmoid_backward_cuda", [&]() {
using T_ACC = acc_type<scalar_t, true>;
const T_ACC zero(0.0f);
const T_ACC three(3.0f);
const T_ACC neg_three(-3.0f);
const T_ACC one_sixth(1.0f / 6.0f);
gpu_kernel(
iter,
[zero, three, neg_three, one_sixth]GPU_LAMBDA(scalar_t grad_val_, scalar_t self_val_) -> scalar_t {
T_ACC grad_val = static_cast<T_ACC>(grad_val_);
T_ACC self_val = static_cast<T_ACC>(self_val_);
return (self_val > neg_three && self_val < three)
? grad_val * one_sixth
: zero;
});
});
}
void silu_kernel(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
iter.dtype(),
"silu_cuda",
[&]() {
gpu_kernel(
iter,
[] GPU_LAMBDA(scalar_t x) -> scalar_t {
using T_ACC = acc_type<scalar_t, true>;
const T_ACC x_acc = static_cast<T_ACC>(x);
return x_acc / (T_ACC(1) + c10::cuda::compat::exp(-x_acc));
});
});
}
void silu_backward_kernel(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
iter.dtype(),
"silu_backward_cuda",
[&]() {
gpu_kernel(
iter,
[] GPU_LAMBDA(scalar_t dy, scalar_t x) -> scalar_t {
using T_ACC = acc_type<scalar_t, true>;
const T_ACC dy_acc = static_cast<T_ACC>(dy);
const T_ACC x_acc = static_cast<T_ACC>(x);
const T_ACC s_acc =
T_ACC(1) / (T_ACC(1) + c10::cuda::compat::exp(-x_acc));
return dy_acc * s_acc * (T_ACC(1) + x_acc * (T_ACC(1) - s_acc));
});
});
}
void mish_kernel(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
iter.dtype(),
"mish_cuda",
[&]() {
gpu_kernel(
iter,
[] GPU_LAMBDA(scalar_t x) -> scalar_t {
using T_ACC = acc_type<scalar_t, true>;
const T_ACC x_acc = static_cast<T_ACC>(x);
return x_acc * c10::cuda::compat::tanh(c10::cuda::compat::log1p(c10::cuda::compat::exp(x_acc)));
});
});
}
void mish_backward_kernel(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
iter.dtype(),
"mish_backward_cuda",
[&]() {
gpu_kernel(
iter,
[] GPU_LAMBDA(scalar_t dy, scalar_t x) -> scalar_t {
using T_ACC = acc_type<scalar_t, true>;
const T_ACC dy_acc = static_cast<T_ACC>(dy);
const T_ACC x_acc = static_cast<T_ACC>(x);
const T_ACC s_acc =
T_ACC(1) / (T_ACC(1) + c10::cuda::compat::exp(-x_acc));
const T_ACC t_acc =
c10::cuda::compat::tanh(c10::cuda::compat::log1p(c10::cuda::compat::exp(x_acc)));
return dy_acc * (t_acc + x_acc * s_acc * (T_ACC(1) - t_acc * t_acc));
});
});
}
} // namespace
TORCH_IMPL_FUNC(gelu_out_cuda) (
const Tensor& self, const Tensor& result
) {
GeluCUDAKernelImpl(*this);
}
TORCH_IMPL_FUNC(gelu_backward_out_cuda) (
const Tensor& grad, const Tensor& self, const Tensor& grad_input
) {
GeluBackwardCUDAKernelImpl(*this);
}
REGISTER_DISPATCH(hardtanh_backward_stub, &hardtanh_backward_kernel);
REGISTER_DISPATCH(hardshrink_stub, &hardshrink_kernel);
REGISTER_DISPATCH(softshrink_stub, &softshrink_kernel);
REGISTER_DISPATCH(shrink_backward_stub, &shrink_backward_kernel);
REGISTER_DISPATCH(elu_stub, &elu_kernel);
REGISTER_DISPATCH(elu_backward_stub, &elu_backward_kernel);
REGISTER_DISPATCH(leaky_relu_stub, &leaky_relu_kernel);
REGISTER_DISPATCH(leaky_relu_backward_stub, &leaky_relu_backward_kernel);
REGISTER_DISPATCH(hardswish_stub, &hardswish_kernel);
REGISTER_DISPATCH(hardswish_backward_stub, &hardswish_backward_kernel);
REGISTER_DISPATCH(hardsigmoid_stub, &hardsigmoid_kernel);
REGISTER_DISPATCH(hardsigmoid_backward_stub, &hardsigmoid_backward_kernel);
REGISTER_DISPATCH(softplus_stub, &softplus_kernel);
REGISTER_DISPATCH(softplus_backward_stub, &softplus_backward_kernel);
REGISTER_DISPATCH(silu_stub, &silu_kernel);
REGISTER_DISPATCH(silu_backward_stub, &silu_backward_kernel);
REGISTER_DISPATCH(mish_stub, &mish_kernel);
REGISTER_DISPATCH(mish_backward_stub, &mish_backward_kernel);
REGISTER_DISPATCH(threshold_stub, &threshold_kernel_cuda);
} // namespace native
} // namespace at
|
0bc7de48bf2fac9a5863d72a9cbc845e56c4bbcd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel2_yvel_minus_2_top;
int xdim0_update_halo_kernel2_yvel_minus_2_top_h = -1;
__constant__ int ydim0_update_halo_kernel2_yvel_minus_2_top;
int ydim0_update_halo_kernel2_yvel_minus_2_top_h = -1;
__constant__ int xdim1_update_halo_kernel2_yvel_minus_2_top;
int xdim1_update_halo_kernel2_yvel_minus_2_top_h = -1;
__constant__ int ydim1_update_halo_kernel2_yvel_minus_2_top;
int ydim1_update_halo_kernel2_yvel_minus_2_top_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel2_yvel_minus_2_top * (y) + \
xdim0_update_halo_kernel2_yvel_minus_2_top * \
ydim0_update_halo_kernel2_yvel_minus_2_top * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel2_yvel_minus_2_top * (y) + \
xdim1_update_halo_kernel2_yvel_minus_2_top * \
ydim1_update_halo_kernel2_yvel_minus_2_top * (z))
// user function
__device__
inline void
update_halo_kernel2_yvel_minus_2_top(double *yvel0, double *yvel1,
const int *fields) {
if (fields[FIELD_YVEL0] == 1)
yvel0[OPS_ACC0(0, 0, 0)] = -yvel0[OPS_ACC0(0, -2, 0)];
if (fields[FIELD_YVEL1] == 1)
yvel1[OPS_ACC1(0, 0, 0)] = -yvel1[OPS_ACC1(0, -2, 0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
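// Device wrapper emitted by ops.py: each thread advances arg0/arg1 to its
// (x, y, z) element using the constant-memory dims, then applies the user
// kernel if the index lies inside the requested range.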
__global__ void ops_update_halo_kernel2_yvel_minus_2_top(
double *__restrict arg0, double *__restrict arg1,
const int *__restrict arg2, int size0, int size1, int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim0_update_halo_kernel2_yvel_minus_2_top +
idx_z * 1 * 1 * xdim0_update_halo_kernel2_yvel_minus_2_top *
ydim0_update_halo_kernel2_yvel_minus_2_top;
arg1 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim1_update_halo_kernel2_yvel_minus_2_top +
idx_z * 1 * 1 * xdim1_update_halo_kernel2_yvel_minus_2_top *
ydim1_update_halo_kernel2_yvel_minus_2_top;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel2_yvel_minus_2_top(arg0, arg1, arg2);
}
}
// host stub function
void ops_par_loop_update_halo_kernel2_yvel_minus_2_top(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1,
ops_arg arg2) {
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 3, range, 83))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(83, "update_halo_kernel2_yvel_minus_2_top");
OPS_kernels[83].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
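// Refresh the constant-memory copies of the dat dimensions only when they
// differ from the cached host-side *_h values, so repeated launches with the
// same dats skip the memcpy-to-symbol traffic.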
if (xdim0 != xdim0_update_halo_kernel2_yvel_minus_2_top_h ||
ydim0 != ydim0_update_halo_kernel2_yvel_minus_2_top_h ||
xdim1 != xdim1_update_halo_kernel2_yvel_minus_2_top_h ||
ydim1 != ydim1_update_halo_kernel2_yvel_minus_2_top_h) {
hipMemcpyToSymbol(xdim0_update_halo_kernel2_yvel_minus_2_top, &xdim0,
sizeof(int));
xdim0_update_halo_kernel2_yvel_minus_2_top_h = xdim0;
hipMemcpyToSymbol(ydim0_update_halo_kernel2_yvel_minus_2_top, &ydim0,
sizeof(int));
ydim0_update_halo_kernel2_yvel_minus_2_top_h = ydim0;
hipMemcpyToSymbol(xdim1_update_halo_kernel2_yvel_minus_2_top, &xdim1,
sizeof(int));
xdim1_update_halo_kernel2_yvel_minus_2_top_h = xdim1;
hipMemcpyToSymbol(ydim1_update_halo_kernel2_yvel_minus_2_top, &ydim1,
sizeof(int));
ydim1_update_halo_kernel2_yvel_minus_2_top_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
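// Stage the NUM_FIELDS flag array (arg2) through OPS's reusable consts
// buffers and push it to the device before the launch.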
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
char *p_a[3];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[83].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_update_halo_kernel2_yvel_minus_2_top), dim3(grid), dim3(tblock), 0, 0,
(double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size,
z_size);
if (OPS_diags > 1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[83].time += t1 - t2;
}
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[83].mpi_time += t2 - t1;
OPS_kernels[83].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[83].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
| 0bc7de48bf2fac9a5863d72a9cbc845e56c4bbcd.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel2_yvel_minus_2_top;
int xdim0_update_halo_kernel2_yvel_minus_2_top_h = -1;
__constant__ int ydim0_update_halo_kernel2_yvel_minus_2_top;
int ydim0_update_halo_kernel2_yvel_minus_2_top_h = -1;
__constant__ int xdim1_update_halo_kernel2_yvel_minus_2_top;
int xdim1_update_halo_kernel2_yvel_minus_2_top_h = -1;
__constant__ int ydim1_update_halo_kernel2_yvel_minus_2_top;
int ydim1_update_halo_kernel2_yvel_minus_2_top_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel2_yvel_minus_2_top * (y) + \
xdim0_update_halo_kernel2_yvel_minus_2_top * \
ydim0_update_halo_kernel2_yvel_minus_2_top * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel2_yvel_minus_2_top * (y) + \
xdim1_update_halo_kernel2_yvel_minus_2_top * \
ydim1_update_halo_kernel2_yvel_minus_2_top * (z))
// user function
__device__
inline void
update_halo_kernel2_yvel_minus_2_top(double *yvel0, double *yvel1,
const int *fields) {
if (fields[FIELD_YVEL0] == 1)
yvel0[OPS_ACC0(0, 0, 0)] = -yvel0[OPS_ACC0(0, -2, 0)];
if (fields[FIELD_YVEL1] == 1)
yvel1[OPS_ACC1(0, 0, 0)] = -yvel1[OPS_ACC1(0, -2, 0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel2_yvel_minus_2_top(
double *__restrict arg0, double *__restrict arg1,
const int *__restrict arg2, int size0, int size1, int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim0_update_halo_kernel2_yvel_minus_2_top +
idx_z * 1 * 1 * xdim0_update_halo_kernel2_yvel_minus_2_top *
ydim0_update_halo_kernel2_yvel_minus_2_top;
arg1 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim1_update_halo_kernel2_yvel_minus_2_top +
idx_z * 1 * 1 * xdim1_update_halo_kernel2_yvel_minus_2_top *
ydim1_update_halo_kernel2_yvel_minus_2_top;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel2_yvel_minus_2_top(arg0, arg1, arg2);
}
}
// host stub function
void ops_par_loop_update_halo_kernel2_yvel_minus_2_top(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1,
ops_arg arg2) {
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 3, range, 83))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(83, "update_halo_kernel2_yvel_minus_2_top");
OPS_kernels[83].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel2_yvel_minus_2_top_h ||
ydim0 != ydim0_update_halo_kernel2_yvel_minus_2_top_h ||
xdim1 != xdim1_update_halo_kernel2_yvel_minus_2_top_h ||
ydim1 != ydim1_update_halo_kernel2_yvel_minus_2_top_h) {
cudaMemcpyToSymbol(xdim0_update_halo_kernel2_yvel_minus_2_top, &xdim0,
sizeof(int));
xdim0_update_halo_kernel2_yvel_minus_2_top_h = xdim0;
cudaMemcpyToSymbol(ydim0_update_halo_kernel2_yvel_minus_2_top, &ydim0,
sizeof(int));
ydim0_update_halo_kernel2_yvel_minus_2_top_h = ydim0;
cudaMemcpyToSymbol(xdim1_update_halo_kernel2_yvel_minus_2_top, &xdim1,
sizeof(int));
xdim1_update_halo_kernel2_yvel_minus_2_top_h = xdim1;
cudaMemcpyToSymbol(ydim1_update_halo_kernel2_yvel_minus_2_top, &ydim1,
sizeof(int));
ydim1_update_halo_kernel2_yvel_minus_2_top_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
char *p_a[3];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[83].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
ops_update_halo_kernel2_yvel_minus_2_top<<<grid, tblock>>>(
(double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size,
z_size);
if (OPS_diags > 1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[83].time += t1 - t2;
}
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[83].mpi_time += t2 - t1;
OPS_kernels[83].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[83].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
|
9b447c908b7c9310541916972b0344f942ad92a5.hip | // !!! This is a file automatically generated by hipify!!!
/**************************************************************************
* Copyright (c) 2017-2019 by the mfmg authors *
* All rights reserved. *
* *
* This file is part of the mfmg library. mfmg is distributed under a BSD *
* 3-clause license. For the licensing terms see the LICENSE file in the *
* top-level directory *
* *
* SPDX-License-Identifier: BSD-3-Clause *
*************************************************************************/
#define BOOST_TEST_MODULE smoother_device
#include <mfmg/common/exceptions.hpp>
#include <mfmg/cuda/cuda_matrix_operator.cuh>
#include <mfmg/cuda/cuda_smoother.cuh>
#include <mfmg/cuda/sparse_matrix_device.cuh>
#include <mfmg/cuda/utils.cuh>
#include <deal.II/lac/precondition.h>
#include <deal.II/lac/sparse_matrix.h>
#include <deal.II/lac/sparsity_pattern.h>
#include <boost/property_tree/ptree.hpp>
#include "main.cc"
BOOST_AUTO_TEST_CASE(smoother)
{
// Create the cusparse handle
hipsparseHandle_t cusparse_handle = nullptr;
hipsparseStatus_t cusparse_error_code;
cusparse_error_code = hipsparseCreate(&cusparse_handle);
mfmg::ASSERT_CUSPARSE(cusparse_error_code);
// Create the matrix on the host.
dealii::SparsityPattern sparsity_pattern;
dealii::SparseMatrix<double> matrix;
unsigned int const size = 30;
std::vector<std::vector<unsigned int>> column_indices(size);
for (unsigned int i = 0; i < size; ++i)
{
unsigned int j_max = ::min(size, i + 2);
unsigned int j_min = (i == 0) ? 0 : i - 1;
for (unsigned int j = j_min; j < j_max; ++j)
column_indices[i].emplace_back(j);
}
sparsity_pattern.copy_from(size, size, column_indices.begin(),
column_indices.end());
matrix.reinit(sparsity_pattern);
for (unsigned int i = 0; i < size; ++i)
{
unsigned int j_max = ::min(size - 1, i + 1);
unsigned int j_min = (i == 0) ? 0 : i - 1;
matrix.set(i, j_min, -1.);
matrix.set(i, j_max, -1.);
matrix.set(i, i, 4.);
}
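// matrix is now tridiagonal: 4 on the diagonal and -1 on the sub- and
// super-diagonals, a simple diagonally dominant test operator.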
double constexpr scalar_value = 1.;
std::vector<double> domain_host(size);
for (auto &v : domain_host)
v = scalar_value;
std::vector<double> range_host(size, 0.);
dealii::Vector<double> domain_vector(size);
dealii::Vector<double> range_vector(size);
for (auto &v : domain_vector)
v = scalar_value;
// Compute the reference solution
dealii::PreconditionJacobi<dealii::SparseMatrix<double>> precondition;
precondition.initialize(matrix);
dealii::Vector<double> res(domain_vector);
matrix.vmult(res, range_vector);
res.add(-1., domain_vector);
dealii::Vector<double> tmp(range_vector);
precondition.vmult(tmp, res);
range_vector.add(-1., tmp);
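// range_vector now holds one Jacobi step x <- x - D^{-1} (A x - b) with
// b = domain_vector; it serves as the host reference for the device smoother.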
// Move the matrix to the device
auto matrix_dev = std::make_shared<mfmg::SparseMatrixDevice<double>>(
mfmg::convert_matrix(matrix));
matrix_dev->cusparse_handle = cusparse_handle;
cusparse_error_code = hipsparseCreateMatDescr(&matrix_dev->descr);
mfmg::ASSERT_CUSPARSE(cusparse_error_code);
cusparse_error_code =
hipsparseSetMatType(matrix_dev->descr, HIPSPARSE_MATRIX_TYPE_GENERAL);
mfmg::ASSERT_CUSPARSE(cusparse_error_code);
cusparse_error_code =
hipsparseSetMatIndexBase(matrix_dev->descr, HIPSPARSE_INDEX_BASE_ZERO);
mfmg::ASSERT_CUSPARSE(cusparse_error_code);
// Build the smoother operator
std::shared_ptr<mfmg::Operator<dealii::LinearAlgebra::distributed::Vector<
double, dealii::MemorySpace::CUDA>>>
cuda_op(
new mfmg::CudaMatrixOperator<dealii::LinearAlgebra::distributed::Vector<
double, dealii::MemorySpace::CUDA>>(matrix_dev));
auto param = std::make_shared<boost::property_tree::ptree>();
mfmg::CudaSmoother<dealii::LinearAlgebra::distributed::Vector<
double, dealii::MemorySpace::CUDA>>
smoother_operator(cuda_op, param);
// Apply the smoother
auto domain_dev = cuda_op->build_domain_vector();
auto range_dev = cuda_op->build_range_vector();
mfmg::cuda_mem_copy_to_dev(domain_host, domain_dev->get_values());
mfmg::cuda_mem_copy_to_dev(range_host, range_dev->get_values());
smoother_operator.apply(*domain_dev, *range_dev);
// Compare the solution
mfmg::cuda_mem_copy_to_host(range_dev->get_values(), range_host);
for (unsigned int i = 0; i < size; ++i)
BOOST_CHECK_CLOSE(range_host[i], range_vector[i], 1e-12);
// Destroy the cusparse handle
cusparse_error_code = hipsparseDestroy(cusparse_handle);
mfmg::ASSERT_CUSPARSE(cusparse_error_code);
}
| 9b447c908b7c9310541916972b0344f942ad92a5.cu | /**************************************************************************
* Copyright (c) 2017-2019 by the mfmg authors *
* All rights reserved. *
* *
* This file is part of the mfmg library. mfmg is distributed under a BSD *
* 3-clause license. For the licensing terms see the LICENSE file in the *
* top-level directory *
* *
* SPDX-License-Identifier: BSD-3-Clause *
*************************************************************************/
#define BOOST_TEST_MODULE smoother_device
#include <mfmg/common/exceptions.hpp>
#include <mfmg/cuda/cuda_matrix_operator.cuh>
#include <mfmg/cuda/cuda_smoother.cuh>
#include <mfmg/cuda/sparse_matrix_device.cuh>
#include <mfmg/cuda/utils.cuh>
#include <deal.II/lac/precondition.h>
#include <deal.II/lac/sparse_matrix.h>
#include <deal.II/lac/sparsity_pattern.h>
#include <boost/property_tree/ptree.hpp>
#include "main.cc"
BOOST_AUTO_TEST_CASE(smoother)
{
// Create the cusparse handle
cusparseHandle_t cusparse_handle = nullptr;
cusparseStatus_t cusparse_error_code;
cusparse_error_code = cusparseCreate(&cusparse_handle);
mfmg::ASSERT_CUSPARSE(cusparse_error_code);
// Create the matrix on the host.
dealii::SparsityPattern sparsity_pattern;
dealii::SparseMatrix<double> matrix;
unsigned int const size = 30;
std::vector<std::vector<unsigned int>> column_indices(size);
for (unsigned int i = 0; i < size; ++i)
{
unsigned int j_max = std::min(size, i + 2);
unsigned int j_min = (i == 0) ? 0 : i - 1;
for (unsigned int j = j_min; j < j_max; ++j)
column_indices[i].emplace_back(j);
}
sparsity_pattern.copy_from(size, size, column_indices.begin(),
column_indices.end());
matrix.reinit(sparsity_pattern);
for (unsigned int i = 0; i < size; ++i)
{
unsigned int j_max = std::min(size - 1, i + 1);
unsigned int j_min = (i == 0) ? 0 : i - 1;
matrix.set(i, j_min, -1.);
matrix.set(i, j_max, -1.);
matrix.set(i, i, 4.);
}
double constexpr scalar_value = 1.;
std::vector<double> domain_host(size);
for (auto &v : domain_host)
v = scalar_value;
std::vector<double> range_host(size, 0.);
dealii::Vector<double> domain_vector(size);
dealii::Vector<double> range_vector(size);
for (auto &v : domain_vector)
v = scalar_value;
// Compute the reference solution
dealii::PreconditionJacobi<dealii::SparseMatrix<double>> precondition;
precondition.initialize(matrix);
dealii::Vector<double> res(domain_vector);
matrix.vmult(res, range_vector);
res.add(-1., domain_vector);
dealii::Vector<double> tmp(range_vector);
precondition.vmult(tmp, res);
range_vector.add(-1., tmp);
// Move the matrix to the device
auto matrix_dev = std::make_shared<mfmg::SparseMatrixDevice<double>>(
mfmg::convert_matrix(matrix));
matrix_dev->cusparse_handle = cusparse_handle;
cusparse_error_code = cusparseCreateMatDescr(&matrix_dev->descr);
mfmg::ASSERT_CUSPARSE(cusparse_error_code);
cusparse_error_code =
cusparseSetMatType(matrix_dev->descr, CUSPARSE_MATRIX_TYPE_GENERAL);
mfmg::ASSERT_CUSPARSE(cusparse_error_code);
cusparse_error_code =
cusparseSetMatIndexBase(matrix_dev->descr, CUSPARSE_INDEX_BASE_ZERO);
mfmg::ASSERT_CUSPARSE(cusparse_error_code);
// Build the smoother operator
std::shared_ptr<mfmg::Operator<dealii::LinearAlgebra::distributed::Vector<
double, dealii::MemorySpace::CUDA>>>
cuda_op(
new mfmg::CudaMatrixOperator<dealii::LinearAlgebra::distributed::Vector<
double, dealii::MemorySpace::CUDA>>(matrix_dev));
auto param = std::make_shared<boost::property_tree::ptree>();
mfmg::CudaSmoother<dealii::LinearAlgebra::distributed::Vector<
double, dealii::MemorySpace::CUDA>>
smoother_operator(cuda_op, param);
// Apply the smoother
auto domain_dev = cuda_op->build_domain_vector();
auto range_dev = cuda_op->build_range_vector();
mfmg::cuda_mem_copy_to_dev(domain_host, domain_dev->get_values());
mfmg::cuda_mem_copy_to_dev(range_host, range_dev->get_values());
smoother_operator.apply(*domain_dev, *range_dev);
// Compare the solution
mfmg::cuda_mem_copy_to_host(range_dev->get_values(), range_host);
for (unsigned int i = 0; i < size; ++i)
BOOST_CHECK_CLOSE(range_host[i], range_vector[i], 1e-12);
// Destroy the cusparse handle
cusparse_error_code = cusparseDestroy(cusparse_handle);
mfmg::ASSERT_CUSPARSE(cusparse_error_code);
}
|
11c258dbbaa5cb022b779baa2c04d103f7af3790.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
/***********************************************/
/* for debug: check the output */
/***********************************************/
void write_output(float *arr, int size, const char *filename)
{
FILE *fp;
if((fp = fopen(filename, "w+")) == NULL)
{
fprintf(stderr, "File write error!\n");
return; // fp is NULL; writing through it below would crash
}
int i;
for(i = 0; i < size; i++)
{
fprintf(fp, "%f ", arr[i]);
if((i + 1) % 10 == 0) // break the line after every 10th value
fprintf(fp, "\n");
}
fprintf(fp, "\n");
fclose(fp);
}
//device memory pointers
static int *nd1_velD;
static int *nd1_txyD;
static int *nd1_txzD;
static int *nd1_tyyD;
static int *nd1_tyzD;
static float *rhoD;
static float *drvh1D;
static float *drti1D;
static float *drth1D;
static float *damp1_xD;
static float *damp1_yD;
static int *idmat1D;
static float *dxi1D;
static float *dyi1D;
static float *dzi1D;
static float *dxh1D;
static float *dyh1D;
static float *dzh1D;
static float *t1xxD;
static float *t1xyD;
static float *t1xzD;
static float *t1yyD;
static float *t1yzD;
static float *t1zzD;
static float *t1xx_pxD;
static float *t1xy_pxD;
static float *t1xz_pxD;
static float *t1yy_pxD;
static float *qt1xx_pxD;
static float *qt1xy_pxD;
static float *qt1xz_pxD;
static float *qt1yy_pxD;
static float *t1xx_pyD;
static float *t1xy_pyD;
static float *t1yy_pyD;
static float *t1yz_pyD;
static float *qt1xx_pyD;
static float *qt1xy_pyD;
static float *qt1yy_pyD;
static float *qt1yz_pyD;
static float *qt1xxD;
static float *qt1xyD;
static float *qt1xzD;
static float *qt1yyD;
static float *qt1yzD;
static float *qt1zzD;
static float *clamdaD;
static float *cmuD;
static float *epdtD;
static float *qwpD;
static float *qwsD;
static float *qwt1D;
static float *qwt2D;
static float *v1xD; //output
static float *v1yD;
static float *v1zD;
static float *v1x_pxD;
static float *v1y_pxD;
static float *v1z_pxD;
static float *v1x_pyD;
static float *v1y_pyD;
static float *v1z_pyD;
//for inner_II---------------------------------------------------------
static int *nd2_velD;
static int *nd2_txyD; //int[18]
static int *nd2_txzD; //int[18]
static int *nd2_tyyD; //int[18]
static int *nd2_tyzD; //int[18]
static float *drvh2D;
static float *drti2D;
static float *drth2D; //float[mw2_pml1,0:1]
static int *idmat2D;
static float *damp2_xD;
static float *damp2_yD;
static float *damp2_zD;
static float *dxi2D;
static float *dyi2D;
static float *dzi2D;
static float *dxh2D;
static float *dyh2D;
static float *dzh2D;
static float *t2xxD;
static float *t2xyD;
static float *t2xzD;
static float *t2yyD;
static float *t2yzD;
static float *t2zzD;
static float *qt2xxD;
static float *qt2xyD;
static float *qt2xzD;
static float *qt2yyD;
static float *qt2yzD;
static float *qt2zzD;
static float *t2xx_pxD;
static float *t2xy_pxD;
static float *t2xz_pxD;
static float *t2yy_pxD;
static float *qt2xx_pxD;
static float *qt2xy_pxD;
static float *qt2xz_pxD;
static float *qt2yy_pxD;
static float *t2xx_pyD;
static float *t2xy_pyD;
static float *t2yy_pyD;
static float *t2yz_pyD;
static float *qt2xx_pyD;
static float *qt2xy_pyD;
static float *qt2yy_pyD;
static float *qt2yz_pyD;
static float *t2xx_pzD;
static float *t2xz_pzD;
static float *t2yz_pzD;
static float *t2zz_pzD;
static float *qt2xx_pzD;
static float *qt2xz_pzD;
static float *qt2yz_pzD;
static float *qt2zz_pzD;
static float *v2xD; //output
static float *v2yD;
static float *v2zD;
static float *v2x_pxD;
static float *v2y_pxD;
static float *v2z_pxD;
static float *v2x_pyD;
static float *v2y_pyD;
static float *v2z_pyD;
static float *v2x_pzD;
static float *v2y_pzD;
static float *v2z_pzD;
#define CHECK_ERROR(err, str) \
	do { \
		if ((err) != hipSuccess) \
		{ \
			printf("Error in \"%s\", %s\n", str, hipGetErrorString(err)); \
		} \
	} while (0)
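// Note: CHECK_ERROR only reports the failure and continues; it does not
// abort or reset the device, so a failed hipMalloc leaves a NULL device
// pointer that later hipMemcpy calls can trip over.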
//debug----------------------
double totalTimeH2DV, totalTimeD2HV;
double totalTimeH2DS, totalTimeD2HS;
double totalTimeCompV, totalTimeCompS;
double tmpTime;
struct timeval t1, t2;
int procID;
//--------------------------------
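// Illustrative helper (not part of the original code): the timing globals
// above pair naturally with gettimeofday() from <sys/time.h>; a helper like
// this converts two struct timeval samples into elapsed milliseconds for
// accumulation into the totalTime* sums.
static double elapsedTimeMs(struct timeval *tStart, struct timeval *tEnd)
{
	return (double)(tEnd->tv_sec - tStart->tv_sec) * 1000.0 +
	       (double)(tEnd->tv_usec - tStart->tv_usec) / 1000.0;
}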
//!XSC--------------------------------------------------------------------
#define drvh1(i, j) drvh1M[(i) - 1 + (j) * mw1_pml1]
#define drti1(i, j) drti1M[(i) - 1 + (j) * mw1_pml1]
#define drth1(i, j) drth1M[(i) - 1 + (j) * mw1_pml1]
#define damp1_x(i, j, k) damp1_xM[(i) - 1 + (nztop + 1) * ((j) - 1 + ((k) - lbx0) * nytop)]
#define damp1_y(i, j, k) damp1_yM[(i) - 1 + (nztop + 1) * ((j) - 1 + ((k) - lby0) * nxtop)]
#define idmat1(i, j, k) idmat1M[(i) + (nztop + 2) * ((j) - 1 + ((k) - 1) * (nxtop + 1))]
#define v1x(i, j, k) v1xM[(i) + (nztop + 2) * ((j) + 1 + (k) * (nxtop + 3))]
#define v1y(i, j, k) v1yM[(i) + (nztop + 2) * ((j) + ((k) + 1) * (nxtop + 3))]
#define v1z(i, j, k) v1zM[(i) + (nztop + 2) * ((j) + (k) * (nxtop + 3))]
//nv2x=(lbx(2) - lbx(1) + 1) * mw1_pml
#define v1x_px(i, j, k) v1x_pxM[(i) - 1 + nztop * ((j) - 1 + nv2x * ((k) - 1))]
#define v1y_px(i, j, k) v1y_pxM[(i) - 1 + nztop * ((j) - 1 + nv2x * ((k) - 1))]
#define v1z_px(i, j, k) v1z_pxM[(i) - 1 + nztop * ((j) - 1 + nv2x * ((k) - 1))]
#define v1x_py(i, j, k) v1x_pyM[(i) - 1 + nztop * ((j) - 1 + nxtop * ((k) - 1))]
#define v1y_py(i, j, k) v1y_pyM[(i) - 1 + nztop * ((j) - 1 + nxtop * ((k) - 1))]
#define v1z_py(i, j, k) v1z_pyM[(i) - 1 + nztop * ((j) - 1 + nxtop * ((k) - 1))]
#define dxi1(i, j) dxi1M[((j) - 1) * 4 + (i) - 1]
#define dyi1(i, j) dyi1M[((j) - 1) * 4 + (i) - 1]
#define dzi1(i, j) dzi1M[((j) - 1) * 4 + (i) - 1]
#define dxh1(i, j) dxh1M[((j) - 1) * 4 + (i) - 1]
#define dyh1(i, j) dyh1M[((j) - 1) * 4 + (i) - 1]
#define dzh1(i, j) dzh1M[((j) - 1) * 4 + (i) - 1]
#define t1xx(i, j, k) t1xxM[(i) - 1 + nztop * ((j) + ((k) - 1) * (nxtop + 3))]
#define t1xy(i, j, k) t1xyM[(i) - 1 + nztop * ((j) + 1 + ((k) + 1) * (nxtop + 3))]
#define t1xz(i, j, k) t1xzM[(i) - 1 + (nztop + 1) * ((j) + 1 + ((k) - 1) * (nxtop + 3))]
#define t1yy(i, j, k) t1yyM[(i) - 1 + nztop * (((j) - 1) + (k) * nxtop)]
#define t1yz(i, j, k) t1yzM[(i) - 1 + (nztop + 1) * ((j) - 1 + ((k) + 1) * nxtop)]
#define t1zz(i, j, k) t1zzM[(i) - 1 + nztop * ((j) - 1 + ((k) - 1) * nxtop)]
//nti = (lbx(2) - lbx(1) + 1) * mw1_pml + lbx(2)
//nth = (lbx(2) - lbx(1) + 1) * mw1_pml + 1 - lbx(1)
#define t1xx_px(i, j, k) t1xx_pxM[(i) - 1 + nztop * ((j) - 1 + nti * ((k) - 1))]
#define t1xy_px(i, j, k) t1xy_pxM[(i) - 1 + nztop * ((j) - 1 + nth * ((k) - 1))]
#define t1xz_px(i, j, k) t1xz_pxM[(i) - 1 + (nztop + 1) * ((j) - 1 + nth * ((k) - 1))]
#define t1yy_px(i, j, k) t1yy_pxM[(i) - 1 + nztop * ((j) - 1 + nti * ((k) - 1))]
#define qt1xx_px(i, j, k) qt1xx_pxM[(i) - 1 + nztop * ((j) - 1 + nti * ((k) - 1))]
#define qt1xy_px(i, j, k) qt1xy_pxM[(i) - 1 + nztop * ((j) - 1 + nth * ((k) - 1))]
#define qt1xz_px(i, j, k) qt1xz_pxM[(i) - 1 + (nztop + 1) * ((j) - 1 + nth * ((k) - 1))]
#define qt1yy_px(i, j, k) qt1yy_pxM[(i) - 1 + nztop * ((j) - 1 + nti * ((k) - 1))]
//nti = (lby(2) - lby(1) + 1) * mw1_pml + lby(2)
//nth = (lby(2) - lby(1) + 1) * mw1_pml + 1 - lby(1)
#define t1xx_py(i, j, k) t1xx_pyM[(i) - 1 + nztop * ((j) - 1 + nxtop * ((k) - 1))]
#define t1xy_py(i, j, k) t1xy_pyM[(i) - 1 + nztop * ((j) - 1 + nxtop * ((k) - 1))]
#define t1yy_py(i, j, k) t1yy_pyM[(i) - 1 + nztop * ((j) - 1 + nxtop * ((k) - 1))]
#define t1yz_py(i, j, k) t1yz_pyM[(i) - 1 + (nztop + 1) * ((j) - 1 + nxtop * ((k) - 1))]
#define qt1xx_py(i, j, k) qt1xx_pyM[(i) - 1 + nztop * ((j) - 1 + nxtop * ((k) - 1))]
#define qt1xy_py(i, j, k) qt1xy_pyM[(i) - 1 + nztop * ((j) - 1 + nxtop * ((k) - 1))]
#define qt1yy_py(i, j, k) qt1yy_pyM[(i) - 1 + nztop * ((j) - 1 + nxtop * ((k) - 1))]
#define qt1yz_py(i, j, k) qt1yz_pyM[(i) - 1 + (nztop + 1) * ((j) - 1 + nxtop * ((k) - 1))]
#define qt1xx(i, j, k) qt1xxM[(i) - 1 + nztop * ((j) - 1 + nxtop * ((k) - 1))]
#define qt1xy(i, j, k) qt1xyM[(i) - 1 + nztop * ((j) - 1 + nxtop * ((k) - 1))]
#define qt1xz(i, j, k) qt1xzM[(i) - 1 + (nztop + 1) * ((j) - 1 + nxtop * ((k) - 1))]
#define qt1yy(i, j, k) qt1yyM[(i) - 1 + nztop * ((j) - 1 + nxtop * ((k) - 1))]
#define qt1yz(i, j, k) qt1yzM[(i) - 1 + (nztop + 1) * ((j) - 1 + nxtop * ((k) - 1))]
#define qt1zz(i, j, k) qt1zzM[(i) - 1 + nztop * ((j) - 1 + nxtop * ((k) - 1))]
#define rho(i) rhoM[(i) - 1]
#define clamda(i) clamdaM[(i) - 1]
#define cmu(i) cmuM[(i) - 1]
#define epdt(i) epdtM[(i) - 1]
#define qwp(i) qwpM[(i) - 1]
#define qws(i) qwsM[(i) - 1]
#define qwt1(i) qwt1M[(i) - 1]
#define qwt2(i) qwt2M[(i) - 1]
//for inner_II
#define drvh2(i, j) drvh2M[(i) - 1 + (j) * mw2_pml1]
#define drti2(i, j) drti2M[(i) - 1 + (j) * mw2_pml1]
#define drth2(i, j) drth2M[(i) - 1 + (j) * mw2_pml1]
#define idmat2(i, j, k) idmat2M[(i) + (nzbtm + 1) * ((j) - 1 + ((k) - 1) * (nxbtm + 1))]
#define damp2_x(i, j, k) damp2_xM[(i) - 1 + nzbtm * ((j) - 1 + ((k) - lbx0) * nybtm)]
#define damp2_y(i, j, k) damp2_yM[(i) - 1 + nzbtm * ((j) - 1 + ((k) - lby0) * nxbtm)]
#define damp2_z(i, j) damp2_zM[(i) - 1 + nxbtm * ((j) - 1)]
#define dxi2(i, j) dxi2M[(i) - 1 + 4 * ((j) - 1)]
#define dyi2(i, j) dyi2M[(i) - 1 + 4 * ((j) - 1)]
#define dzi2(i, j) dzi2M[(i) - 1 + 4 * ((j) - 1)]
#define dxh2(i, j) dxh2M[(i) - 1 + 4 * ((j) - 1)]
#define dyh2(i, j) dyh2M[(i) - 1 + 4 * ((j) - 1)]
#define dzh2(i, j) dzh2M[(i) - 1 + 4 * ((j) - 1)]
#define t2xx(i, j, k) t2xxM[(i) - 1 + nzbtm * ((j) + ((k) - 1) * (nxbtm + 3))]
#define t2xy(i, j, k) t2xyM[(i) - 1 + nzbtm * ((j) + 1 + ((k) + 1) * (nxbtm + 3))]
#define t2xz(i, j, k) t2xzM[(i) + (nzbtm + 1) * ((j) + 1 + ((k) - 1) * (nxbtm + 3))]
#define t2yy(i, j, k) t2yyM[(i) - 1 + nzbtm * (((j) - 1) + (k) * nxbtm)]
#define t2yz(i, j, k) t2yzM[(i) + (nzbtm + 1) * ((j) - 1 + ((k) + 1) * nxbtm)]
#define t2zz(i, j, k) t2zzM[(i) + (nzbtm + 1) * ((j) - 1 + ((k) - 1) * nxbtm)]
#define qt2xx(i, j, k) qt2xxM[(i) - 1 + nzbtm * ((j) - 1 + nxbtm * ((k) - 1))]
#define qt2xy(i, j, k) qt2xyM[(i) - 1 + nzbtm * ((j) - 1 + nxbtm * ((k) - 1))]
#define qt2xz(i, j, k) qt2xzM[(i) - 1 + nzbtm * ((j) - 1 + nxbtm * ((k) - 1))]
#define qt2yy(i, j, k) qt2yyM[(i) - 1 + nzbtm * ((j) - 1 + nxbtm * ((k) - 1))]
#define qt2yz(i, j, k) qt2yzM[(i) - 1 + nzbtm * ((j) - 1 + nxbtm * ((k) - 1))]
#define qt2zz(i, j, k) qt2zzM[(i) - 1 + nzbtm * ((j) - 1 + nxbtm * ((k) - 1))]
//nti = (lbx(2) - lbx(1) + 1) * mw2_pml + lbx(2)
//nth = (lbx(2) - lbx(1) + 1) * mw2_pml + 1 - lbx(1)
#define t2xx_px(i, j, k) t2xx_pxM[(i) - 1 + nzbtm * ((j) - 1 + nti * ((k) - 1))]
#define t2xy_px(i, j, k) t2xy_pxM[(i) - 1 + nzbtm * ((j) - 1 + nth * ((k) - 1))]
#define t2xz_px(i, j, k) t2xz_pxM[(i) - 1 + nzbtm * ((j) - 1 + nth * ((k) - 1))]
#define t2yy_px(i, j, k) t2yy_pxM[(i) - 1 + nzbtm * ((j) - 1 + nti * ((k) - 1))]
#define t2xx_py(i, j, k) t2xx_pyM[(i) - 1 + nzbtm * ((j) - 1 + nxbtm * ((k) - 1))]
#define t2xy_py(i, j, k) t2xy_pyM[(i) - 1 + nzbtm * ((j) - 1 + nxbtm * ((k) - 1))]
#define t2yy_py(i, j, k) t2yy_pyM[(i) - 1 + nzbtm * ((j) - 1 + nxbtm * ((k) - 1))]
#define t2yz_py(i, j, k) t2yz_pyM[(i) - 1 + nzbtm * ((j) - 1 + nxbtm * ((k) - 1))]
#define t2xx_pz(i, j, k) t2xx_pzM[(i) - 1 + mw2_pml * ((j) - 1 + nxbtm * ((k) - 1))]
#define t2xz_pz(i, j, k) t2xz_pzM[(i) - 1 + mw2_pml1 * ((j) - 1 + nxbtm * ((k) - 1))]
#define t2yz_pz(i, j, k) t2yz_pzM[(i) - 1 + mw2_pml1 * ((j) - 1 + nxbtm * ((k) - 1))]
#define t2zz_pz(i, j, k) t2zz_pzM[(i) - 1 + mw2_pml * ((j) - 1 + nxbtm * ((k) - 1))]
#define qt2xx_px(i, j, k) qt2xx_pxM[(i) - 1 + nzbtm * ((j) - 1 + nti * ((k) - 1))]
#define qt2xy_px(i, j, k) qt2xy_pxM[(i) - 1 + nzbtm * ((j) - 1 + nth * ((k) - 1))]
#define qt2xz_px(i, j, k) qt2xz_pxM[(i) - 1 + nzbtm * ((j) - 1 + nth * ((k) - 1))]
#define qt2yy_px(i, j, k) qt2yy_pxM[(i) - 1 + nzbtm * ((j) - 1 + nti * ((k) - 1))]
#define qt2xx_py(i, j, k) qt2xx_pyM[(i) - 1 + nzbtm * ((j) - 1 + nxbtm * ((k) - 1))]
#define qt2xy_py(i, j, k) qt2xy_pyM[(i) - 1 + nzbtm * ((j) - 1 + nxbtm * ((k) - 1))]
#define qt2yy_py(i, j, k) qt2yy_pyM[(i) - 1 + nzbtm * ((j) - 1 + nxbtm * ((k) - 1))]
#define qt2yz_py(i, j, k) qt2yz_pyM[(i) - 1 + nzbtm * ((j) - 1 + nxbtm * ((k) - 1))]
#define qt2xx_pz(i, j, k) qt2xx_pzM[(i) - 1 + mw2_pml * ((j) - 1 + nxbtm * ((k) - 1))]
#define qt2xz_pz(i, j, k) qt2xz_pzM[(i) - 1 + mw2_pml1 * ((j) - 1 + nxbtm * ((k) - 1))]
#define qt2yz_pz(i, j, k) qt2yz_pzM[(i) - 1 + mw2_pml1 * ((j) - 1 + nxbtm * ((k) - 1))]
#define qt2zz_pz(i, j, k) qt2zz_pzM[(i) - 1 + mw2_pml * ((j) - 1 + nxbtm * ((k) - 1))]
#define v2x(i, j, k) v2xM[(i) + (nzbtm + 1) * ((j) + 1 + (nxbtm + 3) * (k))]
#define v2y(i, j, k) v2yM[(i) + (nzbtm + 1) * ((j) + (nxbtm + 3) * ((k) + 1))]
#define v2z(i, j, k) v2zM[(i) + (nzbtm + 1) * ((j) + (nxbtm + 3) * (k))]
//nv2y = (lbx(2) - lbx(1) + 1) * mw2_pml
#define v2x_px(i, j, k) v2x_pxM[(i) - 1 + nzbtm * ((j) - 1 + nv2y * ((k) - 1))]
#define v2y_px(i, j, k) v2y_pxM[(i) - 1 + nzbtm * ((j) - 1 + nv2y * ((k) - 1))]
#define v2z_px(i, j, k) v2z_pxM[(i) - 1 + nzbtm * ((j) - 1 + nv2y * ((k) - 1))]
#define v2x_py(i, j, k) v2x_pyM[(i) - 1 + nzbtm * ((j) - 1 + nxbtm * ((k) - 1))]
#define v2y_py(i, j, k) v2y_pyM[(i) - 1 + nzbtm * ((j) - 1 + nxbtm * ((k) - 1))]
#define v2z_py(i, j, k) v2z_pyM[(i) - 1 + nzbtm * ((j) - 1 + nxbtm * ((k) - 1))]
#define v2x_pz(i, j, k) v2x_pzM[(i) - 1 + mw2_pml * ((j) - 1 + nxbtm * ((k) - 1))]
#define v2y_pz(i, j, k) v2y_pzM[(i) - 1 + mw2_pml * ((j) - 1 + nxbtm * ((k) - 1))]
#define v2z_pz(i, j, k) v2z_pzM[(i) - 1 + mw2_pml * ((j) - 1 + nxbtm * ((k) - 1))]
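// Note on the index macros above: they emulate Fortran arrays (1-based,
// column-major) on top of flat C buffers, so the first index varies fastest.
// For example, dxi1(i, j) = dxi1M[((j) - 1) * 4 + (i) - 1] maps the Fortran
// element dxi1(2, 3) to flat offset (3 - 1) * 4 + (2 - 1) = 9. Macros such
// as v1x()/v2x() additionally shift by ghost layers, which is why their
// leading dimensions carry "+ 2"/"+ 3" terms and some indices are offset
// by +1 instead of -1.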
__global__ void velocity_inner_IC(int nztop,
int nztm1,
float ca,
int *nd1_vel,
float *rhoM,
int *idmat1M,
float *dxi1M,
float *dyi1M,
float *dzi1M,
float *dxh1M,
float *dyh1M,
float *dzh1M,
float *t1xxM,
float *t1xyM,
float *t1xzM,
float *t1yyM,
float *t1yzM,
float *t1zzM,
int nxtop, //dimension #
int nytop,
float *v1xM, //output
float *v1yM,
float *v1zM);
__global__ void velocity_inner_IIC(float ca,
int *nd2_vel,
float *rhoM,
float *dxi2,
float *dyi2,
float *dzi2,
float *dxh2,
float *dyh2,
float *dzh2,
int *idmat2,
float *t2xx,
float *t2xy,
float *t2xz,
float *t2yy,
float *t2yz,
float *t2zz,
int nxbtm, //dimension #s
int nybtm,
int nzbtm,
float *v2x, //output
float *v2y,
float *v2z);
__global__ void vel_PmlX_IC(float ca,
int lbx0,
int lbx1,
int *nd1_vel,
float *rhoM,
float *drvh1,
float *drti1,
float *damp1_x,
int *idmat1,
float *dxi1,
float *dyi1,
float *dzi1,
float *dxh1,
float *dyh1,
float *dzh1,
float *t1xx,
float *t1xy,
float *t1xz,
float *t1yy,
float *t1yz,
float *t1zz,
int mw1_pml1, //dimension #
int mw1_pml,
int nxtop,
int nytop,
int nztop,
float *v1x, //output
float *v1y,
float *v1z,
float *v1x_px,
float *v1y_px,
float *v1z_px);
__global__ void vel_PmlY_IC(int nztop,
float ca,
int lby0,
int lby1,
int *nd1_vel,
float *rhoM,
float *drvh1,
float *drti1,
int *idmat1,
float *damp1_y,
float *dxi1,
float *dyi1,
float *dzi1,
float *dxh1,
float *dyh1,
float *dzh1,
float *t1xx,
float *t1xy,
float *t1xz,
float *t1yy,
float *t1yz,
float *t1zz,
int mw1_pml1, //dimension #s
int mw1_pml,
int nxtop,
int nytop,
float *v1x, //output
float *v1y,
float *v1z,
float *v1x_py,
float *v1y_py,
float *v1z_py);
__global__ void vel_PmlX_IIC(int nzbm1,
float ca,
int lbx0,
int lbx1,
int *nd2_vel,
float *drvh2,
float *drti2,
float *rhoM,
float *damp2_x,
int *idmat2,
float *dxi2,
float *dyi2,
float *dzi2,
float *dxh2,
float *dyh2,
float *dzh2,
float *t2xx,
float *t2xy,
float *t2xz,
float *t2yy,
float *t2yz,
float *t2zz,
int mw2_pml1, //dimension #s
int mw2_pml,
int nxbtm,
int nybtm,
int nzbtm,
float *v2x, //output
float *v2y,
float *v2z,
float *v2x_px,
float *v2y_px,
float *v2z_px);
__global__ void vel_PmlY_IIC(int nzbm1,
float ca,
int lby0,
int lby1,
int *nd2_vel,
float *drvh2,
float *drti2,
float *rhoM,
float *damp2_y,
int *idmat2,
float *dxi2,
float *dyi2,
float *dzi2,
float *dxh2,
float *dyh2,
float *dzh2,
float *t2xx,
float *t2xy,
float *t2xz,
float *t2yy,
float *t2yz,
float *t2zz,
int mw2_pml1,
int mw2_pml,
int nxbtm,
int nybtm,
int nzbtm,
float *v2x, //output
float *v2y,
float *v2z,
float *v2x_py,
float *v2y_py,
float *v2z_py);
__global__ void vel_PmlZ_IIC(int nzbm1,
float ca,
int *nd2_vel,
float *drvh2,
float *drti2,
float *rhoM,
float *damp2_z,
int *idmat2,
float *dxi2,
float *dyi2,
float *dzi2,
float *dxh2,
float *dyh2,
float *dzh2,
float *t2xx,
float *t2xy,
float *t2xz,
float *t2yy,
float *t2yz,
float *t2zz,
int mw2_pml1, //dimension #s
int mw2_pml,
int nxbtm,
int nybtm,
int nzbtm,
float *v2x, //output
float *v2y,
float *v2z,
float *v2x_pz,
float *v2y_pz,
float *v2z_pz);
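// The declarations above are for kernels defined later in this file. A
// launch, sketched here with assumed block/grid shapes (the real launch
// sites appear further down and may choose differently), could look like:
//
//   dim3 dimBlock(32, 8);
//   dim3 dimGrid((nxtop + dimBlock.x - 1) / dimBlock.x,
//                (nytop + dimBlock.y - 1) / dimBlock.y);
//   hipLaunchKernelGGL(velocity_inner_IC, dimGrid, dimBlock, 0, 0,
//                      nztop, nztm1, ca, nd1_velD, rhoD, idmat1D,
//                      dxi1D, dyi1D, dzi1D, dxh1D, dyh1D, dzh1D,
//                      t1xxD, t1xyD, t1xzD, t1yyD, t1yzD, t1zzD,
//                      nxtop, nytop, v1xD, v1yD, v1zD);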
#ifdef __cplusplus
extern "C" {
#endif
extern void compute_velocityCDebug( int *nztop, int *nztm1, float *ca, int *lbx,
int *lby, int *nd1_vel, float *rhoM, float *drvh1M, float *drti1M,
float *damp1_xM, float *damp1_yM, int *idmat1M,float *dxi1M, float *dyi1M,
float *dzi1M, float *dxh1M, float *dyh1M, float *dzh1M, float *t1xxM,
float *t1xyM, float *t1xzM, float *t1yyM, float *t1yzM, float *t1zzM,
void **v1xMp, void **v1yMp, void **v1zMp, float *v1x_pxM, float *v1y_pxM,
float *v1z_pxM, float *v1x_pyM, float *v1y_pyM, float *v1z_pyM,
int *nzbm1, int *nd2_vel, float *drvh2M, float *drti2M,
int *idmat2M, float *damp2_xM, float *damp2_yM, float *damp2_zM,
float *dxi2M, float *dyi2M, float *dzi2M, float *dxh2M, float *dyh2M,
float *dzh2M, float *t2xxM, float *t2xyM, float *t2xzM, float *t2yyM,
float *t2yzM, float *t2zzM, void **v2xMp, void **v2yMp, void **v2zMp,
float *v2x_pxM, float *v2y_pxM, float *v2z_pxM, float *v2x_pyM,
float *v2y_pyM, float *v2z_pyM, float *v2x_pzM, float *v2y_pzM,
float *v2z_pzM, int *nmat, int *mw1_pml1, int *mw2_pml1,
int *nxtop, int *nytop, int *mw1_pml, int *mw2_pml,
int *nxbtm, int *nybtm, int *nzbtm);
extern void compute_stressCDebug(int *nxb1, int *nyb1, int *nx1p1, int *ny1p1, int *nxtop, int *nytop, int *nztop, int *mw1_pml,
int *mw1_pml1, int *lbx, int *lby, int *nd1_txy, int *nd1_txz,
int *nd1_tyy, int *nd1_tyz, int *idmat1M, float *ca, float *drti1M, float *drth1M, float *damp1_xM, float *damp1_yM,
float *clamdaM, float *cmuM, float *epdtM, float *qwpM, float *qwsM, float *qwt1M, float *qwt2M, float *dxh1M,
float *dyh1M, float *dzh1M, float *dxi1M, float *dyi1M, float *dzi1M, float *t1xxM, float *t1xyM, float *t1xzM,
float *t1yyM, float *t1yzM, float *t1zzM, float *qt1xxM, float *qt1xyM, float *qt1xzM, float *qt1yyM, float *qt1yzM,
float *qt1zzM, float *t1xx_pxM, float *t1xy_pxM, float *t1xz_pxM, float *t1yy_pxM, float *qt1xx_pxM, float *qt1xy_pxM,
float *qt1xz_pxM, float *qt1yy_pxM, float *t1xx_pyM, float *t1xy_pyM, float *t1yy_pyM, float *t1yz_pyM, float *qt1xx_pyM,
float *qt1xy_pyM, float *qt1yy_pyM, float *qt1yz_pyM, void **v1xMp, void **v1yMp, void **v1zMp,
int *nxb2, int *nyb2, int *nxbtm, int *nybtm, int *nzbtm, int *mw2_pml, int *mw2_pml1, int *nd2_txy, int *nd2_txz,
int *nd2_tyy, int *nd2_tyz, int *idmat2M,
float *drti2M, float *drth2M, float *damp2_xM, float *damp2_yM, float *damp2_zM,
float *t2xxM, float *t2xyM, float *t2xzM, float *t2yyM, float *t2yzM, float *t2zzM,
float *qt2xxM, float *qt2xyM, float *qt2xzM, float *qt2yyM, float *qt2yzM, float *qt2zzM,
float *dxh2M, float *dyh2M, float *dzh2M, float *dxi2M, float *dyi2M, float *dzi2M,
float *t2xx_pxM, float *t2xy_pxM, float *t2xz_pxM, float *t2yy_pxM, float *t2xx_pyM, float *t2xy_pyM,
float *t2yy_pyM, float *t2yz_pyM, float *t2xx_pzM, float *t2xz_pzM, float *t2yz_pzM, float *t2zz_pzM,
float *qt2xx_pxM, float *qt2xy_pxM, float *qt2xz_pxM, float *qt2yy_pxM, float *qt2xx_pyM, float *qt2xy_pyM,
float *qt2yy_pyM, float *qt2yz_pyM, float *qt2xx_pzM, float *qt2xz_pzM, float *qt2yz_pzM, float *qt2zz_pzM,
void **v2xMp, void **v2yMp, void **v2zMp, int *myid);
void set_deviceC(int *deviceID)
{
	hipError_t cudaRes = hipSetDevice(*deviceID);
	CHECK_ERROR(cudaRes, "set_deviceC, hipSetDevice");
	//printf("[CUDA] device set success!\n");
}
//===========================================================================
void allocate_gpu_memC(int *lbx,
int *lby,
int *nmat, //dimension #, int
int *mw1_pml1, //int
int *mw2_pml1, //int
int *nxtop, //int
int *nytop, //int
int *nztop,
int *mw1_pml, //int
int *mw2_pml, //int
int *nxbtm, //int
int *nybtm, //int
int *nzbtm,
int *nzbm1,
int *nll)
{
//printf("[CUDA] allocation ...............");
int nv2, nti, nth;
hipError_t cudaRes;
// printf("lbx[1] = %d, lbx[0] = %d\n", lbx[1], lbx[0]);
// printf("lby[1] = %d, lby[0] = %d\n", lby[1], lby[0]);
// printf("nmat = %d\n", *nmat);
// printf("mw1_pml1 = %d, mw2_pml1 = %d\n", *mw1_pml1, *mw2_pml1);
// printf("mw1_pml = %d, mw2_pml = %d\n", *mw1_pml, *mw2_pml);
// printf("nxtop = %d, nytop = %d, nztop = %d\n", *nxtop, *nytop, *nztop);
// printf("nxbtm = %d, nybtm = %d, nzbtm = %d\n", *nxbtm, *nybtm, *nzbtm);
// printf("nzbm1 = %d, nll = %d\n", *nzbm1, *nll);
//debug-----------------
totalTimeH2DV = 0.0f;
totalTimeD2HV = 0.0f;
totalTimeH2DS = 0.0f;
totalTimeD2HS = 0.0f;
totalTimeCompV = 0.0f;
totalTimeCompS = 0.0f;
//for inner_I
cudaRes = hipMalloc((void **)&nd1_velD, sizeof(int) * 18);
CHECK_ERROR(cudaRes, "Allocate Device Memory1, nd1_vel");
cudaRes = hipMalloc((void **)&nd1_txyD, sizeof(int) * 18);
CHECK_ERROR(cudaRes, "Allocate Device Memory1, nd1_txy");
cudaRes = hipMalloc((void **)&nd1_txzD, sizeof(int) * 18);
CHECK_ERROR(cudaRes, "Allocate Device Memory1, nd1_txz");
cudaRes = hipMalloc((void **)&nd1_tyyD, sizeof(int) * 18);
CHECK_ERROR(cudaRes, "Allocate Device Memory1, nd1_tyy");
cudaRes = hipMalloc((void **)&nd1_tyzD, sizeof(int) * 18);
CHECK_ERROR(cudaRes, "Allocate Device Memory1, nd1_tyz");
cudaRes = hipMalloc((void **)&rhoD, sizeof(float) * (*nmat));
CHECK_ERROR(cudaRes, "Allocate Device Memory1, rho");
cudaRes = hipMalloc((void **)&drvh1D, sizeof(float) * (*mw1_pml1) * 2);
CHECK_ERROR(cudaRes, "Allocate Device Memory1, drvh1");
cudaRes = hipMalloc((void **)&drti1D, sizeof(float) * (*mw1_pml1) * 2);
CHECK_ERROR(cudaRes, "Allocate Device Memory1, drti1");
cudaRes = hipMalloc((void **)&drth1D, sizeof(float) * (*mw1_pml1) * 2);
CHECK_ERROR(cudaRes, "Allocate Device Memory1, drth1");
if (lbx[1] >= lbx[0])
{
cudaRes = hipMalloc((void **)&damp1_xD, sizeof(float) * (*nztop + 1) * (*nytop) * (lbx[1] - lbx[0] + 1));
CHECK_ERROR(cudaRes, "Allocate Device Memory1, damp1_x");
}
if (lby[1] >= lby[0])
{
cudaRes = hipMalloc((void **)&damp1_yD, sizeof(float) * (*nztop + 1) * (*nxtop) * (lby[1] - lby[0] + 1));
CHECK_ERROR(cudaRes, "Allocate Device Memory1, damp1_y");
}
cudaRes = hipMalloc((void **)&idmat1D, sizeof(int) * (*nztop + 2) * (*nxtop + 1) * (*nytop + 1));
CHECK_ERROR(cudaRes, "Allocate Device Memory1, idmat1");
cudaRes = hipMalloc((void **)&dxi1D, sizeof(float) * 4 * (*nxtop));
CHECK_ERROR(cudaRes, "Allocate Device Memory1, dxi1");
cudaRes = hipMalloc((void **)&dyi1D, sizeof(float) * 4 * (*nytop));
CHECK_ERROR(cudaRes, "Allocate Device Memory1, dyi1");
cudaRes = hipMalloc((void **)&dzi1D, sizeof(float) * 4 * (*nztop + 1));
CHECK_ERROR(cudaRes, "Allocate Device Memory1, dzi1");
cudaRes = hipMalloc((void **)&dxh1D, sizeof(float) * 4 * (*nxtop));
CHECK_ERROR(cudaRes, "Allocate Device Memory1, dxh1");
cudaRes = hipMalloc((void **)&dyh1D, sizeof(float) * 4 * (*nytop));
CHECK_ERROR(cudaRes, "Allocate Device Memory1, dyh1");
cudaRes = hipMalloc((void **)&dzh1D, sizeof(float) * 4 * (*nztop + 1));
CHECK_ERROR(cudaRes, "Allocate Device Memory1, dzh1");
cudaRes = hipMalloc((void **)&t1xxD, sizeof(float) * (*nztop) * (*nxtop + 3) * (*nytop));
CHECK_ERROR(cudaRes, "Allocate Device Memory1, t1xx");
cudaRes = hipMalloc((void **)&t1xyD, sizeof(float) * (*nztop) * (*nxtop + 3) * (*nytop + 3));
CHECK_ERROR(cudaRes, "Allocate Device Memory1, t1xy");
cudaRes = hipMalloc((void **)&t1xzD, sizeof(float) * (*nztop + 1) * (*nxtop + 3) * (*nytop));
CHECK_ERROR(cudaRes, "Allocate Device Memory1, t1xz");
cudaRes = hipMalloc((void **)&t1yyD, sizeof(float) * (*nztop) * (*nxtop) * (*nytop + 3));
CHECK_ERROR(cudaRes, "Allocate Device Memory1, t1yy");
cudaRes = hipMalloc((void **)&t1yzD, sizeof(float) * (*nztop + 1) * (*nxtop) * (*nytop + 3));
CHECK_ERROR(cudaRes, "Allocate Device Memory1, t1yz");
cudaRes = hipMalloc((void **)&t1zzD, sizeof(float) * (*nztop) * (*nxtop) * (*nytop));
CHECK_ERROR(cudaRes, "Allocate Device Memory1, t1zz");
if (lbx[1] >= lbx[0])
{
nti = (lbx[1] - lbx[0] + 1) * (*mw1_pml) + lbx[1];
nth = (lbx[1] - lbx[0] + 1) * (*mw1_pml) + 1 - lbx[0];
hipMalloc((void **)&t1xx_pxD, sizeof(float) * (*nztop) * (nti) * (*nytop));
hipMalloc((void **)&t1xy_pxD, sizeof(float) * (*nztop) * nth * (*nytop));
hipMalloc((void **)&t1xz_pxD, sizeof(float) * (*nztop+1) * nth * (*nytop));
hipMalloc((void **)&t1yy_pxD, sizeof(float) * (*nztop) * nti * (*nytop));
hipMalloc((void **)&qt1xx_pxD, sizeof(float) * (*nztop) * (nti) * (*nytop));
hipMalloc((void **)&qt1xy_pxD, sizeof(float) * (*nztop) * nth * (*nytop));
hipMalloc((void **)&qt1xz_pxD, sizeof(float) * (*nztop+1) * nth * (*nytop));
hipMalloc((void **)&qt1yy_pxD, sizeof(float) * (*nztop) * nti * (*nytop));
}
if (lby[1] >= lby[0])
{
nti = (lby[1] - lby[0] + 1) * (*mw1_pml) + lby[1];
nth = (lby[1] - lby[0] + 1) * (*mw1_pml) + 1 - lby[0];
hipMalloc((void **)&t1xx_pyD, sizeof(float) * (*nztop) * (*nxtop) * nti);
hipMalloc((void **)&t1xy_pyD, sizeof(float) * (*nztop) * (*nxtop) * nth);
hipMalloc((void **)&t1yy_pyD, sizeof(float) * (*nztop) * (*nxtop) * nti);
hipMalloc((void **)&t1yz_pyD, sizeof(float) * (*nztop+1) * (*nxtop) * nth);
hipMalloc((void **)&qt1xx_pyD, sizeof(float) * (*nztop) * (*nxtop) * nti);
hipMalloc((void **)&qt1xy_pyD, sizeof(float) * (*nztop) * (*nxtop) * nth);
hipMalloc((void **)&qt1yy_pyD, sizeof(float) * (*nztop) * (*nxtop) * nti);
hipMalloc((void **)&qt1yz_pyD, sizeof(float) * (*nztop+1) * (*nxtop) * nth);
}
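// Sizing note: nti/nth here follow the Fortran comments earlier in this
// file, nti = (lb(2) - lb(1) + 1) * mw1_pml + lb(2) and
// nth = (lb(2) - lb(1) + 1) * mw1_pml + 1 - lb(1). With hypothetical
// lbx = {1, 2} and *mw1_pml = 5 that gives nti = 2 * 5 + 2 = 12 and
// nth = 2 * 5 + 1 - 1 = 10 PML planes in x; the y block recomputes both
// from lby. Unlike most allocations in this routine, these hipMalloc
// results are not error-checked.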
hipMalloc((void **)&qt1xxD, sizeof(float) * (*nztop) * (*nxtop) * (*nytop));
hipMalloc((void **)&qt1xyD, sizeof(float) * (*nztop) * (*nxtop) * (*nytop));
hipMalloc((void **)&qt1xzD, sizeof(float) * (*nztop+1) * (*nxtop) * (*nytop));
hipMalloc((void **)&qt1yyD, sizeof(float) * (*nztop) * (*nxtop) * (*nytop));
hipMalloc((void **)&qt1yzD, sizeof(float) * (*nztop+1) * (*nxtop) * (*nytop));
hipMalloc((void **)&qt1zzD, sizeof(float) * (*nztop) * (*nxtop) * (*nytop));
hipMalloc((void **)&clamdaD, sizeof(float) * (*nmat));
hipMalloc((void **)&cmuD, sizeof(float) * (*nmat));
hipMalloc((void **)&epdtD, sizeof(float) * (*nll));
hipMalloc((void **)&qwpD, sizeof(float) * (*nmat));
hipMalloc((void **)&qwsD, sizeof(float) * (*nmat));
hipMalloc((void **)&qwt1D, sizeof(float) * (*nll));
hipMalloc((void **)&qwt2D, sizeof(float) * (*nll));
cudaRes = hipMalloc((void **)&v1xD, sizeof(float) * (*nztop + 2) * (*nxtop + 3) * (*nytop + 3));
CHECK_ERROR(cudaRes, "Allocate Device Memory1, v1x");
cudaRes = hipMalloc((void **)&v1yD, sizeof(float) * (*nztop + 2) * (*nxtop + 3) * (*nytop + 3));
CHECK_ERROR(cudaRes, "Allocate Device Memory1, v1y");
cudaRes = hipMalloc((void **)&v1zD, sizeof(float) * (*nztop + 2) * (*nxtop + 3) * (*nytop + 3));
CHECK_ERROR(cudaRes, "Allocate Device Memory1, v1z");
if (lbx[1] >= lbx[0])
{
nv2 = (lbx[1] - lbx[0] + 1) * (*mw1_pml);
cudaRes = hipMalloc((void **)&v1x_pxD, sizeof(float) * (*nztop) * nv2 * (*nytop));
CHECK_ERROR(cudaRes, "Allocate Device Memory1, v1x_px");
cudaRes = hipMalloc((void **)&v1y_pxD, sizeof(float) * (*nztop) * nv2 * (*nytop));
CHECK_ERROR(cudaRes, "Allocate Device Memory1, v1y_px");
cudaRes = hipMalloc((void **)&v1z_pxD, sizeof(float) * (*nztop) * nv2 * (*nytop));
CHECK_ERROR(cudaRes, "Allocate Device Memory1, v1z_px");
}
if (lby[1] >= lby[0])
{
nv2 = (lby[1] - lby[0] + 1) * (*mw1_pml);
cudaRes = hipMalloc((void **)&v1x_pyD, sizeof(float) * (*nztop) * (*nxtop) * nv2);
CHECK_ERROR(cudaRes, "Allocate Device Memory1, v1x_py");
cudaRes = hipMalloc((void **)&v1y_pyD, sizeof(float) * (*nztop) * (*nxtop) * nv2);
CHECK_ERROR(cudaRes, "Allocate Device Memory1, v1y_py");
cudaRes = hipMalloc((void **)&v1z_pyD, sizeof(float) * (*nztop) * (*nxtop) * nv2);
CHECK_ERROR(cudaRes, "Allocate Device Memory1, v1z_py");
}
//for inner_II-----------------------------------------------------------------------------------------
cudaRes = hipMalloc((void **)&nd2_velD, sizeof(int) * 18);
CHECK_ERROR(cudaRes, "Allocate Device Memory, nd2_vel");
cudaRes = hipMalloc((void **)&nd2_txyD, sizeof(int) * 18);
CHECK_ERROR(cudaRes, "Allocate Device Memory, nd2_txy");
cudaRes = hipMalloc((void **)&nd2_txzD, sizeof(int) * 18);
CHECK_ERROR(cudaRes, "Allocate Device Memory, nd2_txz");
cudaRes = hipMalloc((void **)&nd2_tyyD, sizeof(int) * 18);
CHECK_ERROR(cudaRes, "Allocate Device Memory, nd2_tyy");
cudaRes = hipMalloc((void **)&nd2_tyzD, sizeof(int) * 18);
CHECK_ERROR(cudaRes, "Allocate Device Memory, nd2_tyz");
cudaRes = hipMalloc((void **)&drvh2D, sizeof(float) * (*mw2_pml1) * 2);
CHECK_ERROR(cudaRes, "Allocate Device Memory, drvh2");
cudaRes = hipMalloc((void **)&drti2D, sizeof(float) * (*mw2_pml1) * 2);
CHECK_ERROR(cudaRes, "Allocate Device Memory, drti2");
cudaRes = hipMalloc((void **)&drth2D, sizeof(float) * (*mw2_pml1) * 2);
CHECK_ERROR(cudaRes, "Allocate Device Memory, drth2");
cudaRes = hipMalloc((void **)&idmat2D, sizeof(int) * (*nzbtm + 1) * (*nxbtm + 1) * (*nybtm + 1));
CHECK_ERROR(cudaRes, "Allocate Device Memory, idmat2");
if (lbx[1] >= lbx[0])
{
cudaRes = hipMalloc((void **)&damp2_xD, sizeof(float) * (*nzbtm) * (*nybtm) * (lbx[1] - lbx[0] + 1));
CHECK_ERROR(cudaRes, "Allocate Device Memory, damp2_x");
}
if (lby[1] >= lby[0])
{
cudaRes = hipMalloc((void **)&damp2_yD, sizeof(float) * (*nzbtm) * (*nxbtm) * (lby[1] - lby[0] + 1));
CHECK_ERROR(cudaRes, "Allocate Device Memory, damp2_y");
}
cudaRes = hipMalloc((void **)&damp2_zD, sizeof(float) * (*nxbtm) * (*nybtm));
CHECK_ERROR(cudaRes, "Allocate Device Memory, damp2_z");
cudaRes = hipMalloc((void **)&dxi2D, sizeof(float) * 4 * (*nxbtm));
CHECK_ERROR(cudaRes, "Allocate Device Memory, dxi2");
cudaRes = hipMalloc((void **)&dyi2D, sizeof(float) * 4 * (*nybtm));
CHECK_ERROR(cudaRes, "Allocate Device Memory, dyi2");
cudaRes = hipMalloc((void **)&dzi2D, sizeof(float) * 4 * (*nzbtm));
CHECK_ERROR(cudaRes, "Allocate Device Memory, dzi2");
cudaRes = hipMalloc((void **)&dxh2D, sizeof(float) * 4 * (*nxbtm));
CHECK_ERROR(cudaRes, "Allocate Device Memory, dxh2");
cudaRes = hipMalloc((void **)&dyh2D, sizeof(float) * 4 * (*nybtm));
CHECK_ERROR(cudaRes, "Allocate Device Memory, dyh2");
cudaRes = hipMalloc((void **)&dzh2D, sizeof(float) * 4 * (*nzbtm));
CHECK_ERROR(cudaRes, "Allocate Device Memory, dzh2");
cudaRes = hipMalloc((void **)&t2xxD, sizeof(float) * (*nzbtm) * (*nxbtm + 3) * (*nybtm));
CHECK_ERROR(cudaRes, "Allocate Device Memory, t2xx");
cudaRes = hipMalloc((void **)&t2xyD, sizeof(float) * (*nzbtm) * (*nxbtm + 3) * (*nybtm + 3));
CHECK_ERROR(cudaRes, "Allocate Device Memory, t2xy");
cudaRes = hipMalloc((void **)&t2xzD, sizeof(float) * (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm));
CHECK_ERROR(cudaRes, "Allocate Device Memory, t2xz");
cudaRes = hipMalloc((void **)&t2yyD, sizeof(float) * (*nzbtm) * (*nxbtm) * (*nybtm + 3));
CHECK_ERROR(cudaRes, "Allocate Device Memory, t2yy");
cudaRes = hipMalloc((void **)&t2yzD, sizeof(float) * (*nzbtm + 1) * (*nxbtm) * (*nybtm + 3));
CHECK_ERROR(cudaRes, "Allocate Device Memory, t2yz");
cudaRes = hipMalloc((void **)&t2zzD, sizeof(float) * (*nzbtm + 1) * (*nxbtm) * (*nybtm));
CHECK_ERROR(cudaRes, "Allocate Device Memory, t2zz");
hipMalloc((void **)&qt2xxD, sizeof(float) * (*nzbtm) * (*nxbtm) * (*nybtm));
hipMalloc((void **)&qt2xyD, sizeof(float) * (*nzbtm) * (*nxbtm) * (*nybtm));
hipMalloc((void **)&qt2xzD, sizeof(float) * (*nzbtm) * (*nxbtm) * (*nybtm));
hipMalloc((void **)&qt2yyD, sizeof(float) * (*nzbtm) * (*nxbtm) * (*nybtm));
hipMalloc((void **)&qt2yzD, sizeof(float) * (*nzbtm) * (*nxbtm) * (*nybtm));
hipMalloc((void **)&qt2zzD, sizeof(float) * (*nzbtm) * (*nxbtm) * (*nybtm));
if (lbx[1] >= lbx[0])
{
nti = (lbx[1] - lbx[0] + 1) * (*mw2_pml) + lbx[1];
nth = (lbx[1] - lbx[0] + 1) * (*mw2_pml) + 1 - lbx[0];
hipMalloc((void **)&t2xx_pxD, sizeof(float) * (*nzbtm) * nti * (*nybtm));
hipMalloc((void **)&t2xy_pxD, sizeof(float) * (*nzbtm) * nth * (*nybtm));
hipMalloc((void **)&t2xz_pxD, sizeof(float) * (*nzbtm) * nth * (*nybtm));
hipMalloc((void **)&t2yy_pxD, sizeof(float) * (*nzbtm) * nti * (*nybtm));
hipMalloc((void **)&qt2xx_pxD, sizeof(float) * (*nzbtm) * nti * (*nybtm));
hipMalloc((void **)&qt2xy_pxD, sizeof(float) * (*nzbtm) * nth * (*nybtm));
hipMalloc((void **)&qt2xz_pxD, sizeof(float) * (*nzbtm) * nth * (*nybtm));
hipMalloc((void **)&qt2yy_pxD, sizeof(float) * (*nzbtm) * nti * (*nybtm));
}
if (lby[1] >= lby[0])
{
nti = (lby[1] - lby[0] + 1) * (*mw2_pml) + lby[1];
nth = (lby[1] - lby[0] + 1) * (*mw2_pml) + 1 - lby[0];
hipMalloc((void **)&t2xx_pyD, sizeof(float) * (*nzbtm) * (*nxbtm) * nti);
hipMalloc((void **)&t2xy_pyD, sizeof(float) * (*nzbtm) * (*nxbtm) * nth);
hipMalloc((void **)&t2yy_pyD, sizeof(float) * (*nzbtm) * (*nxbtm) * nti);
hipMalloc((void **)&t2yz_pyD, sizeof(float) * (*nzbtm) * (*nxbtm) * nth);
hipMalloc((void **)&qt2xx_pyD, sizeof(float) * (*nzbtm) * (*nxbtm) * nti);
hipMalloc((void **)&qt2xy_pyD, sizeof(float) * (*nzbtm) * (*nxbtm) * nth);
hipMalloc((void **)&qt2yy_pyD, sizeof(float) * (*nzbtm) * (*nxbtm) * nti);
hipMalloc((void **)&qt2yz_pyD, sizeof(float) * (*nzbtm) * (*nxbtm) * nth);
}
hipMalloc((void **)&t2xx_pzD, sizeof(float) * (*mw2_pml) * (*nxbtm) * (*nybtm));
hipMalloc((void **)&t2xz_pzD, sizeof(float) * (*mw2_pml1) * (*nxbtm) * (*nybtm));
hipMalloc((void **)&t2yz_pzD, sizeof(float) * (*mw2_pml1) * (*nxbtm) * (*nybtm));
hipMalloc((void **)&t2zz_pzD, sizeof(float) * (*mw2_pml) * (*nxbtm) * (*nybtm));
hipMalloc((void **)&qt2xx_pzD, sizeof(float) * (*mw2_pml) * (*nxbtm) * (*nybtm));
hipMalloc((void **)&qt2xz_pzD, sizeof(float) * (*mw2_pml1) * (*nxbtm) * (*nybtm));
hipMalloc((void **)&qt2yz_pzD, sizeof(float) * (*mw2_pml1) * (*nxbtm) * (*nybtm));
hipMalloc((void **)&qt2zz_pzD, sizeof(float) * (*mw2_pml) * (*nxbtm) * (*nybtm));
hipMalloc((void **)&v2xD, sizeof(float) * (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm + 3));
hipMalloc((void **)&v2yD, sizeof(float) * (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm + 3));
hipMalloc((void **)&v2zD, sizeof(float) * (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm + 3));
if (lbx[1] >= lbx[0])
{
nv2 = (lbx[1] - lbx[0] + 1) * (*mw2_pml);
cudaRes = hipMalloc((void **)&v2x_pxD, sizeof(float) * (*nzbtm) * nv2 * (*nybtm));
CHECK_ERROR(cudaRes, "Allocate Device Memory, v2x_px");
cudaRes = hipMalloc((void **)&v2y_pxD, sizeof(float) * (*nzbtm) * nv2 * (*nybtm));
CHECK_ERROR(cudaRes, "Allocate Device Memory, v2y_px");
cudaRes = hipMalloc((void **)&v2z_pxD, sizeof(float) * (*nzbtm) * nv2 * (*nybtm));
CHECK_ERROR(cudaRes, "Allocate Device Memory, v2z_px");
}
if (lby[1] >= lby[0])
{
nv2 = (lby[1] - lby[0] + 1) * (*mw2_pml);
cudaRes = hipMalloc((void **)&v2x_pyD, sizeof(float) * (*nzbtm) * (*nxbtm) * nv2);
CHECK_ERROR(cudaRes, "Allocate Device Memory, v2x_py");
cudaRes = hipMalloc((void **)&v2y_pyD, sizeof(float) * (*nzbtm) * (*nxbtm) * nv2);
CHECK_ERROR(cudaRes, "Allocate Device Memory, v2y_py");
cudaRes = hipMalloc((void **)&v2z_pyD, sizeof(float) * (*nzbtm) * (*nxbtm) * nv2);
CHECK_ERROR(cudaRes, "Allocate Device Memory, v2z_py");
}
cudaRes = hipMalloc((void **)&v2x_pzD, sizeof(float) * (*mw2_pml) * (*nxbtm) * (*nybtm));
CHECK_ERROR(cudaRes, "Allocate Device Memory, v2x_pz");
cudaRes = hipMalloc((void **)&v2y_pzD, sizeof(float) * (*mw2_pml) * (*nxbtm) * (*nybtm));
CHECK_ERROR(cudaRes, "Allocate Device Memory, v2y_pz");
cudaRes = hipMalloc((void **)&v2z_pzD, sizeof(float) * (*mw2_pml) * (*nxbtm) * (*nybtm));
CHECK_ERROR(cudaRes, "Allocate Device Memory, v2z_pz");
//printf("done!\n");
return;
}
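// Every hipMalloc above needs a matching hipFree at shutdown. A minimal
// sketch of that cleanup (the real counterpart, if present, is elsewhere;
// the lbx/lby-conditional buffers must be guarded the same way they were
// allocated):
//
//   void free_gpu_memC(int *lbx, int *lby)
//   {
//       hipFree(nd1_velD); hipFree(nd1_txyD); hipFree(rhoD); hipFree(idmat1D);
//       hipFree(t1xxD);    hipFree(v1xD);     hipFree(v1yD); hipFree(v1zD);
//       if (lbx[1] >= lbx[0]) { hipFree(damp1_xD); hipFree(v1x_pxD); /* ... */ }
//       if (lby[1] >= lby[0]) { hipFree(damp1_yD); hipFree(v1x_pyD); /* ... */ }
//       /* ... one hipFree per remaining hipMalloc in allocate_gpu_memC ... */
//   }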
void cpy_h2d_velocityInputsCOneTime(int *lbx,
int *lby,
int *nd1_vel,
float *rho,
float *drvh1,
float *drti1,
float *damp1_x,
float *damp1_y,
int *idmat1,
float *dxi1,
float *dyi1,
float *dzi1,
float *dxh1,
float *dyh1,
float *dzh1,
float *t1xx,
float *t1xy,
float *t1xz,
float *t1yy,
float *t1yz,
float *t1zz,
float *v1x_px,
float *v1y_px,
float *v1z_px,
float *v1x_py,
float *v1y_py,
float *v1z_py,
int *nd2_vel,
float *drvh2,
float *drti2,
int *idmat2,
float *damp2_x,
float *damp2_y,
float *damp2_z,
float *dxi2,
float *dyi2,
float *dzi2,
float *dxh2,
float *dyh2,
float *dzh2,
float *t2xx,
float *t2xy,
float *t2xz,
float *t2yy,
float *t2yz,
float *t2zz,
float *v2x_px,
float *v2y_px,
float *v2z_px,
float *v2x_py,
float *v2y_py,
float *v2z_py,
float *v2x_pz,
float *v2y_pz,
float *v2z_pz,
int *nmat, //dimension #, int
int *mw1_pml1, //int
int *mw2_pml1, //int
int *nxtop, //int
int *nytop, //int
int *nztop,
int *mw1_pml, //int
int *mw2_pml, //int
int *nxbtm, //int
int *nybtm, //int
int *nzbtm,
int *nzbm1)
{
//printf("[CUDA] initial h2d cpy for velocity ........");
hipError_t cudaRes;
int nv2;
// int i;
// for(i=0; i<(*nzbtm) * (*nxbtm + 3) * (*nybtm); i++)
// {
// printf("%f ", t2xy[i]);
// }
// printf("\n");
//for inner_I
cudaRes = hipMemcpy(nd1_velD, nd1_vel, sizeof(int) * 18, hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, nd1_vel");
cudaRes = hipMemcpy(rhoD, rho, sizeof(float) * (*nmat), hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, rho");
cudaRes = hipMemcpy(drvh1D, drvh1, sizeof(float) * (*mw1_pml1) * 2, hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, drvh1");
cudaRes = hipMemcpy(drti1D, drti1, sizeof(float) * (*mw1_pml1) * 2, hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, drti1");
if (lbx[1] >= lbx[0])
{
cudaRes = hipMemcpy(damp1_xD, damp1_x,
sizeof(float) * (*nztop + 1) * (*nytop) * (lbx[1] - lbx[0] + 1),
hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, damp1_x");
}
if (lby[1] >= lby[0])
{
cudaRes = hipMemcpy(damp1_yD, damp1_y,
sizeof(float) * (*nztop + 1) * (*nxtop) * (lby[1] - lby[0] + 1),
hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, damp1_y");
}
cudaRes = hipMemcpy(idmat1D, idmat1, sizeof(int) * (*nztop + 2) * (*nxtop + 1) * (*nytop + 1), hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, idmat1");
cudaRes = hipMemcpy(dxi1D, dxi1, sizeof(float) * 4 * (*nxtop), hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, dxi1");
cudaRes = hipMemcpy(dyi1D, dyi1, sizeof(float) * 4 * (*nytop), hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, dyi1");
cudaRes = hipMemcpy(dzi1D, dzi1, sizeof(float) * 4 * (*nztop + 1), hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, dzi1");
cudaRes = hipMemcpy(dxh1D, dxh1, sizeof(float) * 4 * (*nxtop), hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, dxh1");
cudaRes = hipMemcpy(dyh1D, dyh1, sizeof(float) * 4 * (*nytop), hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, dyh1");
cudaRes = hipMemcpy(dzh1D, dzh1, sizeof(float) * 4 * (*nztop + 1), hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, dzh1");
cudaRes = hipMemcpy(t1xxD, t1xx, sizeof(float) * (*nztop) * (*nxtop + 3) * (*nytop), hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t1xx");
cudaRes = hipMemcpy(t1xyD, t1xy, sizeof(float) * (*nztop) * (*nxtop + 3) * (*nytop + 3), hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t1xy");
cudaRes = hipMemcpy(t1xzD, t1xz, sizeof(float) * (*nztop + 1) * (*nxtop + 3) * (*nytop), hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t1xz");
cudaRes = hipMemcpy(t1yyD, t1yy, sizeof(float) * (*nztop) * (*nxtop) * (*nytop + 3), hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t1yy");
cudaRes = hipMemcpy(t1yzD, t1yz, sizeof(float) * (*nztop + 1) * (*nxtop) * (*nytop + 3), hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t1yz");
cudaRes = hipMemcpy(t1zzD, t1zz, sizeof(float) * (*nztop) * (*nxtop) * (*nytop), hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t1zz");
if (lbx[1] >= lbx[0])
{
nv2 = (lbx[1] - lbx[0] + 1) * (*mw1_pml);
cudaRes = hipMemcpy(v1x_pxD, v1x_px, sizeof(float) * (*nztop) * nv2 * (*nytop), hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice1, v1x_px");
cudaRes = hipMemcpy(v1y_pxD, v1y_px, sizeof(float) * (*nztop) * nv2 * (*nytop), hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice1, v1y_px");
cudaRes = hipMemcpy(v1z_pxD, v1z_px, sizeof(float) * (*nztop) * nv2 * (*nytop), hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice1, v1z_px");
}
if (lby[1] >= lby[0])
{
nv2 = (lby[1] - lby[0] + 1) * (*mw1_pml);
cudaRes = hipMemcpy(v1x_pyD, v1x_py, sizeof(float) * (*nztop) * (*nxtop) * nv2, hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice1, v1x_py");
cudaRes = hipMemcpy(v1y_pyD, v1y_py, sizeof(float) * (*nztop) * (*nxtop) * nv2, hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice1, v1y_py");
cudaRes = hipMemcpy(v1z_pyD, v1z_py, sizeof(float) * (*nztop) * (*nxtop) * nv2, hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice1, v1z_py");
}
//for inner_II
cudaRes = hipMemcpy(nd2_velD, nd2_vel, sizeof(int) * 18, hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, nd2_vel");
cudaRes = hipMemcpy(drvh2D, drvh2, sizeof(float) * (*mw2_pml1) * 2, hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, drvh2");
cudaRes = hipMemcpy(drti2D, drti2, sizeof(float) * (*mw2_pml1) * 2, hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, drti2");
cudaRes = hipMemcpy(idmat2D, idmat2, sizeof(int) * (*nzbtm + 1) * (*nxbtm + 1) * (*nybtm +1), hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, idmat2");
if (lbx[1] >= lbx[0])
{
cudaRes = hipMemcpy(damp2_xD, damp2_x,
sizeof(float) * (*nzbtm) * (*nybtm) * (lbx[1] - lbx[0] + 1),
hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, damp2_x");
}
if (lby[1] >= lby[0])
{
cudaRes = hipMemcpy(damp2_yD, damp2_y,
sizeof(float) * (*nzbtm) * (*nxbtm) * (lby[1] - lby[0] + 1),
hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, damp2_y");
}
cudaRes = hipMemcpy(damp2_zD, damp2_z, sizeof(float) * (*nxbtm) * (*nybtm), hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, damp2_z");
cudaRes = hipMemcpy(dxi2D, dxi2, sizeof(float) * 4 * (*nxbtm), hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, dxi2");
cudaRes = hipMemcpy(dyi2D, dyi2, sizeof(float) * 4 * (*nybtm), hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, dyi2");
cudaRes = hipMemcpy(dzi2D, dzi2, sizeof(float) * 4 * (*nzbtm), hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, dzi2");
cudaRes = hipMemcpy(dxh2D, dxh2, sizeof(float) * 4 * (*nxbtm), hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, dxh2");
cudaRes = hipMemcpy(dyh2D, dyh2, sizeof(float) * 4 * (*nybtm), hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, dyh2");
cudaRes = hipMemcpy(dzh2D, dzh2, sizeof(float) * 4 * (*nzbtm), hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, dzh2");
cudaRes = hipMemcpy(t2xxD, t2xx, sizeof(float) * (*nzbtm) * (*nxbtm + 3) * (*nybtm), hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t2xx");
cudaRes = hipMemcpy(t2xyD, t2xy, sizeof(float) * (*nzbtm) * (*nxbtm + 3) * (*nybtm + 3), hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t2xy");
cudaRes = hipMemcpy(t2xzD, t2xz, sizeof(float) * (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm), hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t2xz");
cudaRes = hipMemcpy(t2yyD, t2yy, sizeof(float) * (*nzbtm) * (*nxbtm) * (*nybtm + 3), hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t2yy");
cudaRes = hipMemcpy(t2yzD, t2yz, sizeof(float) * (*nzbtm + 1) * (*nxbtm) * (*nybtm + 3), hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t2yz");
cudaRes = hipMemcpy(t2zzD, t2zz, sizeof(float) * (*nzbtm + 1) * (*nxbtm) * (*nybtm), hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t2zz");
if (lbx[1] >= lbx[0])
{
nv2 = (lbx[1] - lbx[0] + 1) * (*mw2_pml);
cudaRes = hipMemcpy(v2x_pxD, v2x_px, sizeof(float) * (*nzbtm) * nv2 * (*nybtm), hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice1, v2x_px");
cudaRes = hipMemcpy(v2y_pxD, v2y_px, sizeof(float) * (*nzbtm) * nv2 * (*nybtm), hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice1, v2y_px");
cudaRes = hipMemcpy(v2z_pxD, v2z_px, sizeof(float) * (*nzbtm) * nv2 * (*nybtm), hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice1, v2z_px");
}
if (lby[1] >= lby[0])
{
nv2 = (lby[1] - lby[0] + 1) * (*mw2_pml);
cudaRes = hipMemcpy(v2x_pyD, v2x_py, sizeof(float) * (*nzbtm) * (*nxbtm) * nv2, hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice1, v2x_py");
cudaRes = hipMemcpy(v2y_pyD, v2y_py, sizeof(float) * (*nzbtm) * (*nxbtm) * nv2, hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice1, v2y_py");
cudaRes = hipMemcpy(v2z_pyD, v2z_py, sizeof(float) * (*nzbtm) * (*nxbtm) * nv2, hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice1, v2z_py");
}
cudaRes = hipMemcpy(v2x_pzD, v2x_pz, sizeof(float) * (*mw2_pml) * (*nxbtm) * (*nybtm), hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice1, v2x_pz");
cudaRes = hipMemcpy(v2y_pzD, v2y_pz, sizeof(float) * (*mw2_pml) * (*nxbtm) * (*nybtm), hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice1, v2y_pz");
cudaRes = hipMemcpy(v2z_pzD, v2z_pz, sizeof(float) * (*mw2_pml) * (*nxbtm) * (*nybtm), hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice1, v2z_pz");
//printf("done!\n");
return;
}
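// Illustrative only (uses the elapsedTimeMs sketch near the timing globals):
// a caller could account this one-time H2D cost as
//
//   gettimeofday(&t1, NULL);
//   cpy_h2d_velocityInputsCOneTime(/* ... args ... */);
//   gettimeofday(&t2, NULL);
//   totalTimeH2DV += elapsedTimeMs(&t1, &t2);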
void cpy_h2d_velocityInputsC(float *t1xx,
float *t1xy,
float *t1xz,
float *t1yy,
float *t1yz,
float *t1zz,
float *t2xx,
float *t2xy,
float *t2xz,
float *t2yy,
float *t2yz,
float *t2zz,
int *nxtop,
int *nytop,
int *nztop,
int *nxbtm,
int *nybtm,
int *nzbtm)
{
//printf("[CUDA] h2d cpy for input ..........");
hipError_t cudaRes;
//for inner_I
cudaRes = hipMemcpy(t1xxD, t1xx, sizeof(float) * (*nztop) * (*nxtop + 3) * (*nytop), hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t1xx");
cudaRes = hipMemcpy(t1xyD, t1xy, sizeof(float) * (*nztop) * (*nxtop + 3) * (*nytop + 3), hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t1xy");
cudaRes = hipMemcpy(t1xzD, t1xz, sizeof(float) * (*nztop + 1) * (*nxtop + 3) * (*nytop), hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t1xz");
cudaRes = hipMemcpy(t1yyD, t1yy, sizeof(float) * (*nztop) * (*nxtop) * (*nytop + 3), hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t1yy");
cudaRes = hipMemcpy(t1yzD, t1yz, sizeof(float) * (*nztop + 1) * (*nxtop) * (*nytop + 3), hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t1yz");
cudaRes = hipMemcpy(t1zzD, t1zz, sizeof(float) * (*nztop) * (*nxtop) * (*nytop), hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t1zz");
//for inner_II
cudaRes = hipMemcpy(t2xxD, t2xx, sizeof(float) * (*nzbtm) * (*nxbtm + 3) * (*nybtm), hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t2xx");
cudaRes = hipMemcpy(t2xyD, t2xy, sizeof(float) * (*nzbtm) * (*nxbtm + 3) * (*nybtm + 3), hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t2xy");
cudaRes = hipMemcpy(t2xzD, t2xz, sizeof(float) * (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm), hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t2xz");
cudaRes = hipMemcpy(t2yyD, t2yy, sizeof(float) * (*nzbtm) * (*nxbtm) * (*nybtm + 3), hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t2yy");
cudaRes = hipMemcpy(t2yzD, t2yz, sizeof(float) * (*nzbtm + 1) * (*nxbtm) * (*nybtm + 3), hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t2yz");
cudaRes = hipMemcpy(t2zzD, t2zz, sizeof(float) * (*nzbtm + 1) * (*nxbtm) * (*nybtm), hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t2zz");
//printf("done!\n");
return;
}
//=====================================================================
void cpy_h2d_stressInputsCOneTime(int *lbx,
int *lby,
int *nd1_txy,
int *nd1_txz,
int *nd1_tyy,
int *nd1_tyz,
float *drti1,
float *drth1,
float *damp1_x,
float *damp1_y,
int *idmat1,
float *dxi1,
float *dyi1,
float *dzi1,
float *dxh1,
float *dyh1,
float *dzh1,
float *v1x,
float *v1y,
float *v1z,
float *t1xx_px,
float *t1xy_px,
float *t1xz_px,
float *t1yy_px,
float *qt1xx_px,
float *qt1xy_px,
float *qt1xz_px,
float *qt1yy_px,
float *t1xx_py,
float *t1xy_py,
float *t1yy_py,
float *t1yz_py,
float *qt1xx_py,
float *qt1xy_py,
float *qt1yy_py,
float *qt1yz_py,
float *qt1xx,
float *qt1xy,
float *qt1xz,
float *qt1yy,
float *qt1yz,
float *qt1zz,
float *clamda,
float *cmu,
float *epdt,
float *qwp,
float *qws,
float *qwt1,
float *qwt2,
int *nd2_txy,
int *nd2_txz,
int *nd2_tyy,
int *nd2_tyz,
float *drti2,
float *drth2,
int *idmat2,
float *damp2_x,
float *damp2_y,
float *damp2_z,
float *dxi2,
float *dyi2,
float *dzi2,
float *dxh2,
float *dyh2,
float *dzh2,
float *v2x,
float *v2y,
float *v2z,
float *qt2xx,
float *qt2xy,
float *qt2xz,
float *qt2yy,
float *qt2yz,
float *qt2zz,
float *t2xx_px,
float *t2xy_px,
float *t2xz_px,
float *t2yy_px,
float *qt2xx_px,
float *qt2xy_px,
float *qt2xz_px,
float *qt2yy_px,
float *t2xx_py,
float *t2xy_py,
float *t2yy_py,
float *t2yz_py,
float *qt2xx_py,
float *qt2xy_py,
float *qt2yy_py,
float *qt2yz_py,
float *t2xx_pz,
float *t2xz_pz,
float *t2yz_pz,
float *t2zz_pz,
float *qt2xx_pz,
float *qt2xz_pz,
float *qt2yz_pz,
float *qt2zz_pz,
int *nmat, //dimension #, int
int *mw1_pml1, //int
int *mw2_pml1, //int
int *nxtop, //int
int *nytop, //int
int *nztop,
int *mw1_pml, //int
int *mw2_pml, //int
int *nxbtm, //int
int *nybtm, //int
int *nzbtm,
int *nll)
{
//printf("[CUDA] initial h2d cpy for stress ...........");
hipError_t cudaRes;
int nti, nth;
//for inner_I
	cudaRes = hipMemcpy(nd1_txyD, nd1_txy, sizeof(int) * 18, hipMemcpyHostToDevice);
	CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, nd1_txy");
	cudaRes = hipMemcpy(nd1_txzD, nd1_txz, sizeof(int) * 18, hipMemcpyHostToDevice);
	CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, nd1_txz");
	cudaRes = hipMemcpy(nd1_tyyD, nd1_tyy, sizeof(int) * 18, hipMemcpyHostToDevice);
	CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, nd1_tyy");
	cudaRes = hipMemcpy(nd1_tyzD, nd1_tyz, sizeof(int) * 18, hipMemcpyHostToDevice);
	CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, nd1_tyz");
cudaRes = hipMemcpy(drti1D, drti1, sizeof(float) * (*mw1_pml1) * 2, hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, drti1");
cudaRes = hipMemcpy(drth1D, drth1, sizeof(float) * (*mw1_pml1) * 2, hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, drth1");
if (lbx[1] >= lbx[0])
{
cudaRes = hipMemcpy(damp1_xD, damp1_x,
sizeof(float) * (*nztop + 1) * (*nytop) * (lbx[1] - lbx[0] + 1),
hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, damp1_x");
}
if (lby[1] >= lby[0])
{
cudaRes = hipMemcpy(damp1_yD, damp1_y,
sizeof(float) * (*nztop + 1) * (*nxtop) * (lby[1] - lby[0] + 1),
hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, damp1_y");
}
cudaRes = hipMemcpy(idmat1D, idmat1, sizeof(int) * (*nztop + 2) * (*nxtop + 1) * (*nytop + 1), hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, idmat1");
cudaRes = hipMemcpy(dxi1D, dxi1, sizeof(float) * 4 * (*nxtop), hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, dxi1");
cudaRes = hipMemcpy(dyi1D, dyi1, sizeof(float) * 4 * (*nytop), hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, dyi1");
cudaRes = hipMemcpy(dzi1D, dzi1, sizeof(float) * 4 * (*nztop + 1), hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, dzi1");
cudaRes = hipMemcpy(dxh1D, dxh1, sizeof(float) * 4 * (*nxtop), hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, dxh1");
cudaRes = hipMemcpy(dyh1D, dyh1, sizeof(float) * 4 * (*nytop), hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, dyh1");
cudaRes = hipMemcpy(dzh1D, dzh1, sizeof(float) * 4 * (*nztop + 1), hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, dzh1");
hipMemcpy(v1xD, v1x, sizeof(float) * (*nztop + 2) * (*nxtop + 3) * (*nytop + 3), hipMemcpyHostToDevice);
hipMemcpy(v1yD, v1y, sizeof(float) * (*nztop + 2) * (*nxtop + 3) * (*nytop + 3), hipMemcpyHostToDevice);
hipMemcpy(v1zD, v1z, sizeof(float) * (*nztop + 2) * (*nxtop + 3) * (*nytop + 3), hipMemcpyHostToDevice);
if (lbx[1] >= lbx[0])
{
nti = (lbx[1] - lbx[0] + 1) * (*mw1_pml) + lbx[1];
nth = (lbx[1] - lbx[0] + 1) * (*mw1_pml) + 1 - lbx[0];
hipMemcpy(t1xx_pxD, t1xx_px, sizeof(float) * (*nztop) * (nti) * (*nytop), hipMemcpyHostToDevice);
//debug
//write_output(t1xx_px, (*nztop) * (nti) * (*nytop), "OUTPUT_ARRAYS/t1xx_px_cuda.txt");
hipMemcpy(t1xy_pxD, t1xy_px, sizeof(float) * (*nztop) * nth * (*nytop), hipMemcpyHostToDevice);
hipMemcpy(t1xz_pxD, t1xz_px, sizeof(float) * (*nztop+1) * nth * (*nytop), hipMemcpyHostToDevice);
hipMemcpy(t1yy_pxD, t1yy_px, sizeof(float) * (*nztop) * nti * (*nytop), hipMemcpyHostToDevice);
hipMemcpy(qt1xx_pxD, qt1xx_px, sizeof(float) * (*nztop) * (nti) * (*nytop), hipMemcpyHostToDevice);
hipMemcpy(qt1xy_pxD, qt1xy_px, sizeof(float) * (*nztop) * nth * (*nytop), hipMemcpyHostToDevice);
hipMemcpy(qt1xz_pxD, qt1xz_px, sizeof(float) * (*nztop+1) * nth * (*nytop), hipMemcpyHostToDevice);
hipMemcpy(qt1yy_pxD, qt1yy_px, sizeof(float) * (*nztop) * nti * (*nytop), hipMemcpyHostToDevice);
}
if (lby[1] >= lby[0])
{
nti = (lby[1] - lby[0] + 1) * (*mw1_pml) + lby[1];
nth = (lby[1] - lby[0] + 1) * (*mw1_pml) + 1 - lby[0];
hipMemcpy(t1xx_pyD, t1xx_py, sizeof(float) * (*nztop) * (*nxtop) * nti, hipMemcpyHostToDevice);
hipMemcpy(t1xy_pyD, t1xy_py, sizeof(float) * (*nztop) * (*nxtop) * nth, hipMemcpyHostToDevice);
hipMemcpy(t1yy_pyD, t1yy_py, sizeof(float) * (*nztop) * (*nxtop) * nti, hipMemcpyHostToDevice);
hipMemcpy(t1yz_pyD, t1yz_py, sizeof(float) * (*nztop+1) * (*nxtop) * nth, hipMemcpyHostToDevice);
hipMemcpy(qt1xx_pyD, qt1xx_py, sizeof(float) * (*nztop) * (*nxtop) * nti, hipMemcpyHostToDevice);
hipMemcpy(qt1xy_pyD, qt1xy_py, sizeof(float) * (*nztop) * (*nxtop) * nth, hipMemcpyHostToDevice);
hipMemcpy(qt1yy_pyD, qt1yy_py, sizeof(float) * (*nztop) * (*nxtop) * nti, hipMemcpyHostToDevice);
hipMemcpy(qt1yz_pyD, qt1yz_py, sizeof(float) * (*nztop+1) * (*nxtop) * nth, hipMemcpyHostToDevice);
}
hipMemcpy(qt1xxD, qt1xx, sizeof(float) * (*nztop) * (*nxtop) * (*nytop), hipMemcpyHostToDevice);
hipMemcpy(qt1xyD, qt1xy, sizeof(float) * (*nztop) * (*nxtop) * (*nytop), hipMemcpyHostToDevice);
hipMemcpy(qt1xzD, qt1xz, sizeof(float) * (*nztop+1) * (*nxtop) * (*nytop), hipMemcpyHostToDevice);
hipMemcpy(qt1yyD, qt1yy, sizeof(float) * (*nztop) * (*nxtop) * (*nytop), hipMemcpyHostToDevice);
hipMemcpy(qt1yzD, qt1yz, sizeof(float) * (*nztop+1) * (*nxtop) * (*nytop), hipMemcpyHostToDevice);
hipMemcpy(qt1zzD, qt1zz, sizeof(float) * (*nztop) * (*nxtop) * (*nytop), hipMemcpyHostToDevice);
hipMemcpy(clamdaD, clamda, sizeof(float) * (*nmat), hipMemcpyHostToDevice);
hipMemcpy(cmuD, cmu, sizeof(float) * (*nmat), hipMemcpyHostToDevice);
hipMemcpy(epdtD, epdt, sizeof(float) * (*nll), hipMemcpyHostToDevice);
hipMemcpy(qwpD, qwp, sizeof(float) * (*nmat), hipMemcpyHostToDevice);
hipMemcpy(qwsD, qws, sizeof(float) * (*nmat), hipMemcpyHostToDevice);
hipMemcpy(qwt1D, qwt1, sizeof(float) * (*nll), hipMemcpyHostToDevice);
hipMemcpy(qwt2D, qwt2, sizeof(float) * (*nll), hipMemcpyHostToDevice);
//for inner_II
	cudaRes = hipMemcpy(nd2_txyD, nd2_txy, sizeof(int) * 18, hipMemcpyHostToDevice);
	CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, nd2_txy");
	cudaRes = hipMemcpy(nd2_txzD, nd2_txz, sizeof(int) * 18, hipMemcpyHostToDevice);
	CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, nd2_txz");
	cudaRes = hipMemcpy(nd2_tyyD, nd2_tyy, sizeof(int) * 18, hipMemcpyHostToDevice);
	CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, nd2_tyy");
	cudaRes = hipMemcpy(nd2_tyzD, nd2_tyz, sizeof(int) * 18, hipMemcpyHostToDevice);
	CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, nd2_tyz");
cudaRes = hipMemcpy(drti2D, drti2, sizeof(float) * (*mw2_pml1) * 2, hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, drti2");
cudaRes = hipMemcpy(drth2D, drth2, sizeof(float) * (*mw2_pml1) * 2, hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, drth2");
cudaRes = hipMemcpy(idmat2D, idmat2, sizeof(int) * (*nzbtm + 1) * (*nxbtm + 1) * (*nybtm +1), hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, idmat2");
if (lbx[1] >= lbx[0])
{
cudaRes = hipMemcpy(damp2_xD, damp2_x,
sizeof(float) * (*nzbtm) * (*nybtm) * (lbx[1] - lbx[0] + 1),
hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, damp2_x");
}
if (lby[1] >= lby[0])
{
cudaRes = hipMemcpy(damp2_yD, damp2_y,
sizeof(float) * (*nzbtm) * (*nxbtm) * (lby[1] - lby[0] + 1),
hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, damp2_y");
}
cudaRes = hipMemcpy(damp2_zD, damp2_z, sizeof(float) * (*nxbtm) * (*nybtm), hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, damp2_z");
cudaRes = hipMemcpy(dxi2D, dxi2, sizeof(float) * 4 * (*nxbtm), hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, dxi2");
cudaRes = hipMemcpy(dyi2D, dyi2, sizeof(float) * 4 * (*nybtm), hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, dyi2");
cudaRes = hipMemcpy(dzi2D, dzi2, sizeof(float) * 4 * (*nzbtm), hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, dzi2");
cudaRes = hipMemcpy(dxh2D, dxh2, sizeof(float) * 4 * (*nxbtm), hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, dxh2");
cudaRes = hipMemcpy(dyh2D, dyh2, sizeof(float) * 4 * (*nybtm), hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, dyh2");
cudaRes = hipMemcpy(dzh2D, dzh2, sizeof(float) * 4 * (*nzbtm), hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, dzh2");
hipMemcpy(v2xD, v2x, sizeof(float) * (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm + 3), hipMemcpyHostToDevice);
hipMemcpy(v2yD, v2y, sizeof(float) * (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm + 3), hipMemcpyHostToDevice);
hipMemcpy(v2zD, v2z, sizeof(float) * (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm + 3), hipMemcpyHostToDevice);
hipMemcpy(qt2xxD, qt2xx, sizeof(float) * (*nzbtm) * (*nxbtm) * (*nybtm), hipMemcpyHostToDevice);
hipMemcpy(qt2xyD, qt2xy, sizeof(float) * (*nzbtm) * (*nxbtm) * (*nybtm), hipMemcpyHostToDevice);
hipMemcpy(qt2xzD, qt2xz, sizeof(float) * (*nzbtm) * (*nxbtm) * (*nybtm), hipMemcpyHostToDevice);
hipMemcpy(qt2yyD, qt2yy, sizeof(float) * (*nzbtm) * (*nxbtm) * (*nybtm), hipMemcpyHostToDevice);
hipMemcpy(qt2yzD, qt2yz, sizeof(float) * (*nzbtm) * (*nxbtm) * (*nybtm), hipMemcpyHostToDevice);
hipMemcpy(qt2zzD, qt2zz, sizeof(float) * (*nzbtm) * (*nxbtm) * (*nybtm), hipMemcpyHostToDevice);
if (lbx[1] >= lbx[0])
{
nti = (lbx[1] - lbx[0] + 1) * (*mw2_pml) + lbx[1];
nth = (lbx[1] - lbx[0] + 1) * (*mw2_pml) + 1 - lbx[0];
hipMemcpy(t2xx_pxD, t2xx_px, sizeof(float) * (*nzbtm) * nti * (*nybtm), hipMemcpyHostToDevice);
hipMemcpy(t2xy_pxD, t2xy_px, sizeof(float) * (*nzbtm) * nth * (*nybtm), hipMemcpyHostToDevice);
hipMemcpy(t2xz_pxD, t2xz_px, sizeof(float) * (*nzbtm) * nth * (*nybtm), hipMemcpyHostToDevice);
hipMemcpy(t2yy_pxD, t2yy_px, sizeof(float) * (*nzbtm) * nti * (*nybtm), hipMemcpyHostToDevice);
hipMemcpy(qt2xx_pxD, qt2xx_px, sizeof(float) * (*nzbtm) * nti * (*nybtm), hipMemcpyHostToDevice);
hipMemcpy(qt2xy_pxD, qt2xy_px, sizeof(float) * (*nzbtm) * nth * (*nybtm), hipMemcpyHostToDevice);
hipMemcpy(qt2xz_pxD, qt2xz_px, sizeof(float) * (*nzbtm) * nth * (*nybtm), hipMemcpyHostToDevice);
hipMemcpy(qt2yy_pxD, qt2yy_px, sizeof(float) * (*nzbtm) * nti * (*nybtm), hipMemcpyHostToDevice);
}
if (lby[1] >= lby[0])
{
nti = (lby[1] - lby[0] + 1) * (*mw2_pml) + lby[1];
nth = (lby[1] - lby[0] + 1) * (*mw2_pml) + 1 - lby[0];
hipMemcpy(t2xx_pyD, t2xx_py, sizeof(float) * (*nzbtm) * (*nxbtm) * nti, hipMemcpyHostToDevice);
hipMemcpy(t2xy_pyD, t2xy_py, sizeof(float) * (*nzbtm) * (*nxbtm) * nth, hipMemcpyHostToDevice);
hipMemcpy(t2yy_pyD, t2yy_py, sizeof(float) * (*nzbtm) * (*nxbtm) * nti, hipMemcpyHostToDevice);
hipMemcpy(t2yz_pyD, t2yz_py, sizeof(float) * (*nzbtm) * (*nxbtm) * nth, hipMemcpyHostToDevice);
hipMemcpy(qt2xx_pyD, qt2xx_py, sizeof(float) * (*nzbtm) * (*nxbtm) * nti, hipMemcpyHostToDevice);
hipMemcpy(qt2xy_pyD, qt2xy_py, sizeof(float) * (*nzbtm) * (*nxbtm) * nth, hipMemcpyHostToDevice);
hipMemcpy(qt2yy_pyD, qt2yy_py, sizeof(float) * (*nzbtm) * (*nxbtm) * nti, hipMemcpyHostToDevice);
hipMemcpy(qt2yz_pyD, qt2yz_py, sizeof(float) * (*nzbtm) * (*nxbtm) * nth, hipMemcpyHostToDevice);
}
hipMemcpy(t2xx_pzD, t2xx_pz, sizeof(float) * (*mw2_pml) * (*nxbtm) * (*nybtm), hipMemcpyHostToDevice);
hipMemcpy(t2xz_pzD, t2xz_pz, sizeof(float) * (*mw2_pml1) * (*nxbtm) * (*nybtm), hipMemcpyHostToDevice);
hipMemcpy(t2yz_pzD, t2yz_pz, sizeof(float) * (*mw2_pml1) * (*nxbtm) * (*nybtm), hipMemcpyHostToDevice);
hipMemcpy(t2zz_pzD, t2zz_pz, sizeof(float) * (*mw2_pml) * (*nxbtm) * (*nybtm), hipMemcpyHostToDevice);
hipMemcpy(qt2xx_pzD, qt2xx_pz, sizeof(float) * (*mw2_pml) * (*nxbtm) * (*nybtm), hipMemcpyHostToDevice);
hipMemcpy(qt2xz_pzD, qt2xz_pz, sizeof(float) * (*mw2_pml1) * (*nxbtm) * (*nybtm), hipMemcpyHostToDevice);
hipMemcpy(qt2yz_pzD, qt2yz_pz, sizeof(float) * (*mw2_pml1) * (*nxbtm) * (*nybtm), hipMemcpyHostToDevice);
hipMemcpy(qt2zz_pzD, qt2zz_pz, sizeof(float) * (*mw2_pml) * (*nxbtm) * (*nybtm), hipMemcpyHostToDevice);
//printf("done!\n");
return;
}
void cpy_h2d_stressInputsC(float *v1x,
float *v1y,
float *v1z,
float *v2x,
float *v2y,
float *v2z,
int *nxtop,
int *nytop,
int *nztop,
int *nxbtm,
int *nybtm,
int *nzbtm)
{
//printf("[CUDA] h2d cpy for input ..............");
	hipError_t cudaRes;
	//for inner_I
	cudaRes = hipMemcpy(v1xD, v1x, sizeof(float) * (*nztop + 2) * (*nxtop + 3) * (*nytop + 3), hipMemcpyHostToDevice);
	CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, v1x");
	cudaRes = hipMemcpy(v1yD, v1y, sizeof(float) * (*nztop + 2) * (*nxtop + 3) * (*nytop + 3), hipMemcpyHostToDevice);
	CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, v1y");
	cudaRes = hipMemcpy(v1zD, v1z, sizeof(float) * (*nztop + 2) * (*nxtop + 3) * (*nytop + 3), hipMemcpyHostToDevice);
	CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, v1z");
	//for inner_II
	cudaRes = hipMemcpy(v2xD, v2x, sizeof(float) * (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm + 3), hipMemcpyHostToDevice);
	CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, v2x");
	cudaRes = hipMemcpy(v2yD, v2y, sizeof(float) * (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm + 3), hipMemcpyHostToDevice);
	CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, v2y");
	cudaRes = hipMemcpy(v2zD, v2z, sizeof(float) * (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm + 3), hipMemcpyHostToDevice);
	CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, v2z");
//printf("done!\n");
return;
}
//=====================================================================
void cpy_h2d_velocityOutputsC(float *v1x,
float *v1y,
float *v1z,
float *v2x,
float *v2y,
float *v2z,
int *nxtop,
int *nytop,
int *nztop,
int *nxbtm,
int *nybtm,
int *nzbtm)
{
//printf("[CUDA] h2d cpy for output .........");
hipError_t cudaRes;
//for inner_I
cudaRes = hipMemcpy(v1xD, v1x, sizeof(float) * (*nztop + 2) * (*nxtop + 3) * (*nytop + 3), hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice1, v1x");
cudaRes = hipMemcpy(v1yD, v1y, sizeof(float) * (*nztop + 2) * (*nxtop + 3) * (*nytop + 3), hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice1, v1y");
cudaRes = hipMemcpy(v1zD, v1z, sizeof(float) * (*nztop + 2) * (*nxtop + 3) * (*nytop + 3), hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice1, v1z");
//for inner_II
cudaRes = hipMemcpy(v2xD, v2x, sizeof(float) * (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm + 3), hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice1, v2x");
cudaRes = hipMemcpy(v2yD, v2y, sizeof(float) * (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm + 3), hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice1, v2y");
cudaRes = hipMemcpy(v2zD, v2z, sizeof(float) * (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm + 3), hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice1, v2z");
//printf("done!\n");
return;
}
//=====================================================================
void cpy_d2h_velocityOutputsC(float *v1x,
float *v1y,
float *v1z,
float *v2x,
float *v2y,
float *v2z,
int *nxtop,
int *nytop,
int *nztop,
int *nxbtm,
int *nybtm,
int *nzbtm)
{
//printf("[CUDA] d2h cpy for output .........");
hipError_t cudaRes;
//for inner_I
cudaRes = hipMemcpy(v1x, v1xD, sizeof(float) * (*nztop + 2) * (*nxtop + 3) * (*nytop + 3), hipMemcpyDeviceToHost);
CHECK_ERROR(cudaRes, "outputDataCopyDeviceToHost1, v1x");
cudaRes = hipMemcpy(v1y, v1yD, sizeof(float) * (*nztop + 2) * (*nxtop + 3) * (*nytop + 3), hipMemcpyDeviceToHost);
CHECK_ERROR(cudaRes, "outputDataCopyDeviceToHost1, v1y");
cudaRes = hipMemcpy(v1z, v1zD, sizeof(float) * (*nztop + 2) * (*nxtop + 3) * (*nytop + 3), hipMemcpyDeviceToHost);
CHECK_ERROR(cudaRes, "outputDataCopyDeviceToHost1, v1z");
//for inner_II
cudaRes = hipMemcpy(v2x, v2xD, sizeof(float) * (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm + 3), hipMemcpyDeviceToHost);
CHECK_ERROR(cudaRes, "outputDataCopyDeviceToHost1, v2x");
cudaRes = hipMemcpy(v2y, v2yD, sizeof(float) * (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm + 3), hipMemcpyDeviceToHost);
CHECK_ERROR(cudaRes, "outputDataCopyDeviceToHost1, v2y");
cudaRes = hipMemcpy(v2z, v2zD, sizeof(float) * (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm + 3), hipMemcpyDeviceToHost);
CHECK_ERROR(cudaRes, "outputDataCopyDeviceToHost1, vzz");
//printf("done!\n");
return;
}
void cpy_h2d_stressOutputsC(float *t1xx,
float *t1xy,
float *t1xz,
float *t1yy,
float *t1yz,
float *t1zz,
float *t2xx,
float *t2xy,
float *t2xz,
float *t2yy,
float *t2yz,
float *t2zz,
int *nxtop,
int *nytop,
int *nztop,
int *nxbtm,
int *nybtm,
int *nzbtm)
{
//printf("[CUDA] h2d cpy for output ..............");
hipError_t cudaRes;
int nth, nti;
cudaRes = hipMemcpy(t1xxD, t1xx, sizeof(float) * (*nztop) * (*nxtop + 3) * (*nytop), hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice, t1xx");
cudaRes = hipMemcpy(t1xyD, t1xy, sizeof(float) * (*nztop) * (*nxtop + 3) * (*nytop + 3), hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice, t1xy");
cudaRes = hipMemcpy(t1xzD, t1xz, sizeof(float) * (*nztop + 1) * (*nxtop + 3) * (*nytop), hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice, t1xz");
cudaRes = hipMemcpy(t1yyD, t1yy, sizeof(float) * (*nztop) * (*nxtop) * (*nytop + 3), hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice, t1yy");
cudaRes = hipMemcpy(t1yzD, t1yz, sizeof(float) * (*nztop + 1) * (*nxtop) * (*nytop + 3), hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice, t1yz");
cudaRes = hipMemcpy(t1zzD, t1zz, sizeof(float) * (*nztop) * (*nxtop) * (*nytop), hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice, t1zz");
//for inner_II
cudaRes = hipMemcpy(t2xxD, t2xx, sizeof(float) * (*nzbtm) * (*nxbtm + 3) * (*nybtm), hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice, t2xx");
cudaRes = hipMemcpy(t2xyD, t2xy, sizeof(float) * (*nzbtm) * (*nxbtm + 3) * (*nybtm + 3), hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice, t2xy");
cudaRes = hipMemcpy(t2xzD, t2xz, sizeof(float) * (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm), hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice, t2xz");
cudaRes = hipMemcpy(t2yyD, t2yy, sizeof(float) * (*nzbtm) * (*nxbtm) * (*nybtm + 3), hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice, t2yy");
cudaRes = hipMemcpy(t2yzD, t2yz, sizeof(float) * (*nzbtm + 1) * (*nxbtm) * (*nybtm + 3), hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice, t2yz");
cudaRes = hipMemcpy(t2zzD, t2zz, sizeof(float) * (*nzbtm + 1) * (*nxbtm) * (*nybtm), hipMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice, t2zz");
//printf("done!\n");
return;
}
void cpy_d2h_stressOutputsC(float *t1xx,
float *t1xy,
float *t1xz,
float *t1yy,
float *t1yz,
float *t1zz,
float *t2xx,
float *t2xy,
float *t2xz,
float *t2yy,
float *t2yz,
float *t2zz,
int *nxtop,
int *nytop,
int *nztop,
int *nxbtm,
int *nybtm,
int *nzbtm)
{
//printf("[CUDA] stress cpy d2h for output .....");
// printf("\nnxtop=%d, nytop=%d, nztop=%d\n", *nxtop, *nytop, *nztop);
// printf("nxbtm=%d, nybtm=%d, nzbtm=%d\n", *nxbtm, *nybtm, *nzbtm);
hipError_t cudaRes;
cudaRes = hipMemcpy(t1xx, t1xxD, sizeof(float) * (*nztop) * (*nxtop + 3) * (*nytop), hipMemcpyDeviceToHost);
CHECK_ERROR(cudaRes, "outputDataCopyDeviceToHost, t1xx");
cudaRes = hipMemcpy(t1xy, t1xyD, sizeof(float) * (*nztop) * (*nxtop + 3) * (*nytop + 3), hipMemcpyDeviceToHost);
CHECK_ERROR(cudaRes, "outputDataCopyDeviceToHost, t1xy");
cudaRes = hipMemcpy(t1xz, t1xzD, sizeof(float) * (*nztop + 1) * (*nxtop + 3) * (*nytop), hipMemcpyDeviceToHost);
CHECK_ERROR(cudaRes, "outputDataCopyDeviceToHost, t1xz");
cudaRes = hipMemcpy(t1yy, t1yyD, sizeof(float) * (*nztop) * (*nxtop) * (*nytop + 3), hipMemcpyDeviceToHost);
CHECK_ERROR(cudaRes, "outputDataCopyDeviceToHost, t1yy");
cudaRes = hipMemcpy(t1yz, t1yzD, sizeof(float) * (*nztop + 1) * (*nxtop) * (*nytop + 3), hipMemcpyDeviceToHost);
CHECK_ERROR(cudaRes, "outputDataCopyDeviceToHost, t1yz");
cudaRes = hipMemcpy(t1zz, t1zzD, sizeof(float) * (*nztop) * (*nxtop) * (*nytop), hipMemcpyDeviceToHost);
CHECK_ERROR(cudaRes, "outputDataCopyDeviceToHost, t1zz");
cudaRes = hipMemcpy(t2xx, t2xxD, sizeof(float) * (*nzbtm) * (*nxbtm + 3) * (*nybtm), hipMemcpyDeviceToHost);
CHECK_ERROR(cudaRes, "outputDataCopyDeviceToHost, t2xx");
cudaRes = hipMemcpy(t2xy, t2xyD, sizeof(float) * (*nzbtm) * (*nxbtm + 3) * (*nybtm + 3), hipMemcpyDeviceToHost);
CHECK_ERROR(cudaRes, "outputDataCopyDeviceToHost, t2xy");
cudaRes = hipMemcpy(t2xz, t2xzD, sizeof(float) * (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm), hipMemcpyDeviceToHost);
CHECK_ERROR(cudaRes, "outputDataCopyDeviceToHost, t2xz");
cudaRes = hipMemcpy(t2yy, t2yyD, sizeof(float) * (*nzbtm) * (*nxbtm) * (*nybtm + 3), hipMemcpyDeviceToHost);
CHECK_ERROR(cudaRes, "outputDataCopyDeviceToHost, t2yy");
cudaRes = hipMemcpy(t2yz, t2yzD, sizeof(float) * (*nzbtm + 1) * (*nxbtm) * (*nybtm + 3), hipMemcpyDeviceToHost);
CHECK_ERROR(cudaRes, "outputDataCopyDeviceToHost, t2yz");
cudaRes = hipMemcpy(t2zz, t2zzD, sizeof(float) * (*nzbtm + 1) * (*nxbtm) * (*nybtm), hipMemcpyDeviceToHost);
CHECK_ERROR(cudaRes, "outputDataCopyDeviceToHost, t2zz");
//printf("done!\n");
// int i;
// for(i=0; i<(*nzbtm) * (*nxbtm + 3) * (*nybtm); i++)
// {
// //printf("%f ", t2xx[i]);
// }
// printf("\n");
return;
}
void free_device_memC(int *lbx, int *lby)
{
//debug---------------------------------------------------
printf("[CUDA] id = %d, vel, H2D =, %.3f, D2H =, %.3f, comp =, %lf\n", procID, totalTimeH2DV, totalTimeD2HV, totalTimeCompV);
printf("[CUDA] id = %d, str, H2D =, %.3f, D2H =, %.3f, comp =, %lf\n", procID, totalTimeH2DS, totalTimeD2HS, totalTimeCompS);
//-------------------------------------------------
hipFree(nd1_velD);
hipFree(nd1_txyD);
hipFree(nd1_txzD);
hipFree(nd1_tyyD);
hipFree(nd1_tyzD);
hipFree(rhoD);
hipFree(drvh1D);
hipFree(drti1D);
hipFree(drth1D);
hipFree(idmat1D);
hipFree(dxi1D);
hipFree(dyi1D);
hipFree(dzi1D);
hipFree(dxh1D);
hipFree(dyh1D);
hipFree(dzh1D);
hipFree(t1xxD);
hipFree(t1xyD);
hipFree(t1xzD);
hipFree(t1yyD);
hipFree(t1yzD);
hipFree(t1zzD);
hipFree(v1xD); //output
hipFree(v1yD);
hipFree(v1zD);
if (lbx[1] >= lbx[0])
{
hipFree(damp1_xD);
hipFree(t1xx_pxD);
hipFree(t1xy_pxD);
hipFree(t1xz_pxD);
hipFree(t1yy_pxD);
hipFree(qt1xx_pxD);
hipFree(qt1xy_pxD);
hipFree(qt1xz_pxD);
hipFree(qt1yy_pxD);
hipFree(v1x_pxD);
hipFree(v1y_pxD);
hipFree(v1z_pxD);
}
if (lby[1] >= lby[0])
{
hipFree(damp1_yD);
hipFree(t1xx_pyD);
hipFree(t1xy_pyD);
hipFree(t1yy_pyD);
hipFree(t1yz_pyD);
hipFree(qt1xx_pyD);
hipFree(qt1xy_pyD);
hipFree(qt1yy_pyD);
hipFree(qt1yz_pyD);
hipFree(v1x_pyD);
hipFree(v1y_pyD);
hipFree(v1z_pyD);
}
hipFree(qt1xxD);
hipFree(qt1xyD);
hipFree(qt1xzD);
hipFree(qt1yyD);
hipFree(qt1yzD);
hipFree(qt1zzD);
hipFree(clamdaD);
hipFree(cmuD);
hipFree(epdtD);
hipFree(qwpD);
hipFree(qwsD);
hipFree(qwt1D);
hipFree(qwt2D);
//-------------------------------------
hipFree(nd2_velD);
hipFree(nd2_txyD);
hipFree(nd2_txzD);
hipFree(nd2_tyyD);
hipFree(nd2_tyzD);
hipFree(drvh2D);
hipFree(drti2D);
hipFree(drth2D);
hipFree(idmat2D);
hipFree(damp2_zD);
hipFree(dxi2D);
hipFree(dyi2D);
hipFree(dzi2D);
hipFree(dxh2D);
hipFree(dyh2D);
hipFree(dzh2D);
hipFree(t2xxD);
hipFree(t2xyD);
hipFree(t2xzD);
hipFree(t2yyD);
hipFree(t2yzD);
hipFree(t2zzD);
hipFree(qt2xxD);
hipFree(qt2xyD);
hipFree(qt2xzD);
hipFree(qt2yyD);
hipFree(qt2yzD);
hipFree(qt2zzD);
if (lbx[1] >= lbx[0])
{
hipFree(damp2_xD);
hipFree(t2xx_pxD);
hipFree(t2xy_pxD);
hipFree(t2xz_pxD);
hipFree(t2yy_pxD);
hipFree(qt2xx_pxD);
hipFree(qt2xy_pxD);
hipFree(qt2xz_pxD);
hipFree(qt2yy_pxD);
hipFree(v2x_pxD);
hipFree(v2y_pxD);
hipFree(v2z_pxD);
}
if (lby[1] >= lby[0])
{
hipFree(damp2_yD);
hipFree(t2xx_pyD);
hipFree(t2xy_pyD);
hipFree(t2yy_pyD);
hipFree(t2yz_pyD);
hipFree(qt2xx_pyD);
hipFree(qt2xy_pyD);
hipFree(qt2yy_pyD);
hipFree(qt2yz_pyD);
hipFree(v2x_pyD);
hipFree(v2y_pyD);
hipFree(v2z_pyD);
}
hipFree(t2xx_pzD);
hipFree(t2xz_pzD);
hipFree(t2yz_pzD);
hipFree(t2zz_pzD);
hipFree(qt2xx_pzD);
hipFree(qt2xz_pzD);
hipFree(qt2yz_pzD);
hipFree(qt2zz_pzD);
hipFree(v2xD); //output
hipFree(v2yD);
hipFree(v2zD);
hipFree(v2x_pzD);
hipFree(v2y_pzD);
hipFree(v2z_pzD);
//printf("[CUDA] memory space is freed.\n");
return;
}
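//=====================================================================
// compute_velocityC drives one velocity-update step: copy the stress
// inputs and current velocities to the device, launch the interior
// kernel plus the PML-x/PML-y kernels for regions I and II and the
// PML-z kernel for region II, synchronize, then copy the updated
// velocities back. H2D/compute/D2H times are accumulated in
// totalTimeH2DV, totalTimeCompV and totalTimeD2HV.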
void compute_velocityC(int *nztop, int *nztm1, float *ca, int *lbx,
int *lby, int *nd1_vel, float *rhoM, float *drvh1M, float *drti1M,
float *damp1_xM, float *damp1_yM, int *idmat1M, float *dxi1M, float *dyi1M,
float *dzi1M, float *dxh1M, float *dyh1M, float *dzh1M, float *t1xxM,
float *t1xyM, float *t1xzM, float *t1yyM, float *t1yzM, float *t1zzM,
void **v1xMp, void **v1yMp, void **v1zMp, float *v1x_pxM, float *v1y_pxM,
float *v1z_pxM, float *v1x_pyM, float *v1y_pyM, float *v1z_pyM,
int *nzbm1, int *nd2_vel, float *drvh2M, float *drti2M,
int *idmat2M, float *damp2_xM, float *damp2_yM, float *damp2_zM,
float *dxi2M, float *dyi2M, float *dzi2M, float *dxh2M, float *dyh2M,
float *dzh2M, float *t2xxM, float *t2xyM, float *t2xzM, float *t2yyM,
float *t2yzM, float *t2zzM, void **v2xMp, void **v2yMp, void **v2zMp,
float *v2x_pxM, float *v2y_pxM, float *v2z_pxM, float *v2x_pyM,
float *v2y_pyM, float *v2z_pyM, float *v2x_pzM, float *v2y_pzM,
float *v2z_pzM, int *nmat, int *mw1_pml1, int *mw2_pml1,
int *nxtop, int *nytop, int *mw1_pml, int *mw2_pml,
int *nxbtm, int *nybtm, int *nzbtm, int *myid)
{
//printf("[CUDA] velocity computation:\n");
	//define the dimensions of the different kernels
int blockSizeX = 8;
int blockSizeY = 8;
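	// 8x8 thread blocks; each grid below is sized (hi - lo)/blockSize + 1
	// so the inclusive Fortran-style index range [lo, hi] is covered, and
	// every kernel re-checks its bounds and returns early for excess threads.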
float *v1xM, *v1yM, *v1zM, *v2xM, *v2yM, *v2zM;
// extract specific input/output pointers
v1xM=(float *) *v1xMp;
v1yM=(float *) *v1yMp;
v1zM=(float *) *v1zMp;
v2xM=(float *) *v2xMp;
v2yM=(float *) *v2yMp;
v2zM=(float *) *v2zMp;
procID = *myid;
gettimeofday(&t1, NULL);
cpy_h2d_velocityInputsC(t1xxM, t1xyM, t1xzM, t1yyM, t1yzM, t1zzM, t2xxM, t2xyM, t2xzM,
t2yyM, t2yzM, t2zzM, nxtop, nytop, nztop, nxbtm, nybtm, nzbtm);
cpy_h2d_velocityOutputsC(v1xM, v1yM, v1zM, v2xM, v2yM, v2zM, nxtop, nytop, nztop, nxbtm, nybtm, nzbtm);
gettimeofday(&t2, NULL);
tmpTime = 1000.0 * (t2.tv_sec - t1.tv_sec) + (t2.tv_usec - t1.tv_usec) / 1000.0;
totalTimeH2DV += tmpTime;
gettimeofday(&t1, NULL);
dim3 dimBlock(blockSizeX, blockSizeY);
int gridSizeX1 = (nd1_vel[3] - nd1_vel[2])/blockSizeX + 1;
int gridSizeY1 = (nd1_vel[9] - nd1_vel[8])/blockSizeY + 1;
dim3 dimGrid1(gridSizeX1, gridSizeY1);
// printf("myid = %d, grid1 = (%d, %d)\n", *myid, gridSizeX1, gridSizeY1);
//CUDA code
hipLaunchKernelGGL(( velocity_inner_IC), dim3(dimGrid1), dim3(dimBlock), 0, 0, *nztop,
*nztm1,
*ca,
nd1_velD,
rhoD,
idmat1D,
dxi1D,
dyi1D,
dzi1D,
dxh1D,
dyh1D,
dzh1D,
t1xxD,
t1xyD,
t1xzD,
t1yyD,
t1yzD,
t1zzD,
*nxtop, //dimension #
*nytop,
v1xD, //output
v1yD,
v1zD);
// printf("velocity_inner_IC called!\n");
int gridSizeX2 = (nd1_vel[5] - nd1_vel[0])/blockSizeX + 1;
int gridSizeY2 = (lbx[1] - lbx[0])/blockSizeY + 1;
dim3 dimGrid2(gridSizeX2, gridSizeY2);
// printf("myid = %d, grid2 = (%d, %d)\n", *myid, gridSizeX2, gridSizeY2);
if (lbx[1] >= lbx[0])
{
hipLaunchKernelGGL(( vel_PmlX_IC), dim3(dimGrid2), dim3(dimBlock), 0, 0, *ca,
lbx[0],
lbx[1],
nd1_velD,
rhoD,
drvh1D,
drti1D,
damp1_xD,
idmat1D,
dxi1D,
dyi1D,
dzi1D,
dxh1D,
dyh1D,
dzh1D,
t1xxD,
t1xyD,
t1xzD,
t1yyD,
t1yzD,
t1zzD,
*mw1_pml1, //dimension #
*mw1_pml,
*nxtop,
*nytop,
*nztop,
v1xD, //output
v1yD,
v1zD,
v1x_pxD,
v1y_pxD,
v1z_pxD);
// printf("vel_PmlX_IC called!\n");
}
int gridSizeX3 = (lby[1] - lby[0])/blockSizeX + 1;
int gridSizeY3 = (nd1_vel[11] - nd1_vel[6])/blockSizeY + 1;
dim3 dimGrid3(gridSizeX3, gridSizeY3);
// printf("myid = %d, grid3 = (%d, %d)\n", *myid, gridSizeX3, gridSizeY3);
if (lby[1] >= lby[0])
{
hipLaunchKernelGGL(( vel_PmlY_IC), dim3(dimGrid3), dim3(dimBlock), 0, 0, *nztop,
*ca,
lby[0],
lby[1],
nd1_velD,
rhoD,
drvh1D,
drti1D,
idmat1D,
damp1_yD,
dxi1D,
dyi1D,
dzi1D,
dxh1D,
dyh1D,
dzh1D,
t1xxD,
t1xyD,
t1xzD,
t1yyD,
t1yzD,
t1zzD,
*mw1_pml1, //dimension #s
*mw1_pml,
*nxtop,
*nytop,
v1xD, //output
v1yD,
v1zD,
v1x_pyD,
v1y_pyD,
v1z_pyD);
// printf("vel_PmlY_IC called!\n");
}
int gridSizeX4 = (nd2_vel[3] - nd2_vel[2])/blockSizeX + 1;
int gridSizeY4 = (nd2_vel[9] - nd2_vel[8])/blockSizeY + 1;
dim3 dimGrid4(gridSizeX4, gridSizeY4);
// printf("myid = %d, grid4 = (%d, %d)\n", *myid, gridSizeX4, gridSizeY4);
hipLaunchKernelGGL(( velocity_inner_IIC), dim3(dimGrid4), dim3(dimBlock), 0, 0, *ca,
nd2_velD,
rhoD,
dxi2D,
dyi2D,
dzi2D,
dxh2D,
dyh2D,
dzh2D,
idmat2D,
t2xxD,
t2xyD,
t2xzD,
t2yyD,
t2yzD,
t2zzD,
*nxbtm,
*nybtm,
*nzbtm,
v2xD, //output
v2yD,
v2zD);
// printf("velocity_inner_IIC called!\n");
int gridSizeX5 = (nd2_vel[5] - nd2_vel[0])/blockSizeX + 1;
int gridSizeY5 = (lbx[1] - lbx[0])/blockSizeY + 1;
dim3 dimGrid5(gridSizeX5, gridSizeY5);
// printf("myid = %d, grid5 = (%d, %d)\n", *myid, gridSizeX5, gridSizeY5);
if (lbx[1] >= lbx[0])
{
hipLaunchKernelGGL(( vel_PmlX_IIC), dim3(dimGrid5), dim3(dimBlock), 0, 0, *nzbm1,
*ca,
lbx[0],
lbx[1],
nd2_velD,
drvh2D,
drti2D,
rhoD,
damp2_xD,
idmat2D,
dxi2D,
dyi2D,
dzi2D,
dxh2D,
dyh2D,
dzh2D,
t2xxD,
t2xyD,
t2xzD,
t2yyD,
t2yzD,
t2zzD,
*mw2_pml1, //dimension #s
*mw2_pml,
*nxbtm,
*nybtm,
*nzbtm,
v2xD, //output
v2yD,
v2zD,
v2x_pxD,
v2y_pxD,
v2z_pxD);
// printf("vel_PmlX_IIC called!\n");
}
int gridSizeX6 = (lby[1] - lby[0])/blockSizeX + 1;
int gridSizeY6 = (nd2_vel[11] - nd2_vel[6])/blockSizeY + 1;
dim3 dimGrid6(gridSizeX6, gridSizeY6);
// printf("myid = %d, grid = (%d, %d)\n", *myid, gridSizeX6, gridSizeY6);
if (lby[1] >= lby[0])
{
hipLaunchKernelGGL(( vel_PmlY_IIC), dim3(dimGrid6), dim3(dimBlock), 0, 0, *nzbm1,
*ca,
lby[0],
lby[1],
nd2_velD,
drvh2D,
drti2D,
rhoD,
damp2_yD,
idmat2D,
dxi2D,
dyi2D,
dzi2D,
dxh2D,
dyh2D,
dzh2D,
t2xxD,
t2xyD,
t2xzD,
t2yyD,
t2yzD,
t2zzD,
*mw2_pml1, //dimension #s
*mw2_pml,
*nxbtm,
*nybtm,
*nzbtm,
v2xD, //output
v2yD,
v2zD,
v2x_pyD,
v2y_pyD,
v2z_pyD);
// printf("vel_PmlY_IIC called!\n");
}
int gridSizeX7 = (nd2_vel[5] - nd2_vel[0])/blockSizeX + 1;
int gridSizeY7 = (nd2_vel[11] - nd2_vel[6])/blockSizeY + 1;
dim3 dimGrid7(gridSizeX7, gridSizeY7);
// printf("myid = %d, grid7 = (%d, %d)\n", *myid, gridSizeX7, gridSizeY7);
hipLaunchKernelGGL(( vel_PmlZ_IIC), dim3(dimGrid7), dim3(dimBlock), 0, 0, *nzbm1,
*ca,
nd2_velD,
drvh2D,
drti2D,
rhoD,
damp2_zD,
idmat2D,
dxi2D,
dyi2D,
dzi2D,
dxh2D,
dyh2D,
dzh2D,
t2xxD,
t2xyD,
t2xzD,
t2yyD,
t2yzD,
t2zzD,
*mw2_pml1, //dimension #s
*mw2_pml,
*nxbtm,
*nybtm,
*nzbtm,
v2xD, //output
v2yD,
v2zD,
v2x_pzD,
v2y_pzD,
v2z_pzD);
// printf("vel_PmlZ_IIC called!\n");
hipDeviceSynchronize();
gettimeofday(&t2, NULL);
tmpTime = 1000.0 * (t2.tv_sec - t1.tv_sec) + (t2.tv_usec - t1.tv_usec) / 1000.0;
totalTimeCompV += tmpTime;
gettimeofday(&t1, NULL);
cpy_d2h_velocityOutputsC(v1xM, v1yM, v1zM, v2xM, v2yM, v2zM, nxtop, nytop, nztop, nxbtm, nybtm, nzbtm);
gettimeofday(&t2, NULL);
tmpTime = 1000.0 * (t2.tv_sec - t1.tv_sec) + (t2.tv_usec - t1.tv_usec) / 1000.0;
totalTimeD2HV += tmpTime;
// for debug
// int size = (*nztop + 2) * (*nxtop + 3) * (*nytop + 3);
// write_output(v1xM, size, "OUTPUT_ARRAYS/v1xM.txt");
// write_output(v1yM, size, "OUTPUT_ARRAYS/v1yM.txt");
// write_output(v1zM, size, "OUTPUT_ARRAYS/v1zM.txt");
// size = (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm + 3);
// write_output(v2xM, size, "OUTPUT_ARRAYS/v2xM.txt");
// write_output(v2yM, size, "OUTPUT_ARRAYS/v2yM.txt");
// write_output(v2zM, size, "OUTPUT_ARRAYS/v2zM.txt");
return;
}
#ifdef __cplusplus
}
#endif
__global__ void velocity_inner_IC(int nztop,
int nztm1,
float ca,
int *nd1_vel,
float *rhoM,
int *idmat1M,
float *dxi1M,
float *dyi1M,
float *dzi1M,
float *dxh1M,
float *dyh1M,
float *dzh1M,
float *t1xxM,
float *t1xyM,
float *t1xzM,
float *t1yyM,
float *t1yzM,
float *t1zzM,
int nxtop, //dimension #
int nytop,
float *v1xM, //output
float *v1yM,
float *v1zM)
{
int i, j, k, k3;
float dtxz, dtyz, dtzz;
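	// NOTE: v1x(k,i,j), t1xx(k,i,j), dzi1(m,k), rho(m), idmat1(k,i,j), etc.
	// are presumably indexing macros (defined earlier in this file) that map
	// 1-based Fortran-style subscripts onto the flat device pointers passed
	// in (v1xM, t1xxM, ...); e.g. v1x spans (nztop+2)*(nxtop+3)*(nytop+3)
	// elements, matching the copy sizes in cpy_h2d_velocityOutputsC.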
j = blockIdx.x * blockDim.x + threadIdx.x + nd1_vel[2];
i = blockIdx.y * blockDim.y + threadIdx.y + nd1_vel[8];
if (j > nd1_vel[3] || i > nd1_vel[9])
{
return;
}
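	// One thread per (i,j) column. The k3 = 1..3 loop handles the planes
	// k = 1, k = 2 and k = nztop, which need one-sided z-stencils (k = 1 is
	// apparently the free surface); the loop below it covers the interior
	// k = 3..nztm1 with the centered four-point stencils.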
for (k3 = 1; k3 <= 3; k3++)
{
k=k3;
if(k3==3) k=nztop;
if(k==1)
{
dtxz=(dzi1(3,k)-dzi1(1,k))*t1xz(2,i,j)+dzi1(4,k)*t1xz(3,i,j);
dtyz=(dzi1(3,k)-dzi1(1,k))*t1yz(2,i,j)+dzi1(4,k)*t1yz(3,i,j);
dtzz=dzh1(3,k)/ca*(35./8.*t1zz(k,i,j) -35./24.*t1zz(k+1,i,j)+
21./40.*t1zz(k+2,i,j)-5./56.*t1zz(k+3,i,j));
}
else if(k==2)
{
dtxz=dzi1(2,k)*t1xz(2,i,j)+dzi1(3,k)*t1xz(3,i,j)+dzi1(4,k)*t1xz(4,i,j);
dtyz=dzi1(2,k)*t1yz(2,i,j)+dzi1(3,k)*t1yz(3,i,j)+dzi1(4,k)*t1yz(4,i,j);
dtzz=dzh1(3,k)/ca*(-31./24.*t1zz(k-1,i,j) +29./24.*t1zz(k,i,j)-
3./40.*t1zz(k+1,i,j)+1./168.*t1zz(k+2,i,j));
}
else
{
dtxz=dzi1(2,k)/ca*(t1xz(k,i,j)-t1xz(k+1,i,j));
dtyz=dzi1(2,k)/ca*(t1yz(k,i,j)-t1yz(k+1,i,j));
dtzz=dzh1(2,k)/ca*(t1zz(k-1,i,j)-t1zz(k,i,j));
}
v1x(k,i,j)=v1x(k,i,j)+
0.5*(rho(idmat1(k,i,j))+rho(idmat1(k,i+1,j)))*
(dxi1(1,i)*t1xx(k,i-1,j)+dxi1(2,i)*t1xx(k,i, j)+
dxi1(3,i)*t1xx(k,i+1,j)+dxi1(4,i)*t1xx(k,i+2,j)+
dyh1(1,j)*t1xy(k,i,j-2)+dyh1(2,j)*t1xy(k,i,j-1)+
dyh1(3,j)*t1xy(k,i,j )+dyh1(4,j)*t1xy(k,i,j+1)+dtxz);
v1y(k,i,j)=v1y(k,i,j)+
0.5*(rho(idmat1(k,i,j))+rho(idmat1(k,i,j+1)))*
(dxh1(1,i)*t1xy(k,i-2,j)+dxh1(2,i)*t1xy(k,i-1,j)+
dxh1(3,i)*t1xy(k,i, j)+dxh1(4,i)*t1xy(k,i+1,j)+
dyi1(1,j)*t1yy(k,i,j-1)+dyi1(2,j)*t1yy(k,i,j )+
dyi1(3,j)*t1yy(k,i,j+1)+dyi1(4,j)*t1yy(k,i,j+2)+dtyz);
v1z(k,i,j)=v1z(k,i,j)+
0.5*(rho(idmat1(k,i,j))+rho(idmat1(k-1,i,j)))*
(dxh1(1,i)*t1xz(k,i-2,j)+dxh1(2,i)*t1xz(k,i-1,j)+
dxh1(3,i)*t1xz(k,i, j)+dxh1(4,i)*t1xz(k,i+1,j)+
dyh1(1,j)*t1yz(k,i,j-2)+dyh1(2,j)*t1yz(k,i,j-1)+
dyh1(3,j)*t1yz(k,i,j )+dyh1(4,j)*t1yz(k,i,j+1)+dtzz);
}
for (k = 3; k <=nztm1; k++)
{
v1x(k,i,j)=v1x(k,i,j)+
0.5*(rho(idmat1(k,i,j))+rho(idmat1(k,i+1,j)))*
(dxi1(1,i)*t1xx(k,i-1,j)+dxi1(2,i)*t1xx(k,i, j)+
dxi1(3,i)*t1xx(k,i+1,j)+dxi1(4,i)*t1xx(k,i+2,j)+
dyh1(1,j)*t1xy(k,i,j-2)+dyh1(2,j)*t1xy(k,i,j-1)+
dyh1(3,j)*t1xy(k,i,j )+dyh1(4,j)*t1xy(k,i,j+1)+
dzi1(1,k)*t1xz(k-1,i,j)+dzi1(2,k)*t1xz(k, i,j)+
dzi1(3,k)*t1xz(k+1,i,j)+dzi1(4,k)*t1xz(k+2,i,j));
v1y(k,i,j)=v1y(k,i,j)+
0.5*(rho(idmat1(k,i,j))+rho(idmat1(k,i,j+1)))*
(dxh1(1,i)*t1xy(k,i-2,j)+dxh1(2,i)*t1xy(k,i-1,j)+
dxh1(3,i)*t1xy(k,i, j)+dxh1(4,i)*t1xy(k,i+1,j)+
dyi1(1,j)*t1yy(k,i,j-1)+dyi1(2,j)*t1yy(k,i,j )+
dyi1(3,j)*t1yy(k,i,j+1)+dyi1(4,j)*t1yy(k,i,j+2)+
dzi1(1,k)*t1yz(k-1,i,j)+dzi1(2,k)*t1yz(k ,i,j)+
dzi1(3,k)*t1yz(k+1,i,j)+dzi1(4,k)*t1yz(k+2,i,j));
v1z(k,i,j)=v1z(k,i,j)+
0.5*(rho(idmat1(k,i,j))+rho(idmat1(k-1,i,j)))*
(dxh1(1,i)*t1xz(k,i-2,j)+dxh1(2,i)*t1xz(k,i-1,j)+
dxh1(3,i)*t1xz(k,i, j)+dxh1(4,i)*t1xz(k,i+1,j)+
dyh1(1,j)*t1yz(k,i,j-2)+dyh1(2,j)*t1yz(k,i,j-1)+
dyh1(3,j)*t1yz(k,i,j )+dyh1(4,j)*t1yz(k,i,j+1)+
dzh1(1,k)*t1zz(k-2,i,j)+dzh1(2,k)*t1zz(k-1,i,j)+
dzh1(3,k)*t1zz(k ,i,j)+dzh1(4,k)*t1zz(k+1,i,j));
}
return;
}
//-----------------------------------------------------------------------
__global__ void velocity_inner_IIC(float ca,
int *nd2_vel,
float *rhoM,
float *dxi2M,
float *dyi2M,
float *dzi2M,
float *dxh2M,
float *dyh2M,
float *dzh2M,
int *idmat2M,
float *t2xxM,
float *t2xyM,
float *t2xzM,
float *t2yyM,
float *t2yzM,
float *t2zzM,
int nxbtm, //dimension #s
int nybtm,
int nzbtm,
float *v2xM, //output
float *v2yM,
float *v2zM)
{
int i, j, k;
j = blockIdx.x * blockDim.x + threadIdx.x + nd2_vel[2];
i = blockIdx.y * blockDim.y + threadIdx.y + nd2_vel[8];
if (j > nd2_vel[3] || i > nd2_vel[9])
{
return;
}
//for (j = nd2_vel(3); j <= nd2_vel(4); j++)
//for (j = nd2_vel[2]; j <= nd2_vel[3]; j++)
//{
//for (i = nd2_vel[8]; i <= nd2_vel[9]; i++)
//{
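	// k = 1 is peeled off because the v2z update needs a two-point
	// one-sided dzh2 stencil at the interface plane; from k = 2 onward
	// the full four-point stencil applies.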
k=1;
v2x(k,i,j)=v2x(k,i,j)+
0.5*(rho(idmat2(k,i,j))+rho(idmat2(k,i+1,j)))*
(dxi2(1,i)*t2xx(k,i-1,j)+dxi2(2,i)*t2xx(k,i, j)+
dxi2(3,i)*t2xx(k,i+1,j)+dxi2(4,i)*t2xx(k,i+2,j)+
dyh2(1,j)*t2xy(k,i,j-2)+dyh2(2,j)*t2xy(k,i,j-1)+
dyh2(3,j)*t2xy(k,i,j )+dyh2(4,j)*t2xy(k,i,j+1)+
dzi2(1,k)*t2xz(k-1,i,j)+dzi2(2,k)*t2xz(k,i,j )+
dzi2(3,k)*t2xz(k+1,i,j)+dzi2(4,k)*t2xz(k+2,i,j));
v2y(k,i,j)=v2y(k,i,j)+
0.5*(rho(idmat2(k,i,j))+rho(idmat2(k,i,j+1)))*
(dxh2(1,i)*t2xy(k,i-2,j)+dxh2(2,i)*t2xy(k,i-1,j)+
dxh2(3,i)*t2xy(k,i, j)+dxh2(4,i)*t2xy(k,i+1,j)+
dyi2(1,j)*t2yy(k,i,j-1)+dyi2(2,j)*t2yy(k,i,j)+
dyi2(3,j)*t2yy(k,i,j+1)+dyi2(4,j)*t2yy(k,i,j+2)+
dzi2(1,k)*t2yz(k-1,i,j)+dzi2(2,k)*t2yz(k,i,j)+
dzi2(3,k)*t2yz(k+1,i,j)+dzi2(4,k)*t2yz(k+2,i,j));
v2z(k,i,j)=v2z(k,i,j)+
0.5*(rho(idmat2(k,i,j))+rho(idmat2(k-1,i,j)))*
(dxh2(1,i)*t2xz(k,i-2,j)+dxh2(2,i)*t2xz(k,i-1,j)+
dxh2(3,i)*t2xz(k,i, j)+dxh2(4,i)*t2xz(k,i+1,j)+
dyh2(1,j)*t2yz(k,i,j-2)+dyh2(2,j)*t2yz(k,i,j-1)+
dyh2(3,j)*t2yz(k,i,j )+dyh2(4,j)*t2yz(k,i,j+1)+
dzh2(2,k)/ca*(t2zz(k-1,i,j)-t2zz(k,i,j)));
//for (k = 2; k <= nd2_vel(16); k++)
for (k = 2; k <= nd2_vel[15]; k++)
{
v2x(k,i,j)=v2x(k,i,j)+
0.5*(rho(idmat2(k,i,j))+rho(idmat2(k,i+1,j)))*
(dxi2(1,i)*t2xx(k,i-1,j)+dxi2(2,i)*t2xx(k,i, j)+
dxi2(3,i)*t2xx(k,i+1,j)+dxi2(4,i)*t2xx(k,i+2,j)+
dyh2(1,j)*t2xy(k,i,j-2)+dyh2(2,j)*t2xy(k,i,j-1)+
dyh2(3,j)*t2xy(k,i,j )+dyh2(4,j)*t2xy(k,i,j+1)+
dzi2(1,k)*t2xz(k-1,i,j)+dzi2(2,k)*t2xz(k,i,j )+
dzi2(3,k)*t2xz(k+1,i,j)+dzi2(4,k)*t2xz(k+2,i,j));
v2y(k,i,j)=v2y(k,i,j)+
0.5*(rho(idmat2(k,i,j))+rho(idmat2(k,i,j+1)))*
(dxh2(1,i)*t2xy(k,i-2,j)+dxh2(2,i)*t2xy(k,i-1,j)+
dxh2(3,i)*t2xy(k,i, j)+dxh2(4,i)*t2xy(k,i+1,j)+
dyi2(1,j)*t2yy(k,i,j-1)+dyi2(2,j)*t2yy(k,i,j)+
dyi2(3,j)*t2yy(k,i,j+1)+dyi2(4,j)*t2yy(k,i,j+2)+
dzi2(1,k)*t2yz(k-1,i,j)+dzi2(2,k)*t2yz(k,i,j)+
dzi2(3,k)*t2yz(k+1,i,j)+dzi2(4,k)*t2yz(k+2,i,j));
v2z(k,i,j)=v2z(k,i,j)+
0.5*(rho(idmat2(k,i,j))+rho(idmat2(k-1,i,j)))*
(dxh2(1,i)*t2xz(k,i-2,j)+dxh2(2,i)*t2xz(k,i-1,j)+
dxh2(3,i)*t2xz(k,i, j)+dxh2(4,i)*t2xz(k,i+1,j)+
dyh2(1,j)*t2yz(k,i,j-2)+dyh2(2,j)*t2yz(k,i,j-1)+
dyh2(3,j)*t2yz(k,i,j )+dyh2(4,j)*t2yz(k,i,j+1)+
dzh2(1,k)*t2zz(k-2,i,j)+dzh2(2,k)*t2zz(k-1,i,j)+
dzh2(3,k)*t2zz(k, i,j)+dzh2(4,k)*t2zz(k+1,i,j));
}
//}
//}
return;
}
//-----------------------------------------------------------------------
__global__ void vel_PmlX_IC(float ca,
int lbx0,
int lbx1,
int *nd1_vel,
float *rhoM,
float *drvh1M,
float *drti1M,
float *damp1_xM,
int *idmat1M,
float *dxi1M,
float *dyi1M,
float *dzi1M,
float *dxh1M,
float *dyh1M,
float *dzh1M,
float *t1xxM,
float *t1xyM,
float *t1xzM,
float *t1yyM,
float *t1yzM,
float *t1zzM,
int mw1_pml1, //dimension #
int mw1_pml,
int nxtop,
int nytop,
int nztop,
float *v1xM, //output
float *v1yM,
float *v1zM,
float *v1x_pxM,
float *v1y_pxM,
float *v1z_pxM)
{
// !Compute the velocities in region of PML-x-I
// use grid_node_comm
// use wave_field_comm
// implicit NONE
int i,j,k,lb,ib,kb;
float rth,rti,damp0,dmpx2,dmpx1,dmpyz2,dmpyz1,ro1,rox,roy,roz,
vtmpx,vtmpy,vtmpz,dtxz,dtyz,dtzz,dtxy,dtyy,dtzy;
j = blockIdx.x * blockDim.x + threadIdx.x + nd1_vel[0];
lb = blockIdx.y * blockDim.y + threadIdx.y + lbx0;
//int nv2x=(lbx(2) - lbx(1) + 1) * mw1_pml;
int nv2x=(lbx1 - lbx0 + 1) * mw1_pml;
//if ( lbx(1)>lbx(2) ) return;
if (lbx0 > lbx1)
{
return;
}
if (j > nd1_vel[5] || lb > lbx1)
{
return;
}
//calculate the value of ib
ib = 0;
for (k = lbx0; k < lb; k++)
{
for (i = nd1_vel[6+4*k]; i <= nd1_vel[7+4*k]; i++)
{
ib++;
}
}
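	// Each thread recomputes ib, the running offset into the v1*_px PML
	// arrays that the original serial loops (kept in comments below)
	// accumulated across the lb strips, so its writes land at the same
	// flattened positions as in the sequential version.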
//for (j = nd1_vel(1); j <= nd1_vel(6); j++)
//for (j = nd1_vel[0]; j <= nd1_vel[5]; j++)
//{
//ib=0;
//for (lb = lbx(1); lb <= lbx(2); lb++)
//for (lb = lbx[0]; lb <= lbx[1]; lb++)
//{
kb=0;
//for (i = nd1_vel(7+4*lb); i <= nd1_vel(8+4*lb); i++)
for (i = nd1_vel[6+4*lb]; i <= nd1_vel[7+4*lb]; i++)
{
kb=kb+1;
ib=ib+1;
rth=drvh1(kb,lb);
rti=drti1(kb,lb);
for (k = 1; k <= nztop; k++)
{
damp0=damp1_x(k,j,lb);
dmpx2=1./(1.+rth*damp0);
dmpx1=dmpx2*2.-1.;
dmpyz2=1./(1.+rti*damp0);
dmpyz1=dmpyz2*2.-1.;
ro1=rho(idmat1(k,i,j));
rox=0.5*(ro1+rho(idmat1(k,i+1,j)));
roy=0.5*(ro1+rho(idmat1(k,i,j+1)));
roz=0.5*(ro1+rho(idmat1(k-1,i,j)));
vtmpx=v1x(k,i,j)-v1x_px(k,ib,j);
vtmpy=v1y(k,i,j)-v1y_px(k,ib,j);
vtmpz=v1z(k,i,j)-v1z_px(k,ib,j);
//if(j>nd1_vel(2) && j<nd1_vel(5))
if(j>nd1_vel[1] && j<nd1_vel[4])
{
dtxy=dyh1(1,j)*t1xy(k,i,j-2)+dyh1(2,j)*t1xy(k,i,j-1)+
dyh1(3,j)*t1xy(k,i,j )+dyh1(4,j)*t1xy(k,i,j+1);
dtyy=dyi1(1,j)*t1yy(k,i,j-1)+dyi1(2,j)*t1yy(k,i,j )+
dyi1(3,j)*t1yy(k,i,j+1)+dyi1(4,j)*t1yy(k,i,j+2);
dtzy=dyh1(1,j)*t1yz(k,i,j-2)+dyh1(2,j)*t1yz(k,i,j-1)+
dyh1(3,j)*t1yz(k,i,j )+dyh1(4,j)*t1yz(k,i,j+1);
if(k==1)
{
dtxz=(dzi1(3,k)-dzi1(1,k))*t1xz(2,i,j)+dzi1(4,k)*t1xz(3,i,j);
dtyz=(dzi1(3,k)-dzi1(1,k))*t1yz(2,i,j)+dzi1(4,k)*t1yz(3,i,j);
dtzz=dzh1(3,k)/ca*(35./8.*t1zz(k,i,j)-35./24.*t1zz(k+1,i,j)+
21./40.*t1zz(k+2,i,j)-5./56.*t1zz(k+3,i,j));
}
else if(k==2)
{
dtxz=dzi1(2,k)*t1xz(k,i,j)+
dzi1(3,k)*t1xz(k+1,i,j)+dzi1(4,k)*t1xz(k+2,i,j);
dtyz=dzi1(2,k)*t1yz(k,i,j)+
dzi1(3,k)*t1yz(k+1,i,j)+dzi1(4,k)*t1yz(k+2,i,j);
dtzz=dzh1(3,k)/ca*(-31./24.*t1zz(k-1,i,j)+29./24.*t1zz(k,i,j)-
3./40.*t1zz(k+1,i,j)+1./168.*t1zz(k+2,i,j));
}
else if(k==nztop)
{
dtxz=dzi1(2,k)/ca*(t1xz(k,i,j)-t1xz(k+1,i,j));
dtyz=dzi1(2,k)/ca*(t1yz(k,i,j)-t1yz(k+1,i,j));
dtzz=dzh1(2,k)/ca*(t1zz(k-1,i,j)-t1zz(k,i,j));
}
else
{
dtxz=dzi1(1,k)*t1xz(k-1,i,j)+dzi1(2,k)*t1xz(k, i,j)+
dzi1(3,k)*t1xz(k+1,i,j)+dzi1(4,k)*t1xz(k+2,i,j);
dtyz=dzi1(1,k)*t1yz(k-1,i,j)+dzi1(2,k)*t1yz(k ,i,j)+
dzi1(3,k)*t1yz(k+1,i,j)+dzi1(4,k)*t1yz(k+2,i,j);
dtzz=dzh1(1,k)*t1zz(k-2,i,j)+dzh1(2,k)*t1zz(k-1,i,j)+
dzh1(3,k)*t1zz(k ,i,j)+dzh1(4,k)*t1zz(k+1,i,j);
}
vtmpx=vtmpx+(dtxy+dtxz)*rox;
vtmpy=vtmpy+(dtyy+dtyz)*roy;
vtmpz=vtmpz+(dtzy+dtzz)*roz;
}
v1x_px(k,ib,j)=v1x_px(k,ib,j)*dmpx1+dmpx2*rox*
dxi1(2,i)/ca*(t1xx(k,i,j)-t1xx(k,i+1,j));
v1x(k,i,j)=vtmpx+v1x_px(k,ib,j);
v1y_px(k,ib,j)=v1y_px(k,ib,j)*dmpyz1+dmpyz2*roy*
dxh1(2,i)/ca*(t1xy(k,i-1,j)-t1xy(k,i,j));
v1y(k,i,j)=vtmpy+v1y_px(k,ib,j);
v1z_px(k,ib,j)=v1z_px(k,ib,j)*dmpyz1+dmpyz2*roz*
dxh1(2,i)/ca*(t1xz(k,i-1,j)-t1xz(k,i,j));
v1z(k,i,j)=vtmpz+v1z_px(k,ib,j);
}
}
//}
//}
return;
}
//-----------------------------------------------------------------------
__global__ void vel_PmlY_IC(int nztop,
float ca,
int lby0,
int lby1,
int *nd1_vel,
float *rhoM,
float *drvh1M,
float *drti1M,
int *idmat1M,
float *damp1_yM,
float *dxi1M,
float *dyi1M,
float *dzi1M,
float *dxh1M,
float *dyh1M,
float *dzh1M,
float *t1xxM,
float *t1xyM,
float *t1xzM,
float *t1yyM,
float *t1yzM,
float *t1zzM,
int mw1_pml1, //dimension #s
int mw1_pml,
int nxtop,
int nytop,
float *v1xM, //output
float *v1yM,
float *v1zM,
float *v1x_pyM,
float *v1y_pyM,
float *v1z_pyM)
{
int i,j,k,lb,jb,kb, jbIni;
float rth,rti,damp0,dmpy2,dmpy1,dmpxz2,dmpxz1,ro1,rox,roy,roz,
dtxz,dtyz,dtzz,vtmpx,vtmpy,vtmpz;
//if( lby(1)>lby(2) )
if( lby0>lby1 )
return;
lb = blockDim.x * blockIdx.x + threadIdx.x + lby0;
i = blockDim.y * blockIdx.y + threadIdx.y + nd1_vel[6];
if (lb > lby1 || i > nd1_vel[11])
{
return;
}
jbIni = 0;
for (k = lby0; k < lb; k++)
{
for (j = nd1_vel[4*k]; j <= nd1_vel[1+4*k]; j++)
{
jbIni++;
}
}
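	// As in vel_PmlX_IC, recompute the starting jb offset that the original
	// serial loop over the lby strips would have accumulated for this lb.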
jb = jbIni;
kb = 0;
//for (lb = lby(1); lb <= lby(2); lb++)
//for (lb = lby0; lb <= lby1; lb++)
//{
// kb=0;
// //for (i = nd1_vel(7); i <= nd1_vel(12); i++)
// for (i = nd1_vel[6]; i <= nd1_vel[11]; i++)
// {
//for (j = nd1_vel(1+4*lb); j <= nd1_vel(2+4*lb); j++)
for (j = nd1_vel[4*lb]; j <= nd1_vel[1+4*lb]; j++)
{
kb=kb+1;
jb=jb+1;
rth=drvh1(kb,lb);
rti=drti1(kb,lb);
for (k = 1; k <= nztop; k++)
{
damp0=damp1_y(k,i,lb);
dmpy2=1./(1.+rth*damp0);
dmpy1=dmpy2*2.-1.;
dmpxz2=1./(1.+rti*damp0);
dmpxz1=dmpxz2*2.-1.;
ro1=rho(idmat1(k,i,j));
rox=0.5*(ro1+rho(idmat1(k,i+1,j)));
roy=0.5*(ro1+rho(idmat1(k,i,j+1)));
roz=0.5*(ro1+rho(idmat1(k-1,i,j)));
if(k==1)
{
dtxz=(dzi1(3,k)-dzi1(1,k))*t1xz(2,i,j)+dzi1(4,k)*t1xz(3,i,j);
dtyz=(dzi1(3,k)-dzi1(1,k))*t1yz(2,i,j)+dzi1(4,k)*t1yz(3,i,j);
dtzz=dzh1(3,k)/ca*(35./8.*t1zz(k,i,j)-35./24.*t1zz(k+1,i,j)+
21./40.*t1zz(k+2,i,j)-5./56.*t1zz(k+3,i,j));
}
else if(k==2)
{
dtxz=dzi1(2,k)*t1xz(k,i,j)+
dzi1(3,k)*t1xz(k+1,i,j)+dzi1(4,k)*t1xz(k+2,i,j);
dtyz=dzi1(2,k)*t1yz(k,i,j)+
dzi1(3,k)*t1yz(k+1,i,j)+dzi1(4,k)*t1yz(k+2,i,j);
dtzz=dzh1(3,k)/ca*(-31./24.*t1zz(k-1,i,j)+29./24.*t1zz(k,i,j)-
3./40.*t1zz(k+1,i,j)+1./168.*t1zz(k+2,i,j));
}
else if(k==nztop)
{
dtxz=dzi1(2,k)/ca*(t1xz(k,i,j)-t1xz(k+1,i,j));
dtyz=dzi1(2,k)/ca*(t1yz(k,i,j)-t1yz(k+1,i,j));
dtzz=dzh1(2,k)/ca*(t1zz(k-1,i,j)-t1zz(k,i,j));
}
else
{
dtxz=dzi1(1,k)*t1xz(k-1,i,j)+dzi1(2,k)*t1xz(k, i,j)+
dzi1(3,k)*t1xz(k+1,i,j)+dzi1(4,k)*t1xz(k+2,i,j);
dtyz=dzi1(1,k)*t1yz(k-1,i,j)+dzi1(2,k)*t1yz(k ,i,j)+
dzi1(3,k)*t1yz(k+1,i,j)+dzi1(4,k)*t1yz(k+2,i,j);
dtzz=dzh1(1,k)*t1zz(k-2,i,j)+dzh1(2,k)*t1zz(k-1,i,j)+
dzh1(3,k)*t1zz(k ,i,j)+dzh1(4,k)*t1zz(k+1,i,j);
}
vtmpx=v1x(k,i,j)-v1x_py(k,i,jb)+dtxz*rox;
vtmpy=v1y(k,i,j)-v1y_py(k,i,jb)+dtyz*roy;
vtmpz=v1z(k,i,j)-v1z_py(k,i,jb)+dtzz*roz;
//if(i>nd1_vel(8) && i<nd1_vel(11))
if(i>nd1_vel[7] && i<nd1_vel[10])
{
vtmpx=vtmpx+
rox*(dxi1(1,i)*t1xx(k,i-1,j)+dxi1(2,i)*t1xx(k,i, j)+
dxi1(3,i)*t1xx(k,i+1,j)+dxi1(4,i)*t1xx(k,i+2,j));
vtmpy=vtmpy+
roy*(dxh1(1,i)*t1xy(k,i-2,j)+dxh1(2,i)*t1xy(k,i-1,j)+
dxh1(3,i)*t1xy(k,i, j)+dxh1(4,i)*t1xy(k,i+1,j));
vtmpz=vtmpz+
roz*(dxh1(1,i)*t1xz(k,i-2,j)+dxh1(2,i)*t1xz(k,i-1,j)+
dxh1(3,i)*t1xz(k,i, j)+dxh1(4,i)*t1xz(k,i+1,j));
}
v1x_py(k,i,jb)=v1x_py(k,i,jb)*dmpxz1+dmpxz2*
rox*dyh1(2,j)/ca*(t1xy(k,i,j-1)-t1xy(k,i,j));
v1x(k,i,j)=vtmpx+v1x_py(k,i,jb);
v1y_py(k,i,jb)=v1y_py(k,i,jb)*dmpy1+dmpy2*
roy*dyi1(2,j)/ca*(t1yy(k,i,j)-t1yy(k,i,j+1));
v1y(k,i,j)=vtmpy+v1y_py(k,i,jb);
v1z_py(k,i,jb)=v1z_py(k,i,jb)*dmpxz1+dmpxz2*
roz*dyh1(2,j)/ca*(t1yz(k,i,j-1)-t1yz(k,i,j));
v1z(k,i,j)=vtmpz+v1z_py(k,i,jb);
}
}
//}
//}
return;
}
//-----------------------------------------------------------------------
__global__ void vel_PmlX_IIC(int nzbm1,
float ca,
int lbx0,
int lbx1,
int *nd2_vel,
float *drvh2M,
float *drti2M,
float *rhoM,
float *damp2_xM,
int *idmat2M,
float *dxi2M,
float *dyi2M,
float *dzi2M,
float *dxh2M,
float *dyh2M,
float *dzh2M,
float *t2xxM,
float *t2xyM,
float *t2xzM,
float *t2yyM,
float *t2yzM,
float *t2zzM,
int mw2_pml1, //dimension #s
int mw2_pml,
int nxbtm,
int nybtm,
int nzbtm,
float *v2xM, //output
float *v2yM,
float *v2zM,
float *v2x_pxM,
float *v2y_pxM,
float *v2z_pxM)
{
int i,j,k,lb,ib,kb;
float rth,rti,damp0,dmpx2,dmpx1,dmpyz2,dmpyz1,ro1,rox,roy,roz,
vtmpx,vtmpy,vtmpz,dtxy,dtyy,dtzy,dtxz,dtyz,dtzz;
//int nv2y = (lbx(2) - lbx(1) + 1) * mw2_pml;
int nv2y = (lbx1 - lbx0 + 1) * mw2_pml;
//if ( lbx(1)>lbx(2) ) return;
if ( lbx0>lbx1 ) return;
j = blockIdx.x * blockDim.x + threadIdx.x + nd2_vel[0];
lb = blockIdx.y * blockDim.y + threadIdx.y + lbx0;
if (j > nd2_vel[5] || lb > lbx1)
{
return;
}
ib = 0;
for (k = lbx0; k < lb; k++)
{
for (i = nd2_vel[6+4*k]; i <= nd2_vel[7+4*k]; i++)
{
ib++;
}
}
//for (j = nd2_vel(1); j <= nd2_vel(6); j++)
//for (j = nd2_vel[0]; j <= nd2_vel[5]; j++)
//{
//ib=0;
//for (lb = lbx(1); lb <= lbx(2); lb++)
//for (lb = lbx0; lb <= lbx1; lb++)
//{
kb=0;
//for (i = nd2_vel(7+4*lb); i <= nd2_vel(8+4*lb); i++)
for (i = nd2_vel[6+4*lb]; i <= nd2_vel[7+4*lb]; i++)
{
kb=kb+1;
ib=ib+1;
rth=drvh2(kb,lb);
rti=drti2(kb,lb);
for (k = 1; k <= nzbm1; k++)
{
damp0=damp2_x(k,j,lb);
dmpx2=1./(1.+rth*damp0);
dmpx1=dmpx2*2.-1.;
dmpyz2=1./(1.+rti*damp0);
dmpyz1=dmpyz2*2.-1.;
ro1=rho(idmat2(k,i,j));
rox=0.5*(ro1+rho(idmat2(k,i+1,j)));
roy=0.5*(ro1+rho(idmat2(k,i,j+1)));
roz=0.5*(ro1+rho(idmat2(k-1,i,j)));
vtmpx=v2x(k,i,j)-v2x_px(k,ib,j);
vtmpy=v2y(k,i,j)-v2y_px(k,ib,j);
vtmpz=v2z(k,i,j)-v2z_px(k,ib,j);
//if(j>nd2_vel(2) && j<nd2_vel(5))
if(j>nd2_vel[1] && j<nd2_vel[4])
{
dtxy=dyh2(1,j)*t2xy(k,i,j-2)+dyh2(2,j)*t2xy(k,i,j-1)+
dyh2(3,j)*t2xy(k,i,j )+dyh2(4,j)*t2xy(k,i,j+1);
dtyy=dyi2(1,j)*t2yy(k,i,j-1)+dyi2(2,j)*t2yy(k,i,j)+
dyi2(3,j)*t2yy(k,i,j+1)+dyi2(4,j)*t2yy(k,i,j+2);
dtzy=dyh2(1,j)*t2yz(k,i,j-2)+dyh2(2,j)*t2yz(k,i,j-1)+
dyh2(3,j)*t2yz(k,i,j )+dyh2(4,j)*t2yz(k,i,j+1);
if(k==1)
{
dtxz=dzi2(2,k)/ca*(t2xz(k,i,j)-t2xz(k+1,i,j));
dtyz=dzi2(2,k)/ca*(t2yz(k,i,j)-t2yz(k+1,i,j));
dtzz=dzh2(2,k)/ca*(t2zz(k-1,i,j)-t2zz(k,i,j));
}
//else if(k<nd2_vel(17))
else if(k<nd2_vel[16])
{
dtxz=dzi2(1,k)*t2xz(k-1,i,j)+dzi2(2,k)*t2xz(k,i,j)+
dzi2(3,k)*t2xz(k+1,i,j)+dzi2(4,k)*t2xz(k+2,i,j);
dtyz=dzi2(1,k)*t2yz(k-1,i,j)+dzi2(2,k)*t2yz(k,i,j)+
dzi2(3,k)*t2yz(k+1,i,j)+dzi2(4,k)*t2yz(k+2,i,j);
dtzz=dzh2(1,k)*t2zz(k-2,i,j)+dzh2(2,k)*t2zz(k-1,i,j)+
dzh2(3,k)*t2zz(k, i,j)+dzh2(4,k)*t2zz(k+1,i,j);
}
else
{
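				// In the bottom strip (k >= nd2_vel[16]) the z-derivative terms
				// are dropped here; presumably vel_PmlZ_IIC, which sweeps the
				// same strip afterwards, supplies the z contribution through
				// its v2*_pz memory terms.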
dtxz=0.0;
dtyz=0.0;
dtzz=0.0;
}
vtmpx=vtmpx+(dtxy+dtxz)*rox;
vtmpy=vtmpy+(dtyy+dtyz)*roy;
vtmpz=vtmpz+(dtzy+dtzz)*roz;
}
v2x_px(k,ib,j)=v2x_px(k,ib,j)*dmpx1+dmpx2*
rox*dxi2(2,i)/ca*(t2xx(k,i,j)-t2xx(k,i+1,j));
v2x(k,i,j)=vtmpx+v2x_px(k,ib,j);
v2y_px(k,ib,j)=v2y_px(k,ib,j)*dmpyz1+dmpyz2*
roy*dxh2(2,i)/ca*(t2xy(k,i-1,j)-t2xy(k,i,j));
v2y(k,i,j)=vtmpy+v2y_px(k,ib,j);
v2z_px(k,ib,j)=v2z_px(k,ib,j)*dmpyz1+dmpyz2*
roz*dxh2(2,i)/ca*(t2xz(k,i-1,j)-t2xz(k,i,j));
v2z(k,i,j)=vtmpz+v2z_px(k,ib,j);
}
}
//}
//}
return;
}
//-----------------------------------------------------------------------
__global__ void vel_PmlY_IIC(int nzbm1,
float ca,
int lby0,
int lby1,
int *nd2_vel,
float *drvh2M,
float *drti2M,
float *rhoM,
float *damp2_yM,
int *idmat2M,
float *dxi2M,
float *dyi2M,
float *dzi2M,
float *dxh2M,
float *dyh2M,
float *dzh2M,
float *t2xxM,
float *t2xyM,
float *t2xzM,
float *t2yyM,
float *t2yzM,
float *t2zzM,
int mw2_pml1,
int mw2_pml,
int nxbtm,
int nybtm,
int nzbtm,
float *v2xM, //output
float *v2yM,
float *v2zM,
float *v2x_pyM,
float *v2y_pyM,
float *v2z_pyM)
{
int i,j,k,lb,jb,kb, jbIni;
float rth,rti,damp0,dmpy2,dmpy1,dmpxz2,dmpxz1,ro1,rox,roy,roz,
vtmpx,vtmpy,vtmpz,dtxz,dtyz,dtzz;
//if( lby(1)>lby(2) ) return;
if( lby0>lby1 )
{
return;
}
lb = blockIdx.x * blockDim.x + threadIdx.x + lby0;
i = blockIdx.y * blockDim.y + threadIdx.y + nd2_vel[6];
if (lb > lby1 || i > nd2_vel[11])
{
return;
}
jbIni = 0;
for (j = lby0; j < lb; j++)
{
for (k = nd2_vel[4*j]; k <= nd2_vel[1+4*j]; k++)
{
jbIni++;
}
}
jb = jbIni;
kb = 0;
//for (lb = lby(1); lb <= lby(2); lb++)
//for (lb = lby0; lb <= lby1; lb++)
//{
//kb=0;
//for (i = nd2_vel(7); i <= nd2_vel(12); i++)
//for (i = nd2_vel[6]; i <= nd2_vel[11]; i++)
//{
//for (j = nd2_vel(1+4*lb); j <= nd2_vel(2+4*lb); j++)
for (j = nd2_vel[4*lb]; j <= nd2_vel[1+4*lb]; j++)
{
kb=kb+1;
jb=jb+1;
rth=drvh2(kb,lb);
rti=drti2(kb,lb);
for (k = 1; k <= nzbm1; k++)
{
damp0=damp2_y(k,i,lb);
dmpy2=1./(1.+rth*damp0);
dmpy1=dmpy2*2.-1.0;
dmpxz2=1./(1.+rti*damp0);
dmpxz1=dmpxz2*2.-1.;
ro1=rho(idmat2(k,i,j));
rox=0.5*(ro1+rho(idmat2(k,i+1,j)));
roy=0.5*(ro1+rho(idmat2(k,i,j+1)));
roz=0.5*(ro1+rho(idmat2(k-1,i,j)));
vtmpx=v2x(k,i,j)-v2x_py(k,i,jb);
vtmpy=v2y(k,i,j)-v2y_py(k,i,jb);
vtmpz=v2z(k,i,j)-v2z_py(k,i,jb);
//if(k<nd2_vel(17))
if(k<nd2_vel[16])
{
if(k>1)
{
dtxz=dzi2(1,k)*t2xz(k-1,i,j)+dzi2(2,k)*t2xz(k,i,j)+
dzi2(3,k)*t2xz(k+1,i,j)+dzi2(4,k)*t2xz(k+2,i,j);
dtyz=dzi2(1,k)*t2yz(k-1,i,j)+dzi2(2,k)*t2yz(k,i,j)+
dzi2(3,k)*t2yz(k+1,i,j)+dzi2(4,k)*t2yz(k+2,i,j);
dtzz=dzh2(1,k)*t2zz(k-2,i,j)+dzh2(2,k)*t2zz(k-1,i,j)+
dzh2(3,k)*t2zz(k, i,j)+dzh2(4,k)*t2zz(k+1,i,j);
}
else
{
dtxz=dzi2(2,k)/ca*(t2xz(k,i,j)-t2xz(k+1,i,j));
dtyz=dzi2(2,k)/ca*(t2yz(k,i,j)-t2yz(k+1,i,j));
dtzz=dzh2(2,k)/ca*(t2zz(k-1,i,j)-t2zz(k,i,j));
}
//if(i>nd2_vel(8) && i<nd2_vel(11))
if(i>nd2_vel[7] && i<nd2_vel[10])
{
vtmpx=vtmpx+rox*(dtxz+
dxi2(1,i)*t2xx(k,i-1,j)+dxi2(2,i)*t2xx(k,i, j)+
dxi2(3,i)*t2xx(k,i+1,j)+dxi2(4,i)*t2xx(k,i+2,j));
vtmpy=vtmpy+roy*(dtyz+
dxh2(1,i)*t2xy(k,i-2,j)+dxh2(2,i)*t2xy(k,i-1,j)+
dxh2(3,i)*t2xy(k,i, j)+dxh2(4,i)*t2xy(k,i+1,j));
vtmpz=vtmpz+roz*(dtzz+
dxh2(1,i)*t2xz(k,i-2,j)+dxh2(2,i)*t2xz(k,i-1,j)+
dxh2(3,i)*t2xz(k,i, j)+dxh2(4,i)*t2xz(k,i+1,j));
}
else
{
vtmpx=vtmpx+rox*dtxz;
vtmpy=vtmpy+roy*dtyz;
vtmpz=vtmpz+roz*dtzz;
}
}
else
{
//if(i>nd2_vel(8) && i<nd2_vel(11))
if(i>nd2_vel[7] && i<nd2_vel[10])
{
vtmpx=vtmpx+rox*
(dxi2(1,i)*t2xx(k,i-1,j)+dxi2(2,i)*t2xx(k,i, j)+
dxi2(3,i)*t2xx(k,i+1,j)+dxi2(4,i)*t2xx(k,i+2,j));
vtmpy=vtmpy+ roy*
(dxh2(1,i)*t2xy(k,i-2,j)+dxh2(2,i)*t2xy(k,i-1,j)+
dxh2(3,i)*t2xy(k,i, j)+dxh2(4,i)*t2xy(k,i+1,j));
vtmpz=vtmpz+ roz*
(dxh2(1,i)*t2xz(k,i-2,j)+dxh2(2,i)*t2xz(k,i-1,j)+
dxh2(3,i)*t2xz(k,i, j)+dxh2(4,i)*t2xz(k,i+1,j));
}
}
v2x_py(k,i,jb)=v2x_py(k,i,jb)*dmpxz1+dmpxz2*rox*
dyh2(2,j)/ca*(t2xy(k,i,j-1)-t2xy(k,i,j));
v2x(k,i,j)=vtmpx+v2x_py(k,i,jb);
v2y_py(k,i,jb)=v2y_py(k,i,jb)*dmpy1+dmpy2*roy*
dyi2(2,j)/ca*(t2yy(k,i,j)-t2yy(k,i,j+1));
v2y(k,i,j)=vtmpy+v2y_py(k,i,jb);
v2z_py(k,i,jb)=v2z_py(k,i,jb)*dmpxz1+dmpxz2*roz*
dyh2(2,j)/ca*(t2yz(k,i,j-1)-t2yz(k,i,j));
v2z(k,i,j)=vtmpz+v2z_py(k,i,jb);
}
}
//}
//}
return;
}
//-----------------------------------------------------------------------
__global__ void vel_PmlZ_IIC(int nzbm1,
float ca,
int *nd2_vel,
float *drvh2M,
float *drti2M,
float *rhoM,
float *damp2_zM,
int *idmat2M,
float *dxi2M,
float *dyi2M,
float *dzi2M,
float *dxh2M,
float *dyh2M,
float *dzh2M,
float *t2xxM,
float *t2xyM,
float *t2xzM,
float *t2yyM,
float *t2yzM,
float *t2zzM,
int mw2_pml1, //dimension #s
int mw2_pml,
int nxbtm,
int nybtm,
int nzbtm,
float *v2xM, //output
float *v2yM,
float *v2zM,
float *v2x_pzM,
float *v2y_pzM,
float *v2z_pzM)
{
int i,j,k,kb;
float damp0,dmpz2,dmpz1,dmpxy2,dmpxy1,ro1,rox,roy,roz,vtmpx,vtmpy,vtmpz;
j = blockIdx.x * blockDim.x + threadIdx.x + nd2_vel[0];
i = blockIdx.y * blockDim.y + threadIdx.y + nd2_vel[6];
if (j > nd2_vel[5] || i > nd2_vel[11])
{
return;
}
//for (j = nd2_vel(1); j <= nd2_vel(6); j++)
//for (j = nd2_vel[0]; j <= nd2_vel[5]; j++)
//{
//for (i = nd2_vel(7); i <= nd2_vel(12); i++)
//for (i = nd2_vel[6]; i <= nd2_vel[11]; i++)
//{
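	// damp2_z is a 2-D profile over (i,j); kb counts planes below the
	// interface (k runs from nd2_vel[16] to nzbm1) and indexes the
	// depth-dependent drti2/drvh2 factors.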
kb=0;
damp0=damp2_z(i,j);
//for (k = nd2_vel(17); k <= nzbm1; k++)
for (k = nd2_vel[16]; k <= nzbm1; k++)
{
kb=kb+1;
dmpz2=1./(1.+damp0*drti2(kb,1));
dmpz1=dmpz2*2.-1.;
dmpxy2=1./(1.+damp0*drvh2(kb,1));
dmpxy1=dmpxy2*2.-1.;
ro1=rho(idmat2(k,i,j));
rox=0.5*(ro1+rho(idmat2(k,i+1,j)));
roy=0.5*(ro1+rho(idmat2(k,i,j+1)));
roz=0.5*(ro1+rho(idmat2(k-1,i,j)));
vtmpx=v2x(k,i,j)-v2x_pz(kb,i,j);
vtmpy=v2y(k,i,j)-v2y_pz(kb,i,j);
vtmpz=v2z(k,i,j)-v2z_pz(kb,i,j);
//if(j>nd2_vel(2) && j<nd2_vel(5) &&
// i>nd2_vel(8) && i<nd2_vel(11))
if(j>nd2_vel[1] && j<nd2_vel[4] &&
i>nd2_vel[7] && i<nd2_vel[10])
{
vtmpx=vtmpx+rox*
(dxi2(1,i)*t2xx(k,i-1,j)+dxi2(2,i)*t2xx(k,i, j)+
dxi2(3,i)*t2xx(k,i+1,j)+dxi2(4,i)*t2xx(k,i+2,j)+
dyh2(1,j)*t2xy(k,i,j-2)+dyh2(2,j)*t2xy(k,i,j-1)+
dyh2(3,j)*t2xy(k,i,j )+dyh2(4,j)*t2xy(k,i,j+1));
vtmpy=vtmpy+roy*
(dxh2(1,i)*t2xy(k,i-2,j)+dxh2(2,i)*t2xy(k,i-1,j)+
dxh2(3,i)*t2xy(k,i, j)+dxh2(4,i)*t2xy(k,i+1,j)+
dyi2(1,j)*t2yy(k,i,j-1)+dyi2(2,j)*t2yy(k,i,j)+
dyi2(3,j)*t2yy(k,i,j+1)+dyi2(4,j)*t2yy(k,i,j+2));
vtmpz=vtmpz+roz*
(dxh2(1,i)*t2xz(k,i-2,j)+dxh2(2,i)*t2xz(k,i-1,j)+
dxh2(3,i)*t2xz(k,i, j)+dxh2(4,i)*t2xz(k,i+1,j)+
dyh2(1,j)*t2yz(k,i,j-2)+dyh2(2,j)*t2yz(k,i,j-1)+
dyh2(3,j)*t2yz(k,i,j )+dyh2(4,j)*t2yz(k,i,j+1));
}
v2x_pz(kb,i,j)=v2x_pz(kb,i,j)*dmpxy1+dmpxy2*rox*
dzi2(2,k)/ca*(t2xz(k,i,j)-t2xz(k+1,i,j));
v2x(k,i,j)=vtmpx+v2x_pz(kb,i,j);
v2y_pz(kb,i,j)=v2y_pz(kb,i,j)*dmpxy1+dmpxy2*roy*
dzi2(2,k)/ca*(t2yz(k,i,j)-t2yz(k+1,i,j));
v2y(k,i,j)=vtmpy+v2y_pz(kb,i,j);
v2z_pz(kb,i,j)=v2z_pz(kb,i,j)*dmpz1+dmpz2*roz*
dzh2(2,k)/ca*(t2zz(k-1,i,j)-t2zz(k,i,j));
v2z(k,i,j)=vtmpz+v2z_pz(kb,i,j);
}
//}
//}
return;
}
//stress computation----------------------------------------------
__global__ void stress_norm_xy_IC(int nxb1,
int nyb1,
int nxtop,
int nztop,
int *nd1_tyy,
int *idmat1M,
float ca,
float *clamdaM,
float *cmuM,
float *epdtM,
float *qwpM,
float *qwsM,
float *qwt1M,
float *qwt2M,
float *dxh1M,
float *dyh1M,
float *dxi1M,
float *dyi1M,
float *dzi1M,
float *t1xxM,
float *t1xyM,
float *t1yyM,
float *t1zzM,
float *qt1xxM,
float *qt1xyM,
float *qt1yyM,
float *qt1zzM,
float *v1xM,
float *v1yM,
float *v1zM)
{
int i,j,k,jkq,kodd,inod,irw;
float sxx,syy,szz,sxy,qxx,qyy,qzz,qxy,cusxy,sss,cl,sm2,pm,et,et1,wtp,wts;
j = blockIdx.x * blockDim.x + threadIdx.x + nd1_tyy[2];
i = blockIdx.y * blockDim.y + threadIdx.y + nd1_tyy[8];
if (j > nd1_tyy[3] || i > nd1_tyy[9])
{
return;
}
// for (j = nd1_tyy[2]; j <= nd1_tyy[3]; j++)
// {
kodd = 2 * ((j + nyb1) & 1) + 1;
// for (i = nd1_tyy[8]; i <= nd1_tyy[9]; i++)
// {
jkq=((i+nxb1) & 1) + kodd;
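	// kodd, jkq and irw select one of eight attenuation-coefficient sets
	// (epdt, qwt1, qwt2) from the parity of (j+nyb1), (i+nxb1) and k;
	// apparently a coarse-grained memory-variable scheme for anelastic
	// attenuation.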
for (k = nd1_tyy[12]; k <= nd1_tyy[17]; k++)
{
sxx=dxh1(1,i)*v1x(k,i-2,j)+dxh1(2,i)*v1x(k,i-1,j)+
dxh1(3,i)*v1x(k,i ,j)+dxh1(4,i)*v1x(k,i+1,j);
syy=dyh1(1,j)*v1y(k,i,j-2)+dyh1(2,j)*v1y(k,i,j-1)+
dyh1(3,j)*v1y(k,i ,j)+dyh1(4,j)*v1y(k,i,j+1);
sxy=dxi1(1,i)*v1y(k,i-1,j)+dxi1(2,i)*v1y(k,i, j)+
dxi1(3,i)*v1y(k,i+1,j)+dxi1(4,i)*v1y(k,i+2,j)+
dyi1(1,j)*v1x(k,i,j-1)+dyi1(2,j)*v1x(k,i,j )+
dyi1(3,j)*v1x(k,i,j+1)+dyi1(4,j)*v1x(k,i,j+2);
if(k==1) {
szz=dzi1(2,k)/ca*(22.*v1z(k,i,j)-17.*v1z(k+1,i,j)-
9.*v1z(k+2,i,j)+5.*v1z(k+3,i,j)-v1z(k+4,i,j))/24.0;
}
else if(k==nztop) {
szz=dzi1(2,k)/ca*(v1z(k,i,j)-v1z(k+1,i,j));
}
else
{
szz=dzi1(1,k)*v1z(k-1,i,j)+dzi1(2,k)*v1z(k, i,j)+
dzi1(3,k)*v1z(k+1,i,j)+dzi1(4,k)*v1z(k+2,i,j);
}
inod=idmat1(k,i,j);
cl=clamda(inod);
sm2=2.*cmu(inod);
pm=cl+sm2;
cusxy=sxy/(1./sm2+.5/cmu(idmat1(k,i+1,j+1)));
sss=sxx+syy+szz;
irw=jkq+4*(k&1);
et=epdt(irw);
et1=1.0-et;
wtp= pm*qwp(inod)*(qwp(inod)*qwt1(irw)+qwt2(irw));
wts=sm2*qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw));
qxx=qt1xx(k,i,j);
qt1xx(k,i,j)=qxx*et+(wtp*sss-wts*(syy+szz))*et1;
t1xx(k,i,j)=t1xx(k,i,j)+sm2*sxx+cl*sss-qxx-qt1xx(k,i,j);
qyy=qt1yy(k,i,j);
qt1yy(k,i,j)=qyy*et+(wtp*sss-wts*(sxx+szz))*et1;
t1yy(k,i,j)=t1yy(k,i,j)+sm2*syy+cl*sss-qyy-qt1yy(k,i,j);
qzz=qt1zz(k,i,j);
qt1zz(k,i,j)=qzz*et+(wtp*sss-wts*(sxx+syy))*et1;
t1zz(k,i,j)=t1zz(k,i,j)+sm2*szz+cl*sss-qzz-qt1zz(k,i,j);
qxy=qt1xy(k,i,j);
qt1xy(k,i,j)=qxy*et+wts/sm2*cusxy*et1;
t1xy(k,i,j)=t1xy(k,i,j)+cusxy-qxy-qt1xy(k,i,j);
}
// }
// }
return;
}
//-----------------------------------------------------------------------------
__global__ void stress_xz_yz_IC(int nxb1,
int nyb1,
int nxtop,
int nytop,
int nztop,
int *nd1_tyz,
int *idmat1M,
float ca,
float *cmuM,
float *epdtM,
float *qwsM,
float *qwt1M,
float *qwt2M,
float *dxi1M,
float *dyi1M,
float *dzh1M,
float *v1xM,
float *v1yM,
float *v1zM,
float *t1xzM,
float *t1yzM,
float *qt1xzM,
float *qt1yzM)
// Compute the stress-XZ and stress-YZ components in Region I
// use grid_node_comm
// use wave_field_comm
// implicit NONE
// real, parameter:: tfr1=-577./528./ca,tfr2=201./176./ca, &
// tfr3=-9./176./ca, tfr4=1./528./ca
{
// float tfr1 = -577./528./ca;
// float tfr2 = 201./176./ca;
// float tfr3 = -9./176./ca;
// float tfr4=1./528./ca;
int i,j,k,kodd,inod,jkq,irw;
float dvzx,dvzy,dvxz,dvyz,sm,cusxz,cusyz,et,et1,dmws,qxz,qyz;
j = blockIdx.x * blockDim.x + threadIdx.x + nd1_tyz[2];
i = blockIdx.y * blockDim.y + threadIdx.y + nd1_tyz[8];
if (j > nd1_tyz[3] || i > nd1_tyz[9])
{
return;
}
// for (j=nd1_tyz[2]; j <=nd1_tyz[3]; j++)
// //do j=nd1_tyz(3),nd1_tyz(4)
// {
//kodd=2*mod(j+nyb1,2)+1
kodd=2*((j+nyb1)&1)+1;
// for (i=nd1_tyz[8]; i<=nd1_tyz[9]; i++)
// //do i=nd1_tyz(9),nd1_tyz(10)
// {
//jkq=mod(i+nxb1,2)+kodd
jkq=((i+nxb1)&1)+kodd;
for (k=nd1_tyz[12]; k<=nd1_tyz[17]; k++)
//do k=nd1_tyz(13),nd1_tyz(18)
{
dvzx=dxi1(1,i)*v1z(k,i-1,j)+dxi1(2,i)*v1z(k,i, j)+
dxi1(3,i)*v1z(k,i+1,j)+dxi1(4,i)*v1z(k,i+2,j);
dvzy=dyi1(1,j)*v1z(k,i,j-1)+dyi1(2,j)*v1z(k,i,j )+
dyi1(3,j)*v1z(k,i,j+1)+dyi1(4,j)*v1z(k,i,j+2);
if(k<nztop) {
dvxz=dzh1(1,k)*v1x(k-2,i,j)+dzh1(2,k)*v1x(k-1,i,j)+
dzh1(3,k)*v1x(k, i,j)+dzh1(4,k)*v1x(k+1,i,j);
dvyz=dzh1(1,k)*v1y(k-2,i,j)+dzh1(2,k)*v1y(k-1,i,j)+
dzh1(3,k)*v1y(k, i,j)+dzh1(4,k)*v1y(k+1,i,j);
}
else {
dvxz=dzh1(2,k)/ca*(v1x(k-1,i,j)-v1x(k,i,j));
dvyz=dzh1(2,k)/ca*(v1y(k-1,i,j)-v1y(k,i,j));
}
inod=idmat1(k,i,j);
sm=cmu(inod);
cusxz=(dvzx+dvxz)/(.5/sm+.5/cmu(idmat1(k-1,i+1,j)));
cusyz=(dvzy+dvyz)/(.5/sm+.5/cmu(idmat1(k-1,i,j+1)));
//irw=jkq+4*mod(k,2);
irw=jkq+4*(k&1);
et=epdt(irw);
et1=1.0-et;
dmws=qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw));
qxz=qt1xz(k,i,j);
qt1xz(k,i,j)=qxz*et+dmws*cusxz*et1;
t1xz(k,i,j)=t1xz(k,i,j)+cusxz-qxz-qt1xz(k,i,j);
qyz=qt1yz(k,i,j);
qt1yz(k,i,j)=qyz*et+dmws*cusyz*et1;
t1yz(k,i,j)=t1yz(k,i,j)+cusyz-qyz-qt1yz(k,i,j);
}
// }
// }
return;
}
__global__ void stress_resetVars(int ny1p1,
int nx1p1,
int nxtop,
int nytop,
int nztop,
float *t1xzM,
float *t1yzM)
{
int i, j;
j = blockIdx.x * blockDim.x + threadIdx.x - 1;
i = blockIdx.y * blockDim.y + threadIdx.y + 1;
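	// Each thread zeroes one t1yz(1,i,j) entry for j in [-1, ny1p1], then
	// shifts its indices (j+2, i-2) to zero one t1xz(1,i,j) entry for
	// i in [-1, nx1p1], covering the two host loops kept in comments below.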
if (j <= ny1p1 && i <= nxtop)
{
t1yz(1, i, j) = 0.0f;
}
// for (j=-1; j<=ny1p1; j++)
// {
// for (i = 1; i <= nxtop; i++)
// {
// t1yz(1,i,j)=0.0;
// }
// }
j = j + 2;
i = i - 2;
if (j <= nytop && i <= nx1p1)
{
t1xz(1, i, j) = 0.0;
}
// for (j=1; j <= nytop; j++)
// {
// for (i=-1; i <=nx1p1; i++)
// {
// t1xz(1,i,j)=0.0;
// }
// }
return;
}
//------------------------------------------------------------------------------------
__global__ void stress_norm_PmlX_IC(int nxb1,
int nyb1,
int nxtop,
int nytop,
int nztop,
int mw1_pml,
int mw1_pml1,
int lbx0,
int lbx1,
int *nd1_tyy,
int *idmat1M,
float ca,
float *drti1M,
float *damp1_xM,
float *clamdaM,
float *cmuM,
float *epdtM,
float *qwpM,
float *qwsM,
float *qwt1M,
float *qwt2M,
float *dzi1M,
float *dxh1M,
float *dyh1M,
float *v1xM,
float *v1yM,
float *v1zM,
float *t1xxM,
float *t1yyM,
float *t1zzM,
float *t1xx_pxM,
float *t1yy_pxM,
float *qt1xxM,
float *qt1yyM,
float *qt1zzM,
float *qt1xx_pxM,
float *qt1yy_pxM)
// Compute the normal stresses in the PML-x-I region
// use grid_node_comm
// use wave_field_comm
// implicit NONE
// integer:: i,j,k,lb,ib,kb,kodd,jkq,inod,irw
// real:: taoxx,taoyy,taozz,sxx,syy,szz,sss,qxx,qyy,qzz, &
// rti,damp2,damp1,cl,sm2,pm,et,et1,wtp,wts
{
int i,j,k,lb,ib,kb,kodd,jkq,inod,irw;
float taoxx,taoyy,taozz,sxx,syy,szz,sss,qxx,qyy,qzz,rti,damp2,damp1,cl,sm2,pm,et,et1,wtp,wts;
int nti;
//if (lbx[0] > lbx[1]) return;
//if ( lbx(1)>lbx(2) ) return;
j = blockIdx.x * blockDim.x + threadIdx.x + nd1_tyy[0];
lb = blockIdx.y * blockDim.y + threadIdx.y + lbx0;
if (j > nd1_tyy[5] || lb > lbx1)
{
return;
}
nti = (lbx1 - lbx0 + 1) * mw1_pml + lbx1;
// for (j=nd1_tyy[0]; j <= nd1_tyy[5]; j++)
// //do j=nd1_tyy(1),nd1_tyy(6)
// {
kodd=2*((j+nyb1)&1)+1;
ib=0;
for (k = lbx0; k < lb; k++)
{
for (i = nd1_tyy[6+4*k]; i <= nd1_tyy[7+4*k]; i++)
ib++;
}
// for (lb=lbx[0]; lb <=lbx[1]; lb++)
// //do lb=lbx(1),lbx(2)
// {
kb=0;
for (i = nd1_tyy[6+4*lb]; i <= nd1_tyy[7+4*lb]; i++)
//do i=nd1_tyy(7+4*lb),nd1_tyy(8+4*lb)
{
kb=kb+1;
ib=ib+1;
rti=drti1(kb,lb);
jkq=((i+nxb1)&1)+kodd;
//jkq=mod(i+nxb1,2)+kodd
for (k=nd1_tyy[12]; k <=nd1_tyy[17]; k++)
//do k=nd1_tyy(13),nd1_tyy(18)
{
damp2=1./(1.+damp1_x(k,j,lb)*rti);
damp1=damp2*2.0-1.;
inod=idmat1(k,i,j);
cl=clamda(inod);
sm2=2.*cmu(inod);
pm=cl+sm2;
irw=jkq+4*(k&1);
//irw=jkq+4*mod(k,2);
et=epdt(irw);
et1=1.0-et;
wtp= pm*qwp(inod)*(qwp(inod)*qwt1(irw)+qwt2(irw));
wts=sm2*qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw));
taoxx=t1xx(k,i,j)-t1xx_px(k,ib,j);
//debug
//t1xx(k,i,j)=t1xx_px(k,ib,j);
taoyy=t1yy(k,i,j)-t1yy_px(k,ib,j);
taozz=t1zz(k,i,j)-t1yy_px(k,ib,j);
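			// Note: t1zz has no dedicated PML memory array here; it shares
			// t1yy_px (see the matching t1zz update at the end of this loop).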
if(j>nd1_tyy[1] && j<nd1_tyy[4]) {
//if(j>nd1_tyy(2) .and. j<nd1_tyy(5)) {
syy=dyh1(1,j)*v1y(k,i,j-2)+dyh1(2,j)*v1y(k,i,j-1)+
dyh1(3,j)*v1y(k,i ,j)+dyh1(4,j)*v1y(k,i,j+1);
if(k==1) {
szz=dzi1(2,k)/ca*(22.*v1z(k,i,j)-17.*v1z(k+1,i,j)-
9.*v1z(k+2,i,j)+5.*v1z(k+3,i,j)-v1z(k+4,i,j))/24.;
}
else if(k==nztop) {
szz=dzi1(2,k)/ca*(v1z(k,i,j)-v1z(k+1,i,j));
}
else {
szz=dzi1(1,k)*v1z(k-1,i,j)+dzi1(2,k)*v1z(k, i,j)+
dzi1(3,k)*v1z(k+1,i,j)+dzi1(4,k)*v1z(k+2,i,j);
}
sss=syy+szz;
qxx=qt1xx(k,i,j);
qt1xx(k,i,j)=qxx*et+(wtp-wts)*sss*et1;
taoxx=taoxx+cl*sss-qxx-qt1xx(k,i,j);
qyy=qt1yy(k,i,j);
qt1yy(k,i,j)=qyy*et+(wtp*sss-wts*szz)*et1;
taoyy=taoyy+sm2*syy+cl*sss-qyy-qt1yy(k,i,j);
qzz=qt1zz(k,i,j);
qt1zz(k,i,j)=qzz*et+(wtp*sss-wts*syy)*et1;
taozz=taozz+sm2*szz+cl*sss-qzz-qt1zz(k,i,j);
}
sxx=dxh1(2,i)/ca*(v1x(k,i-1,j)-v1x(k,i,j));
qxx=qt1xx_px(k,ib,j);
qt1xx_px(k,ib,j)=qxx*et+wtp*sxx*et1;
t1xx_px(k,ib,j)=damp1*t1xx_px(k,ib,j)+
damp2*(pm*sxx-qxx-qt1xx_px(k,ib,j));
t1xx(k,i,j)=taoxx+t1xx_px(k,ib,j);
qyy=qt1yy_px(k,ib,j);
qt1yy_px(k,ib,j)=qyy*et+(wtp-wts)*sxx*et1;
t1yy_px(k,ib,j)=damp1*t1yy_px(k,ib,j)+
damp2*(cl*sxx-qyy-qt1yy_px(k,ib,j));
t1yy(k,i,j)=taoyy+t1yy_px(k,ib,j);
t1zz(k,i,j)=taozz+t1yy_px(k,ib,j);
}
}
// }
// }
return;
}
__global__ void stress_norm_PmlY_IC(int nxb1,
int nyb1,
int mw1_pml1,
int nxtop,
int nztop,
int lby0,
int lby1,
int *nd1_tyy,
int *idmat1M,
float ca,
float *drti1M,
float *damp1_yM,
float *clamdaM,
float *cmuM,
float *epdtM,
float *qwpM,
float *qwsM,
float *qwt1M,
float *qwt2M,
float *dxh1M,
float *dyh1M,
float *dzi1M,
float *t1xxM,
float *t1yyM,
float *t1zzM,
float *qt1xxM,
float *qt1yyM,
float *qt1zzM,
float *t1xx_pyM,
float *t1yy_pyM,
float *qt1xx_pyM,
float *qt1yy_pyM,
float *v1xM,
float *v1yM,
float *v1zM)
// Compute the normal stresses in the PML-y-I region
// use grid_node_comm
// use wave_field_comm
// implicit NONE
// integer:: i,j,k,lb,jb,kb,kodd,jkq,inod,irw
// real:: taoxx,taoyy,taozz,sxx,syy,szz,sss,qxx,qyy,qzz, &
// rti,damp2,damp1,cl,sm2,pm,et,et1,wtp,wts
{
int i,j,k,lb,jb,kb,kodd,jkq,inod,irw;
float taoxx,taoyy,taozz,sxx,syy,szz,sss,qxx,qyy,qzz,rti,damp2,damp1,cl,sm2,pm,et,et1,wtp,wts;
//if(lby[0]>lby[1]) return;
//if(lby(1)>lby(2) ) return
i = blockIdx.x * blockDim.x + threadIdx.x + nd1_tyy[6];
lb = blockIdx.y * blockDim.y + threadIdx.y + lby0;
if (i > nd1_tyy[11] || lb > lby1)
{
return;
}
// for (i = nd1_tyy[6]; i <= nd1_tyy[11]; i++)
// //do i=nd1_tyy(7),nd1_tyy(12)
// {
jb = 0;
for (k = lby0; k < lb; k++)
{
for (j = nd1_tyy[4*k]; j <= nd1_tyy[1+4*k]; j++)
{
jb++;
}
}
// for (lb=lby[0]; lb <= lby[1]; lb++)
// //do lb=lby(1),lby(2)
// {
kb=0;
for (j = nd1_tyy[4*lb]; j <= nd1_tyy[1+4*lb]; j++)
//do j=nd1_tyy(1+4*lb),nd1_tyy(2+4*lb)
{
kb=kb+1;
jb=jb+1;
rti=drti1(kb,lb);
kodd=2 * ((j + nyb1) & 1) + 1;
//kodd=2*mod(j+nyb1,2)+1
jkq = ((i + nxb1) & 1) + kodd;
//jkq=mod(i+nxb1,2)+kodd
for (k=nd1_tyy[12]; k <=nd1_tyy[17]; k++)
//do k=nd1_tyy(13),nd1_tyy(18)
{
damp2=1./(1.+damp1_y(k,i,lb)*rti);
damp1=damp2*2.-1.;
inod=idmat1(k,i,j);
cl=clamda(inod);
sm2=2.*cmu(inod);
pm=cl+sm2;
//irw=jkq+4*mod(k,2)
irw=jkq + 4 * (k & 1);
et=epdt(irw);
et1=1.0-et;
wtp= pm*qwp(inod)*(qwp(inod)*qwt1(irw)+qwt2(irw));
wts=sm2*qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw));
if (i>nd1_tyy[7] && i<nd1_tyy[10]) {
//if(i>nd1_tyy(8) .and. i<nd1_tyy(11)) then
sxx=dxh1(1,i)*v1x(k,i-2,j)+dxh1(2,i)*v1x(k,i-1,j)+
dxh1(3,i)*v1x(k,i ,j)+dxh1(4,i)*v1x(k,i+1,j);
}
else {
sxx=0.0;
}
if(k==1) {
szz=dzi1(2,k)/ca*(22.*v1z(k,i,j)-17.*v1z(k+1,i,j)-
9.*v1z(k+2,i,j)+5.*v1z(k+3,i,j)-v1z(k+4,i,j))/24.;
}
else if(k==nztop) {
szz=dzi1(2,k)/ca*(v1z(k,i,j)-v1z(k+1,i,j));
}
else {
szz=dzi1(1,k)*v1z(k-1,i,j)+dzi1(2,k)*v1z(k, i,j)+
dzi1(3,k)*v1z(k+1,i,j)+dzi1(4,k)*v1z(k+2,i,j);
}
sss=sxx+szz;
qxx=qt1xx(k,i,j);
qt1xx(k,i,j)=qxx*et+(wtp*sss-wts*szz)*et1;
taoxx=t1xx(k,i,j)-t1xx_py(k,i,jb)+sm2*sxx+cl*sss-qxx-qt1xx(k,i,j);
qyy=qt1yy(k,i,j);
qt1yy(k,i,j)=qyy*et+(wtp-wts)*sss*et1;
taoyy=t1yy(k,i,j)-t1yy_py(k,i,jb)+cl*sss-qyy-qt1yy(k,i,j);
qzz=qt1zz(k,i,j);
qt1zz(k,i,j)=qzz*et+(wtp*sss-wts*sxx)*et1;
taozz=t1zz(k,i,j)-t1xx_py(k,i,jb)+sm2*szz+cl*sss-qzz-qt1zz(k,i,j);
syy=dyh1(2,j)/ca*(v1y(k,i,j-1)-v1y(k,i,j));
qxx=qt1xx_py(k,i,jb);
qt1xx_py(k,i,jb)=qxx*et+(wtp-wts)*syy*et1;
t1xx_py(k,i,jb)=damp1*t1xx_py(k,i,jb)+
damp2*(cl*syy-qxx-qt1xx_py(k,i,jb));
t1xx(k,i,j)=taoxx+t1xx_py(k,i,jb);
t1zz(k,i,j)=taozz+t1xx_py(k,i,jb);
qyy=qt1yy_py(k,i,jb);
qt1yy_py(k,i,jb)=qyy*et+wtp*syy*et1;
t1yy_py(k,i,jb)=damp1*t1yy_py(k,i,jb)+
damp2*(pm*syy-qyy-qt1yy_py(k,i,jb));
t1yy(k,i,j)=taoyy+t1yy_py(k,i,jb);
}
}
// }
// }
return;
}
__global__ void stress_xy_PmlX_IC(int nxb1,
int nyb1,
int mw1_pml,
int mw1_pml1,
int nxtop,
int nytop,
int nztop,
int lbx0,
int lbx1,
int *nd1_txy,
int *idmat1M,
float ca,
float *drth1M,
float *damp1_xM,
float *cmuM,
float *epdtM,
float *qwsM,
float *qwt1M,
float *qwt2M,
float *dxi1M,
float *dyi1M,
float *t1xyM,
float *qt1xyM,
float *t1xy_pxM,
float *qt1xy_pxM,
float *v1xM,
float *v1yM)
// Compute the stress-xy in the PML-x-I region
// use grid_node_comm
// use wave_field_comm
// implicit NONE
// integer:: i,j,k,lb,ib,kb,kodd,jkq,inod,irw
// real:: taoxy,cusxy,qxy,rth,damp2,damp1,sm,dmws,et,et1
{
int i,j,k,lb,ib,kb,kodd,jkq,inod,irw;
float taoxy,cusxy,qxy,rth,damp2,damp1,sm,dmws,et,et1;
int nth;
nth = (lbx1 - lbx0 + 1) * mw1_pml + 1 - lbx0;
j = blockIdx.x * blockDim.x + threadIdx.x + nd1_txy[0];
lb = blockIdx.y * blockDim.y + threadIdx.y + lbx0;
if (j > nd1_txy[5] || lb > lbx1)
{
return;
}
ib = 0;
for (k = lbx0; k < lb; k++)
{
for (i = nd1_txy[6+4*k]; i <= nd1_txy[7+4*k]; i++)
{
ib++;
}
}
//if (lbx[0] > lbx[1]) return;
//if ( lbx(1)>lbx(2) ) return
// for (j = nd1_txy[0]; j <= nd1_txy[5]; j++)
// //do j=nd1_txy(1),nd1_txy(6)
// {
kodd = 2 * ((j + nyb1) & 1) + 1;
//kodd=2*mod(j+nyb1,2)+1
// ib=0;
// for (lb = lbx[0]; lb <= lbx[1]; lb++)
// //do lb=lbx(1),lbx(2)
// {
kb=0;
for (i = nd1_txy[6+4*lb]; i <= nd1_txy[7+4*lb]; i++)
//do i=nd1_txy(7+4*lb),nd1_txy(8+4*lb)
{
kb=kb+1;
ib=ib+1;
rth=drth1(kb,lb);
jkq=((i + nxb1) & 1) + kodd;
//jkq=mod(i+nxb1,2)+kodd;
for (k = nd1_txy[12]; k <= nd1_txy[17]; k++)
//do k=nd1_txy(13),nd1_txy(18)
{
damp2=1./(1.+damp1_x(k,j,lb)*rth);
damp1=damp2*2.-1.;
inod=idmat1(k,i,j);
sm=2./(1./cmu(inod)+1./cmu(idmat1(k,i+1,j+1)));
irw=jkq + 4 * (k & 1);
//irw=jkq+4*mod(k,2)
et=epdt(irw);
et1=1.0-et;
dmws=qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw));
taoxy=t1xy(k,i,j)-t1xy_px(k,ib,j);
if(j > nd1_txy[1] && j<nd1_txy[4]) {
//if(j>nd1_txy(2) .and. j<nd1_txy(5)) then
cusxy=(dyi1(1,j)*v1x(k,i,j-1)+dyi1(2,j)*v1x(k,i,j)+
dyi1(3,j)*v1x(k,i,j+1)+dyi1(4,j)*v1x(k,i,j+2))*sm;
qxy=qt1xy(k,i,j);
qt1xy(k,i,j)=qxy*et+dmws*cusxy*et1;
taoxy=taoxy+cusxy-qxy-qt1xy(k,i,j);
}
cusxy=sm*dxi1(2,i)/ca*(v1y(k,i,j)-v1y(k,i+1,j));
qxy=qt1xy_px(k,ib,j);
qt1xy_px(k,ib,j)=qxy*et+dmws*cusxy*et1;
t1xy_px(k,ib,j)=damp1*t1xy_px(k,ib,j)+
damp2*(cusxy-qxy-qt1xy_px(k,ib,j));
t1xy(k,i,j)=taoxy+t1xy_px(k,ib,j);
}
}
// }
// }
return;
}
__global__ void stress_xy_PmlY_IC(int nxb1,
int nyb1,
int mw1_pml1,
int nxtop,
int nztop,
int lby0,
int lby1,
int *nd1_txy,
int *idmat1M,
float ca,
float *drth1M,
float *damp1_yM,
float *cmuM,
float *epdtM,
float *qwsM,
float *qwt1M,
float *qwt2M,
float *dxi1M,
float *dyi1M,
float *t1xyM,
float *qt1xyM,
float *t1xy_pyM,
float *qt1xy_pyM,
float *v1xM,
float *v1yM)
//Compute the stress-xy in the PML-y-I region
//use grid_node_comm
//use wave_field_comm
//implicit NONE
//integer:: i,j,k,lb,jb,kb,kodd,jkq,inod,irw
//real:: taoxy,cusyx,qxy,rth,damp2,damp1,sm,dmws,et,et1
{
int i,j,k,lb,jb,kb,kodd,jkq,inod,irw;
float taoxy,cusyx,qxy,rth,damp2,damp1,sm,dmws,et,et1;
//if(lby[0] > lby[1]) return;
//if( lby(1)>lby(2) ) return
i = blockIdx.x * blockDim.x + threadIdx.x + nd1_txy[6];
lb = blockIdx.y * blockDim.y + threadIdx.y + lby0;
if (i > nd1_txy[11] || lb > lby1)
{
return;
}
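// Same offset-recovery pattern as the PML-x kernels, but counting the
// j-ranges of the preceding slabs into jb.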
// for (i = nd1_txy[6]; i <= nd1_txy[11]; i++)
// //do i=nd1_txy(7),nd1_txy(12)
// {
jb=0;
for (k = lby0; k < lb; k++)
{
for (j = nd1_txy[4*k]; j <= nd1_txy[1 + 4 * k]; j++)
{
jb++;
}
}
// for (lb = lby[0]; lb <= lby[1]; lb++)
// //do lb=lby(1), lby(2)
// {
kb=0;
for (j = nd1_txy[4*lb]; j <= nd1_txy[1 + 4 * lb]; j++)
//do j=nd1_txy(1+4*lb),nd1_txy(2+4*lb)
{
kb=kb+1;
jb=jb+1;
rth=drth1(kb,lb);
kodd=2 * ((j + nyb1) & 1) + 1;
//kodd=2*mod(j+nyb1,2)+1;
jkq=((i + nxb1) & 1) + kodd;
//jkq=mod(i+nxb1,2)+kodd
for (k = nd1_txy[12]; k <= nd1_txy[17]; k++)
//do k=nd1_txy(13),nd1_txy(18)
{
damp2=1./(1.+damp1_y(k,i,lb)*rth);
damp1=damp2*2.-1.;
inod=idmat1(k,i,j);
sm=2./(1./cmu(inod)+1./cmu(idmat1(k,i+1,j+1)));
irw=jkq+4*(k&1);
//irw=jkq+4*mod(k,2)
et=epdt(irw);
et1=1.0-et;
dmws=qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw));
taoxy=t1xy(k,i,j)-t1xy_py(k,i,jb);
if(i > nd1_txy[7] && i<nd1_txy[10]) {
//if(i>nd1_txy(8) .and. i<nd1_txy(11)) then
cusyx=(dxi1(1,i)*v1y(k,i-1,j)+dxi1(2,i)*v1y(k,i,j)+
dxi1(3,i)*v1y(k,i+1,j)+dxi1(4,i)*v1y(k,i+2,j))*sm;
qxy=qt1xy(k,i,j);
qt1xy(k,i,j)=qxy*et+dmws*cusyx*et1;
taoxy=taoxy+cusyx-qxy-qt1xy(k,i,j);
}
cusyx=sm*dyi1(2,j)/ca*(v1x(k,i,j)-v1x(k,i,j+1));
qxy=qt1xy_py(k,i,jb);
qt1xy_py(k,i,jb)=qxy*et+dmws*cusyx*et1;
t1xy_py(k,i,jb)=damp1*t1xy_py(k,i,jb)+
damp2*(cusyx-qxy-qt1xy_py(k,i,jb));
t1xy(k,i,j)=taoxy+t1xy_py(k,i,jb);
}
}
// }
// }
return;
}
__global__ void stress_xz_PmlX_IC(int nxb1,
int nyb1,
int nxtop,
int nytop,
int nztop,
int mw1_pml,
int mw1_pml1,
int lbx0,
int lbx1,
int *nd1_txz,
int *idmat1M,
float ca,
float *drth1M,
float *damp1_xM,
float *cmuM,
float *epdtM,
float *qwsM,
float *qwt1M,
float *qwt2M,
float *dxi1M,
float *dzh1M,
float *t1xzM,
float *qt1xzM,
float *t1xz_pxM,
float *qt1xz_pxM,
float *v1xM,
float *v1zM)
//Compute the stress-xz at PML-x-I region
//use grid_node_comm
//use wave_field_comm
//implicit NONE
//integer:: i,j,k,lb,ib,kb,kodd,jkq,inod,irw
//real:: taoxz,cusxz,dvxz,qxz,rth,damp2,damp1,sm,dmws,et,et1
{
int i,j,k,lb,ib,kb,kodd,jkq,inod,irw;
float taoxz,cusxz,dvxz,qxz,rth,damp2,damp1,sm,dmws,et,et1;
int nth;
//if (lbx[0] > lbx[1]) return;
//if ( lbx(1)>lbx(2) ) return
nth = (lbx1 - lbx0 + 1) * mw1_pml + 1 - lbx0;
j = blockIdx.x * blockDim.x + threadIdx.x + nd1_txz[0];
lb = blockIdx.y * blockDim.y + threadIdx.y + lbx0;
if (j > nd1_txz[5] || lb > lbx1)
{
return;
}
// for (j = nd1_txz[0]; j <= nd1_txz[5]; j++)
// //do j=nd1_txz(1),nd1_txz(6)
// {
kodd=2 * ((j+nyb1)&1)+1;
//kodd=2*mod(j+nyb1,2)+1
ib=0;
for (k = lbx0; k < lb; k++)
{
for (i = nd1_txz[6+4*k]; i <= nd1_txz[7+4*k]; i++)
{
ib++;
}
}
// for (lb = lbx[0]; lb <= lbx[1]; lb++)
// //do lb=lbx(1),lbx(2)
// {
kb=0;
for (i = nd1_txz[6+4*lb]; i <= nd1_txz[7+4*lb]; i++)
//do i=nd1_txz(7+4*lb),nd1_txz(8+4*lb)
{
kb=kb+1;
ib=ib+1;
rth=drth1(kb,lb);
jkq=((i+nxb1)&1)+kodd;
//jkq=mod(i+nxb1,2)+kodd
for (k = nd1_txz[12]; k <= nd1_txz[17]; k++)
//do k=nd1_txz(13),nd1_txz(18)
{
damp2=1./(1.+damp1_x(k,j,lb)*rth);
damp1=damp2*2.-1.;
inod=idmat1(k,i,j);
sm=2./(1./cmu(inod)+1./cmu(idmat1(k-1,i+1,j)));
irw=jkq+4*(k&1);
//irw=jkq+4*mod(k,2)
et=epdt(irw);
et1=1.0-et;
dmws=qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw));
if(k<nztop) {
dvxz=dzh1(1,k)*v1x(k-2,i,j)+dzh1(2,k)*v1x(k-1,i,j)+
dzh1(3,k)*v1x(k, i,j)+dzh1(4,k)*v1x(k+1,i,j);
}
else {
dvxz=dzh1(2,k)/ca*(v1x(k-1,i,j)-v1x(k,i,j));
}
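// Near the bottom of region I (k >= nztop) the four-point vertical stencil
// would reach outside the block, so the else-branch above falls back to a
// one-sided two-point difference scaled by 1/ca.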
cusxz=dvxz*sm;
qxz=qt1xz(k,i,j);
qt1xz(k,i,j)=qxz*et+dmws*cusxz*et1;
taoxz=t1xz(k,i,j)-t1xz_px(k,ib,j)+cusxz-qxz-qt1xz(k,i,j);
cusxz=sm*dxi1(2,i)/ca*(v1z(k,i,j)-v1z(k,i+1,j));
qxz=qt1xz_px(k,ib,j);
qt1xz_px(k,ib,j)=qxz*et+dmws*cusxz*et1;
t1xz_px(k,ib,j)=damp1*t1xz_px(k,ib,j)+
damp2*(cusxz-qxz-qt1xz_px(k,ib,j));
t1xz(k,i,j)=taoxz+t1xz_px(k,ib,j);
}
}
// }
// }
return;
}
__global__ void stress_xz_PmlY_IC(int nxb1,
int nyb1,
int nxtop,
int nztop,
int lby0,
int lby1,
int *nd1_txz,
int *idmat1M,
float ca,
float *cmuM,
float *epdtM,
float *qwsM,
float *qwt1M,
float *qwt2M,
float *dxi1M,
float *dzh1M,
float *t1xzM,
float *qt1xzM,
float *v1xM,
float *v1zM)
//Compute the stress-xz at PML-y-I region
//use grid_node_comm
//use wave_field_comm
//implicit NONE
{
int i,j,k,lb,kodd,jkq,inod,irw;
float cusxz,dvxz,dvzx,qxz,sm,dmws,et,et1;
//if (lby[0] > lby[1]) return;
//if( lby(1)>lby(2) ) return
i = blockIdx.x * blockDim.x + threadIdx.x + nd1_txz[8];
lb = blockIdx.y * blockDim.y + threadIdx.y + lby0;
if (i > nd1_txz[9] || lb > lby1)
{
return;
}
// for (i = nd1_txz[8]; i <= nd1_txz[9]; i++)
// //do i=nd1_txz(9),nd1_txz(10)
// {
// for (lb=lby[0]; lb <= lby[1]; lb++)
// //do lb=lby(1),lby(2)
// {
for (j = nd1_txz[4*lb]; j <= nd1_txz[1+4*lb]; j++)
//do j=nd1_txz(1+4*lb),nd1_txz(2+4*lb)
{
kodd=2 * ((j+nyb1)&1)+1;
//kodd=2*mod(j+nyb1,2)+1
jkq=((i+nxb1)&1)+kodd;
//jkq=mod(i+nxb1,2)+kodd
for (k = nd1_txz[12]; k <= nd1_txz[17]; k++)
//do k=nd1_txz(13),nd1_txz(18)
{
inod=idmat1(k,i,j);
sm=2./(1./cmu(inod)+1./cmu(idmat1(k-1,i+1,j)));
irw=jkq+4*(k&1);
//irw=jkq+4*mod(k,2)
et=epdt(irw);
et1=1.0-et;
dmws=qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw));
dvzx=dxi1(1,i)*v1z(k,i-1,j)+dxi1(2,i)*v1z(k,i, j)+
dxi1(3,i)*v1z(k,i+1,j)+dxi1(4,i)*v1z(k,i+2,j);
if(k<nztop) {
dvxz=dzh1(1,k)*v1x(k-2,i,j)+dzh1(2,k)*v1x(k-1,i,j)+
dzh1(3,k)*v1x(k, i,j)+dzh1(4,k)*v1x(k+1,i,j);
}
else {
dvxz=dzh1(2,k)/ca*(v1x(k-1,i,j)-v1x(k,i,j));
}
cusxz=(dvzx+dvxz)*sm;
qxz=qt1xz(k,i,j);
qt1xz(k,i,j)=qxz*et+dmws*cusxz*et1;
t1xz(k,i,j)=t1xz(k,i,j)+cusxz-qxz-qt1xz(k,i,j);
}
}
// }
// }
return;
}
__global__ void stress_yz_PmlX_IC(int nxb1,
int nyb1,
int nztop,
int nxtop,
int lbx0,
int lbx1,
int *nd1_tyz,
int *idmat1M,
float ca,
float *cmuM,
float *epdtM,
float *qwsM,
float *qwt1M,
float *qwt2M,
float *dyi1M,
float *dzh1M,
float *t1yzM,
float *qt1yzM,
float *v1yM,
float *v1zM)
//Compute the stress-yz at PML-x-I region
//use grid_node_comm
//use wave_field_comm
//implicit NONE
//integer:: i,j,k,lb,kodd,jkq,inod,irw
//real:: cusyz,dvyz,dvzy,qyz,sm,dmws,et,et1
{
int i,j,k,lb,kodd,jkq,inod,irw;
float cusyz,dvyz,dvzy,qyz,sm,dmws,et,et1;
//if(lbx[0] > lbx[1]) return;
//if(lbx(1)>lbx(2) ) return
j = blockIdx.x * blockDim.x + threadIdx.x + nd1_tyz[2];
lb = blockIdx.y * blockDim.y + threadIdx.y + lbx0;
if (j > nd1_tyz[3] || lb > lbx1)
{
return;
}
// for (j = nd1_tyz[2]; j <= nd1_tyz[3]; j++)
// //do j=nd1_tyz(3),nd1_tyz(4)
// {
kodd=2 * ((j+nyb1)&1)+1;
//kodd=2*mod(j+nyb1,2)+1
// for (lb = lbx[0]; lb <= lbx[1]; lb++)
// //do lb=lbx(1),lbx(2)
// {
for (i = nd1_tyz[6+4*lb]; i <= nd1_tyz[7+4*lb]; i++)
//do i=nd1_tyz(7+4*lb),nd1_tyz(8+4*lb)
{
jkq = ((i+nxb1)&1)+kodd;
//jkq=mod(i+nxb1,2)+kodd
for (k = nd1_tyz[12]; k <= nd1_tyz[17]; k++)
//do k=nd1_tyz(13),nd1_tyz(18)
{
inod=idmat1(k,i,j);
sm=2./(1./cmu(inod)+1./cmu(idmat1(k-1,i,j+1)));
irw=jkq+4*(k&1);
//irw=jkq+4*mod(k,2)
et=epdt(irw);
et1=1.0-et;
dmws=qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw));
dvzy=dyi1(1,j)*v1z(k,i,j-1)+dyi1(2,j)*v1z(k,i,j )+
dyi1(3,j)*v1z(k,i,j+1)+dyi1(4,j)*v1z(k,i,j+2);
if(k<nztop) {
dvyz=dzh1(1,k)*v1y(k-2,i,j)+dzh1(2,k)*v1y(k-1,i,j)+
dzh1(3,k)*v1y(k, i,j)+dzh1(4,k)*v1y(k+1,i,j);
}
else {
dvyz=dzh1(2,k)/ca*(v1y(k-1,i,j)-v1y(k,i,j));
}
cusyz=(dvzy+dvyz)*sm;
qyz=qt1yz(k,i,j);
qt1yz(k,i,j)=qyz*et+dmws*cusyz*et1;
t1yz(k,i,j)=t1yz(k,i,j)+cusyz-qyz-qt1yz(k,i,j);
}
}
// }
// }
return;
}
__global__ void stress_yz_PmlY_IC(int nxb1,
int nyb1,
int mw1_pml1,
int nxtop,
int nztop,
int lby0,
int lby1,
int *nd1_tyz,
int *idmat1M,
float ca,
float *drth1M,
float *damp1_yM,
float *cmuM,
float *epdtM,
float *qwsM,
float *qwt1M,
float *qwt2M,
float *dyi1M,
float *dzh1M,
float *t1yzM,
float *qt1yzM,
float *t1yz_pyM,
float *qt1yz_pyM,
float *v1yM,
float *v1zM)
//Compute the stress-yz at PML-y-I region
//use grid_node_comm
//use wave_field_comm
//implicit NONE
//integer:: i,j,k,lb,jb,kb,kodd,jkq,inod,irw
//real:: taoyz,cusyz,dvyz,qyz,rth,damp2,damp1,sm,dmws,et,et1
{
int i,j,k,lb,jb,kb,kodd,jkq,inod,irw;
float taoyz,cusyz,dvyz,qyz,rth,damp2,damp1,sm,dmws,et,et1;
//if(lby[0] > lby[1]) return;
//if( lby(1)>lby(2) ) return
i = blockIdx.x * blockDim.x + threadIdx.x + nd1_tyz[6];
lb = blockIdx.y * blockDim.y + threadIdx.y + lby0;
if (i > nd1_tyz[11] || lb > lby1)
{
return;
}
// for (i = nd1_tyz[6]; i <= nd1_tyz[11]; i++)
// //do i=nd1_tyz(7),nd1_tyz(12)
// {
jb=0;
for (k = lby0; k < lb; k++)
{
for (j = nd1_tyz[4*k]; j <= nd1_tyz[1+4*k]; j++)
{
jb++;
}
}
// for (lb=lby[0]; lb <= lby[1]; lb++)
// //do lb=lby(1),lby(2)
// {
kb=0;
for (j = nd1_tyz[4*lb]; j <= nd1_tyz[1+4*lb]; j++)
//do j=nd1_tyz(1+4*lb),nd1_tyz(2+4*lb)
{
kb=kb+1;
jb=jb+1;
rth=drth1(kb,lb);
kodd=2*((j+nyb1)&1)+1;
//kodd=2*mod(j+nyb1,2)+1;
jkq=((i+nxb1)&1)+kodd;
//jkq=mod(i+nxb1,2)+kodd
for (k=nd1_tyz[12]; k <= nd1_tyz[17]; k++)
//do k=nd1_tyz(13),nd1_tyz(18)
{
damp2=1./(1.+damp1_y(k,i,lb)*rth);
damp1=damp2*2.-1.;
inod=idmat1(k,i,j);
sm=2./(1./cmu(inod)+1./cmu(idmat1(k-1,i,j+1)));
irw=jkq+4*(k&1);
//irw=jkq+4*mod(k,2)
et=epdt(irw);
et1=1.0-et;
dmws=qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw));
if(k<nztop) {
dvyz=dzh1(1,k)*v1y(k-2,i,j)+dzh1(2,k)*v1y(k-1,i,j)+
dzh1(3,k)*v1y(k, i,j)+dzh1(4,k)*v1y(k+1,i,j);
}
else {
dvyz=dzh1(2,k)/ca*(v1y(k-1,i,j)-v1y(k,i,j));
}
cusyz=dvyz*sm;
qyz=qt1yz(k,i,j);
qt1yz(k,i,j)=qyz*et+dmws*cusyz*et1;
taoyz=t1yz(k,i,j)-t1yz_py(k,i,jb)+cusyz-qyz-qt1yz(k,i,j);
cusyz=sm*dyi1(2,j)/ca*(v1z(k,i,j)-v1z(k,i,j+1));
qyz=qt1yz_py(k,i,jb);
qt1yz_py(k,i,jb)=qyz*et+dmws*cusyz*et1;
t1yz_py(k,i,jb)=damp1*t1yz_py(k,i,jb)+
damp2*(cusyz-qyz-qt1yz_py(k,i,jb));
t1yz(k,i,j)=taoyz+t1yz_py(k,i,jb);
}
}
// }
// }
return;
}
__global__ void stress_norm_xy_II(int nxb2,
int nyb2,
int nxbtm,
int nzbtm,
int nztop,
int *nd2_tyy,
int *idmat2M,
float *clamdaM,
float *cmuM,
float *epdtM,
float *qwpM,
float *qwsM,
float *qwt1M,
float *qwt2M,
float *t2xxM,
float *t2xyM,
float *t2yyM,
float *t2zzM,
float *qt2xxM,
float *qt2xyM,
float *qt2yyM,
float *qt2zzM,
float *dxh2M,
float *dyh2M,
float *dxi2M,
float *dyi2M,
float *dzi2M,
float *v2xM,
float *v2yM,
float *v2zM)
// Compute the stress-norm and XY components in Region II
// use grid_node_comm
// use wave_field_comm
// implicit NONE
// integer:: i,j,k,kodd,inod,jkq,irw
// real:: sxx,syy,szz,sxy,sss,qxx,qyy,qzz,qxy,cusxy, &
// cl,sm2,et,et1,dmws,pm,wtp,wts
{
int i,j,k,kodd,inod,jkq,irw;
float sxx,syy,szz,sxy,sss,qxx,qyy,qzz,qxy,cusxy,cl,sm2,et,et1,dmws,pm,wtp,wts;
j = blockIdx.x * blockDim.x + threadIdx.x + nd2_tyy[2];
i = blockIdx.y * blockDim.y + threadIdx.y + nd2_tyy[8];
if (j > nd2_tyy[3] || i > nd2_tyy[9])
{
return;
}
// for (j=nd2_tyy[2]; j <= nd2_tyy[3]; j++)
// //do j=nd2_tyy(3),nd2_tyy(4)
// {
kodd=2*((j+nyb2)&1)+1;
//kodd=2*mod(j+nyb2,2)+1
// for (i = nd2_tyy[8]; i <= nd2_tyy[9]; i++)
// //do i=nd2_tyy(9),nd2_tyy(10)
// {
jkq=((i+nxb2)&1)+kodd;
//jkq=mod(i+nxb2,2)+kodd
for (k = nd2_tyy[12]; k <= nd2_tyy[15]; k++)
//do k=nd2_tyy(13),nd2_tyy(16)
{
sxx=dxh2(1,i)*v2x(k,i-2,j)+dxh2(2,i)*v2x(k,i-1,j)+
dxh2(3,i)*v2x(k,i ,j)+dxh2(4,i)*v2x(k,i+1,j);
syy=dyh2(1,j)*v2y(k,i,j-2)+dyh2(2,j)*v2y(k,i,j-1)+
dyh2(3,j)*v2y(k,i ,j)+dyh2(4,j)*v2y(k,i,j+1);
sxy=dxi2(1,i)*v2y(k,i-1,j)+dxi2(2,i)*v2y(k,i, j)+
dxi2(3,i)*v2y(k,i+1,j)+dxi2(4,i)*v2y(k,i+2,j)+
dyi2(1,j)*v2x(k,i,j-1)+dyi2(2,j)*v2x(k,i,j )+
dyi2(3,j)*v2x(k,i,j+1)+dyi2(4,j)*v2x(k,i,j+2);
szz=dzi2(1,k)*v2z(k-1,i,j)+dzi2(2,k)*v2z(k, i,j)+
dzi2(3,k)*v2z(k+1,i,j)+dzi2(4,k)*v2z(k+2,i,j);
sss=sxx+syy+szz;
inod=idmat2(k,i,j);
cl=clamda(inod);
sm2=2.*cmu(inod);
pm=cl+sm2;
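// Material constants at this node: cl = lambda, sm2 = 2*mu, and
// pm = lambda + 2*mu (the P-wave modulus).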
cusxy=sxy/(1./sm2+.5/cmu(idmat2(k,i+1,j+1)));
irw=jkq+4*((k+nztop)&1);
//irw=jkq+4*mod(k+nztop,2);
et=epdt(irw);
et1=1.0-et;
wtp= pm*qwp(inod)*(qwp(inod)*qwt1(irw)+qwt2(irw));
wts=sm2*qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw));
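// wtp and wts weight the anelastic (Q-related) contributions of the P and
// S moduli; qwp/qws appear to be per-material quality-factor weights.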
qxx=qt2xx(k,i,j);
qt2xx(k,i,j)=qxx*et+(wtp*sss-wts*(syy+szz))*et1;
t2xx(k,i,j)=t2xx(k,i,j)+sm2*sxx+cl*sss-qxx-qt2xx(k,i,j);
qyy=qt2yy(k,i,j);
qt2yy(k,i,j)=qyy*et+(wtp*sss-wts*(sxx+szz))*et1;
t2yy(k,i,j)=t2yy(k,i,j)+sm2*syy+cl*sss-qyy-qt2yy(k,i,j);
qzz=qt2zz(k,i,j);
qt2zz(k,i,j)=qzz*et+(wtp*sss-wts*(sxx+syy))*et1;
t2zz(k,i,j)=t2zz(k,i,j)+sm2*szz+cl*sss-qzz-qt2zz(k,i,j);
qxy=qt2xy(k,i,j);
qt2xy(k,i,j)=qxy*et+wts/sm2*cusxy*et1;
t2xy(k,i,j)=t2xy(k,i,j)+cusxy-qxy-qt2xy(k,i,j);
}
// }
// }
return;
}
//call stress_xz_yz_II
__global__ void stress_xz_yz_IIC(int nxb2,
int nyb2,
int nztop,
int nxbtm,
int nzbtm,
int *nd2_tyz,
int *idmat2M,
float *cmuM,
float *epdtM,
float *qwsM,
float *qwt1M,
float *qwt2M,
float *dxi2M,
float *dyi2M,
float *dzh2M,
float *t2xzM,
float *t2yzM,
float *qt2xzM,
float *qt2yzM,
float *v2xM,
float *v2yM,
float *v2zM)
//Compute the stress-XZ and YZ components in Region II
//use grid_node_comm
//use wave_field_comm
//implicit NONE
//integer:: i,j,k,kodd,inod,jkq,irw
//real:: qxz,qyz,cusxz,cusyz,sm,et,et1,dmws
{
int i,j,k,kodd,inod,jkq,irw;
float qxz,qyz,cusxz,cusyz,sm,et,et1,dmws;
j = blockIdx.x * blockDim.x + threadIdx.x + nd2_tyz[2];
i = blockIdx.y * blockDim.y + threadIdx.y + nd2_tyz[8];
if (j > nd2_tyz[3] || i > nd2_tyz[9])
{
return;
}
// for (j = nd2_tyz[2]; j <= nd2_tyz[3]; j++)
// //do j=nd2_tyz(3),nd2_tyz(4)
// {
kodd=2*((j+nyb2)&1)+1;
//kodd=2*mod(j+nyb2,2)+1
// for (i = nd2_tyz[8]; i <= nd2_tyz[9]; i++)
// //do i=nd2_tyz(9),nd2_tyz(10)
// {
jkq=((i+nxb2)&1)+kodd;
//jkq=mod(i+nxb2,2)+kodd
for (k=nd2_tyz[12]; k <= nd2_tyz[15]; k++)
//do k=nd2_tyz(13),nd2_tyz(16)
{
inod=idmat2(k,i,j);
sm=cmu(inod);
cusxz=(dxi2(1,i)*v2z(k,i-1,j)+dxi2(2,i)*v2z(k,i, j)+
dxi2(3,i)*v2z(k,i+1,j)+dxi2(4,i)*v2z(k,i+2,j)+
dzh2(1,k)*v2x(k-2,i,j)+dzh2(2,k)*v2x(k-1,i,j)+
dzh2(3,k)*v2x(k, i,j)+dzh2(4,k)*v2x(k+1,i,j))/
(.5/sm+.5/cmu(idmat2(k-1,i+1,j)));
cusyz=(dyi2(1,j)*v2z(k,i,j-1)+dyi2(2,j)*v2z(k,i,j )+
dyi2(3,j)*v2z(k,i,j+1)+dyi2(4,j)*v2z(k,i,j+2)+
dzh2(1,k)*v2y(k-2,i,j)+dzh2(2,k)*v2y(k-1,i,j)+
dzh2(3,k)*v2y(k, i,j)+dzh2(4,k)*v2y(k+1,i,j))/
(.5/sm+.5/cmu(idmat2(k-1,i,j+1)));
irw=jkq+4*((k+nztop)&1);
//irw=jkq+4*mod(k+nztop,2)
et=epdt(irw);
et1=1.0-et;
dmws=qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw));
qxz=qt2xz(k,i,j);
qt2xz(k,i,j)=qxz*et+dmws*cusxz*et1;
t2xz(k,i,j)=t2xz(k,i,j)+cusxz-qxz-qt2xz(k,i,j);
qyz=qt2yz(k,i,j);
qt2yz(k,i,j)=qyz*et+dmws*cusyz*et1;
t2yz(k,i,j)=t2yz(k,i,j)+cusyz-qyz-qt2yz(k,i,j);
}
// }
// }
return;
}
//call stress_norm_PmlX_II
__global__ void stress_norm_PmlX_IIC(int nxb2,
int nyb2,
int mw2_pml,
int mw2_pml1,
int nztop,
int nxbtm,
int nybtm,
int nzbtm,
int lbx0,
int lbx1,
int *nd2_tyy,
int *idmat2M,
float ca,
float *drti2M,
float *damp2_xM,
float *clamdaM,
float *cmuM,
float *epdtM,
float *qwpM,
float *qwsM,
float *qwt1M,
float *qwt2M,
float *dxh2M,
float *dyh2M,
float *dzi2M,
float *t2xxM,
float *t2yyM,
float *t2zzM,
float *qt2xxM,
float *qt2yyM,
float *qt2zzM,
float *t2xx_pxM,
float *t2yy_pxM,
float *qt2xx_pxM,
float *qt2yy_pxM,
float *v2xM,
float *v2yM,
float *v2zM)
//Compute the Stress-norm at region of PML-x-II
//use grid_node_comm
//use wave_field_comm
//implicit NONE
//integer:: i,j,k,lb,ib,kb,kodd,jkq,inod,irw
//real:: taoxx,taoyy,taozz,sxx,syy,szz,sss,qxx,qyy,qzz, &
// rti,damp2,damp1,cl,sm2,pm,et,et1,wtp,wts
{
int i,j,k,lb,ib,kb,kodd,jkq,inod,irw;
float taoxx,taoyy,taozz,sxx,syy,szz,sss,qxx,qyy,qzz,rti,damp2,damp1,cl,sm2,pm,et,et1,wtp,wts;
int nti;
//if(lbx[0] > lbx[1]) return;
//if( lbx(1)>lbx(2) ) return
nti = (lbx1 - lbx0 + 1) * mw2_pml + lbx1;
j = blockIdx.x * blockDim.x + threadIdx.x + nd2_tyy[0];
lb = blockIdx.y * blockDim.y + threadIdx.y + lbx0;
if (j > nd2_tyy[5] || lb > lbx1)
{
return;
}
ib = 0;
for (k = lbx0; k < lb; k++)
{
for (i=nd2_tyy[6+4*k]; i <= nd2_tyy[7+4*k]; i++)
{
ib++;
}
}
// for (j=nd2_tyy[0]; j <= nd2_tyy[5]; j++)
// //do j=nd2_tyy(1),nd2_tyy(6)
// {
kodd=2*((j+nyb2)&1)+1;
//kodd=2*mod(j+nyb2,2)+1
// ib=0;
// for (lb=lbx[0]; lb <= lbx[1]; lb++)
// //do lb=lbx(1),lbx(2)
// {
kb=0;
for (i=nd2_tyy[6+4*lb]; i <= nd2_tyy[7+4*lb]; i++)
//do i=nd2_tyy(7+4*lb),nd2_tyy(8+4*lb)
{
kb=kb+1;
ib=ib+1;
rti=drti2(kb,lb);
jkq=((i+nxb2)&1)+kodd;
//jkq=mod(i+nxb2,2)+kodd;
for (k=nd2_tyy[12]; k <= nd2_tyy[17]; k++)
//do k=nd2_tyy(13),nd2_tyy(18)
{
damp2=1./(1.+damp2_x(k,j,lb)*rti);
damp1=damp2*2.0-1.0;
inod=idmat2(k,i,j);
cl=clamda(inod);
sm2=2.*cmu(inod);
pm=cl+sm2;
irw=jkq+4*((k+nztop)&1);
//irw=jkq+4*mod(k+nztop,2)
et=epdt(irw);
et1=1.0-et;
wtp= pm*qwp(inod)*(qwp(inod)*qwt1(irw)+qwt2(irw));
wts=sm2*qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw));
taoxx=t2xx(k,i,j)-t2xx_px(k,ib,j);
taoyy=t2yy(k,i,j)-t2yy_px(k,ib,j);
taozz=t2zz(k,i,j)-t2yy_px(k,ib,j);
if(j>nd2_tyy[1] && j<nd2_tyy[4]) {
//if(j>nd2_tyy(2) .and. j<nd2_tyy(5)) {
syy=dyh2(1,j)*v2y(k,i,j-2)+dyh2(2,j)*v2y(k,i,j-1)+
dyh2(3,j)*v2y(k,i ,j)+dyh2(4,j)*v2y(k,i,j+1);
if(k<nd2_tyy[16]) {
//if(k<nd2_tyy(17)) {
szz=dzi2(1,k)*v2z(k-1,i,j)+dzi2(2,k)*v2z(k, i,j)+
dzi2(3,k)*v2z(k+1,i,j)+dzi2(4,k)*v2z(k+2,i,j);
}
else {
szz=0.0;
}
sss=syy+szz;
qxx=qt2xx(k,i,j);
qt2xx(k,i,j)=qxx*et+(wtp-wts)*sss*et1;
taoxx=taoxx+cl*sss-qxx-qt2xx(k,i,j);
qyy=qt2yy(k,i,j);
qt2yy(k,i,j)=qyy*et+(wtp*sss-wts*szz)*et1;
taoyy=taoyy+sm2*syy+cl*sss-qyy-qt2yy(k,i,j);
qzz=qt2zz(k,i,j);
qt2zz(k,i,j)=qzz*et+(wtp*sss-wts*syy)*et1;
taozz=taozz+sm2*szz+cl*sss-qzz-qt2zz(k,i,j);
}
sxx=dxh2(2,i)/ca*(v2x(k,i-1,j)-v2x(k,i,j));
qxx=qt2xx_px(k,ib,j);
qt2xx_px(k,ib,j)=qxx*et+wtp*sxx*et1;
t2xx_px(k,ib,j)=damp1*t2xx_px(k,ib,j)+
damp2*(pm*sxx-qxx-qt2xx_px(k,ib,j));
t2xx(k,i,j)=taoxx+t2xx_px(k,ib,j);
qyy=qt2yy_px(k,ib,j);
qt2yy_px(k,ib,j)=qyy*et+(wtp-wts)*sxx*et1;
t2yy_px(k,ib,j)=damp1*t2yy_px(k,ib,j)+
damp2*(cl*sxx-qyy-qt2yy_px(k,ib,j));
t2yy(k,i,j)=taoyy+t2yy_px(k,ib,j);
t2zz(k,i,j)=taozz+t2yy_px(k,ib,j);
}
}
// }
// }
return;
}
__global__ void stress_norm_PmlY_II(int nxb2,
int nyb2,
int nztop,
int nxbtm,
int nzbtm,
int mw2_pml1,
int lby0,
int lby1,
int *nd2_tyy,
int *idmat2M,
float ca,
float *drti2M,
float *damp2_yM,
float *clamdaM,
float *cmuM,
float *epdtM,
float *qwpM,
float *qwsM,
float *qwt1M,
float *qwt2M,
float *dxh2M,
float *dyh2M,
float *dzi2M,
float *t2xxM,
float *t2yyM,
float *t2zzM,
float *qt2xxM,
float *qt2yyM,
float *qt2zzM,
float *t2xx_pyM,
float *t2yy_pyM,
float *qt2xx_pyM,
float *qt2yy_pyM,
float *v2xM,
float *v2yM,
float *v2zM)
//Compute the stress-norm at region of PML-y-II
//use grid_node_comm
//use wave_field_comm
//implicit NONE
//integer:: i,j,k,lb,jb,kb,kodd,jkq,inod,irw
//real:: taoxx,taoyy,taozz,sxx,syy,szz,sss,qxx,qyy,qzz, &
// rti,damp2,damp1,cl,sm2,pm,et,et1,wtp,wts
{
int i,j,k,lb,jb,kb,kodd,jkq,inod,irw;
float taoxx,taoyy,taozz,sxx,syy,szz,sss,qxx,qyy,qzz,rti,damp2,damp1,cl,sm2,pm,et,et1,wtp,wts;
//if( lby[0] > lby[1] ) return;
//if( lby(1)>lby(2) ) return;
i = blockIdx.x * blockDim.x + threadIdx.x + nd2_tyy[6];
lb = blockIdx.y * blockDim.y + threadIdx.y + lby0;
if (i > nd2_tyy[11] || lb > lby1)
{
return;
}
jb = 0;
for (k = lby0; k < lb; k++)
{
for (j=nd2_tyy[4*k]; j <= nd2_tyy[1+4*k]; j++)
{
jb++;
}
}
// for (i = nd2_tyy[6]; i <= nd2_tyy[11]; i++)
// //do i=nd2_tyy(7),nd2_tyy(12)
// {
// jb=0;
// for (lb = lby[0]; lb <= lby[1]; lb++)
// //do lb=lby(1),lby(2)
// {
kb=0;
for (j=nd2_tyy[4*lb]; j <= nd2_tyy[1+4*lb]; j++)
//do j=nd2_tyy(1+4*lb),nd2_tyy(2+4*lb)
{
kb=kb+1;
jb=jb+1;
rti=drti2(kb,lb);
kodd=2*((j+nyb2)&1)+1;
//kodd=2*mod(j+nyb2,2)+1;
jkq=((i+nxb2)&1)+kodd;
//jkq=mod(i+nxb2,2)+kodd
for (k=nd2_tyy[12]; k <= nd2_tyy[17]; k++)
//do k=nd2_tyy(13),nd2_tyy(18)
{
damp2=1./(1.+damp2_y(k,i,lb)*rti);
damp1=damp2*2.0-1.;
inod=idmat2(k,i,j);
cl=clamda(inod);
sm2=2.*cmu(inod);
pm=cl+sm2;
irw=jkq+4*((k+nztop)&1);
//irw=jkq+4*mod(k+nztop,2)
et=epdt(irw);
et1=1.0-et;
wtp= pm*qwp(inod)*(qwp(inod)*qwt1(irw)+qwt2(irw));
wts=sm2*qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw));
taoxx=t2xx(k,i,j)-t2xx_py(k,i,jb);
taoyy=t2yy(k,i,j)-t2yy_py(k,i,jb);
taozz=t2zz(k,i,j)-t2xx_py(k,i,jb);
if(k<nd2_tyy[16]) {
//if(k<nd2_tyy(17)) then
szz=dzi2(1,k)*v2z(k-1,i,j)+dzi2(2,k)*v2z(k, i,j)+
dzi2(3,k)*v2z(k+1,i,j)+dzi2(4,k)*v2z(k+2,i,j);
if(i>nd2_tyy[7] && i<nd2_tyy[10]) {
//if(i>nd2_tyy(8) .and. i<nd2_tyy(11)) {
sxx=dxh2(1,i)*v2x(k,i-2,j)+dxh2(2,i)*v2x(k,i-1,j)+
dxh2(3,i)*v2x(k,i ,j)+dxh2(4,i)*v2x(k,i+1,j);
}
else {
sxx=0.0;
}
sss=sxx+szz;
qxx=qt2xx(k,i,j);
qt2xx(k,i,j)=qxx*et+(wtp*sss-wts*szz)*et1;
taoxx=taoxx+sm2*sxx+cl*sss-qxx-qt2xx(k,i,j);
qyy=qt2yy(k,i,j);
qt2yy(k,i,j)=qyy*et+(wtp-wts)*sss*et1;
taoyy=taoyy+cl*sss-qyy-qt2yy(k,i,j);
qzz=qt2zz(k,i,j);
qt2zz(k,i,j)=qzz*et+(wtp*sss-wts*sxx)*et1;
taozz=taozz+sm2*szz+cl*sss-qzz-qt2zz(k,i,j);
}
else {
if(i>nd2_tyy[7] && i<nd2_tyy[10]) {
//if(i>nd2_tyy(8) .and. i<nd2_tyy(11)) then
sxx=dxh2(1,i)*v2x(k,i-2,j)+dxh2(2,i)*v2x(k,i-1,j)+
dxh2(3,i)*v2x(k,i ,j)+dxh2(4,i)*v2x(k,i+1,j);
qxx=qt2xx(k,i,j);
qt2xx(k,i,j)=qxx*et+wtp*sxx*et1;
taoxx=taoxx+pm*sxx-qxx-qt2xx(k,i,j);
qyy=qt2yy(k,i,j);
qt2yy(k,i,j)=qyy*et+(wtp-wts)*sxx*et1;
taoyy=taoyy+cl*sxx-qyy-qt2yy(k,i,j);
qzz=qt2zz(k,i,j);
qt2zz(k,i,j)=qzz*et+(wtp-wts)*sxx*et1;
taozz=taozz+cl*sxx-qzz-qt2zz(k,i,j);
}
}
syy=dyh2(2,j)/ca*(v2y(k,i,j-1)-v2y(k,i,j));
qxx=qt2xx_py(k,i,jb);
qt2xx_py(k,i,jb)=qxx*et+(wtp-wts)*syy*et1;
t2xx_py(k,i,jb)=damp1*t2xx_py(k,i,jb)+damp2*(cl*syy-qxx-qt2xx_py(k,i,jb));
t2xx(k,i,j)=taoxx+t2xx_py(k,i,jb);
t2zz(k,i,j)=taozz+t2xx_py(k,i,jb);
qyy=qt2yy_py(k,i,jb);
qt2yy_py(k,i,jb)=qyy*et+wtp*syy*et1;
t2yy_py(k,i,jb)=damp1*t2yy_py(k,i,jb)+damp2*(pm*syy-qyy-qt2yy_py(k,i,jb));
t2yy(k,i,j)=taoyy+t2yy_py(k,i,jb);
}
}
// }
// }
return;
}
__global__ void stress_norm_PmlZ_IIC(int nxb2,
int nyb2,
int mw2_pml,
int mw2_pml1,
int nztop,
int nxbtm,
int nzbtm,
int *nd2_tyy,
int *idmat2M,
float ca,
float *damp2_zM,
float *drth2M,
float *clamdaM,
float *cmuM,
float *epdtM,
float *qwpM,
float *qwsM,
float *qwt1M,
float *qwt2M,
float *dxh2M,
float *dyh2M,
float *dzi2M,
float *t2xxM,
float *t2yyM,
float *t2zzM,
float *qt2xxM,
float *qt2yyM,
float *qt2zzM,
float *t2xx_pzM,
float *t2zz_pzM,
float *qt2xx_pzM,
float *qt2zz_pzM,
float *v2xM,
float *v2yM,
float *v2zM)
//Compute the stress-norm at region of PML-z-II
//use grid_node_comm
//use wave_field_comm
//implicit NONE
//integer:: i,j,k,lb,kb,kodd,jkq,inod,irw
//real:: taoxx,taoyy,taozz,sxx,syy,szz,sss,qxx,qyy,qzz, &
// damp2,damp1,cl,sm2,pm,et,et1,wtp,wts
{
int i,j,k,lb,kb,kodd,jkq,inod,irw;
float taoxx,taoyy,taozz,sxx,syy,szz,sss,qxx,qyy,qzz,damp2,damp1,cl,sm2,pm,et,et1,wtp,wts;
j = blockIdx.x * blockDim.x + threadIdx.x + nd2_tyy[0];
i = blockIdx.y * blockDim.y + threadIdx.y + nd2_tyy[6];
if (j > nd2_tyy[5] || i > nd2_tyy[11])
{
return;
}
// for (j = nd2_tyy[0]; j <= nd2_tyy[5]; j++)
// //do j=nd2_tyy(1),nd2_tyy(6)
// {
kodd=2*((j+nyb2)&1)+1;
//kodd=2*mod(j+nyb2,2)+1
// for (i=nd2_tyy[6]; i <= nd2_tyy[11]; i++)
// //do i=nd2_tyy(7),nd2_tyy(12)
// {
jkq=((i+nxb2)&1)+kodd;
//jkq=mod(i+nxb2,2)+kodd
kb=0;
for (k = nd2_tyy[16]; k <= nd2_tyy[17]; k++)
//do k=nd2_tyy(17),nd2_tyy(18)
{
kb=kb+1;
damp2=1./(1.+damp2_z(i,j)*drth2(kb,1));
damp1=damp2*2.-1.;
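// In the z-PML the damping profile depends only on the horizontal position
// (i,j); the depth dependence enters through the kb index into drth2.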
inod=idmat2(k,i,j);
cl=clamda(inod);
sm2=2.*cmu(inod);
pm=cl+sm2;
irw=jkq+4*((k+nztop)&1);
//irw=jkq+4*mod(k+nztop,2)
et=epdt(irw);
et1=1.0-et;
wtp= pm*qwp(inod)*(qwp(inod)*qwt1(irw)+qwt2(irw));
wts=sm2*qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw));
taoxx=t2xx(k,i,j)-t2xx_pz(kb,i,j);
taoyy=t2yy(k,i,j)-t2xx_pz(kb,i,j);
taozz=t2zz(k,i,j)-t2zz_pz(kb,i,j);
if(i>nd2_tyy[7] && i<nd2_tyy[10] && j>nd2_tyy[1] && j<nd2_tyy[4]) {
//if(i>nd2_tyy(8) .and. i<nd2_tyy(11) .and. &
// j>nd2_tyy(2) .and. j<nd2_tyy(5)) then
sxx=dxh2(1,i)*v2x(k,i-2,j)+dxh2(2,i)*v2x(k,i-1,j)+
dxh2(3,i)*v2x(k,i ,j)+dxh2(4,i)*v2x(k,i+1,j);
syy=dyh2(1,j)*v2y(k,i,j-2)+dyh2(2,j)*v2y(k,i,j-1)+
dyh2(3,j)*v2y(k,i ,j)+dyh2(4,j)*v2y(k,i,j+1);
sss=sxx+syy;
qxx=qt2xx(k,i,j);
qt2xx(k,i,j)=qxx*et+(wtp*sss-wts*syy)*et1;
taoxx=taoxx+sm2*sxx+cl*sss-qxx-qt2xx(k,i,j);
qyy=qt2yy(k,i,j);
qt2yy(k,i,j)=qyy*et+(wtp*sss-wts*sxx)*et1;
taoyy=taoyy+sm2*syy+cl*sss-qyy-qt2yy(k,i,j);
qzz=qt2zz(k,i,j);
qt2zz(k,i,j)=qzz*et+(wtp-wts)*sss*et1;
taozz=taozz+cl*sss-qzz-qt2zz(k,i,j);
}
szz=dzi2(2,k)/ca*(v2z(k,i,j)-v2z(k+1,i,j));
qxx=qt2xx_pz(kb,i,j);
qt2xx_pz(kb,i,j)=qxx*et+(wtp-wts)*szz*et1;
t2xx_pz(kb,i,j)=damp1*t2xx_pz(kb,i,j)+
damp2*(cl*szz-qxx-qt2xx_pz(kb,i,j));
t2xx(k,i,j)=taoxx+t2xx_pz(kb,i,j);
t2yy(k,i,j)=taoyy+t2xx_pz(kb,i,j);
qzz=qt2zz_pz(kb,i,j);
qt2zz_pz(kb,i,j)=qzz*et+wtp*szz*et1;
t2zz_pz(kb,i,j)=damp1*t2zz_pz(kb,i,j)+
damp2*(pm*szz-qzz-qt2zz_pz(kb,i,j));
t2zz(k,i,j)=taozz+t2zz_pz(kb,i,j);
}
// }
// }
return;
}
__global__ void stress_xy_PmlX_IIC(int nxb2,
int nyb2,
int mw2_pml,
int mw2_pml1,
int nxbtm,
int nybtm,
int nzbtm,
int nztop,
int lbx0,
int lbx1,
int *nd2_txy,
int *idmat2M,
float ca,
float *drth2M,
float *damp2_xM,
float *cmuM,
float *epdtM,
float *qwsM,
float *qwt1M,
float *qwt2M,
float *dxi2M,
float *dyi2M,
float *t2xyM,
float *qt2xyM,
float *t2xy_pxM,
float *qt2xy_pxM,
float *v2xM,
float *v2yM)
//Compute the Stress-xy at region of PML-x-II
//use grid_node_comm
//use wave_field_comm
//implicit NONE
//integer:: i,j,k,lb,ib,kb,kodd,jkq,inod,irw
//real:: taoxy,cusxy,qxy,rth,damp2,damp1,sm,dmws,et,et1
{
int i,j,k,lb,ib,kb,kodd,jkq,inod,irw;
float taoxy,cusxy,qxy,rth,damp2,damp1,sm,dmws,et,et1;
int nth;
//if(lbx[0] > lbx[1]) return;
nth = (lbx1 - lbx0 + 1) * mw2_pml + 1 - lbx0;
//nth = (lbx(2) - lbx(1) + 1) * mw2_pml + 1 - lbx(1)
j = blockIdx.x * blockDim.x + threadIdx.x + nd2_txy[0];
lb = blockIdx.y * blockDim.y + threadIdx.y + lbx0;
if (j > nd2_txy[5] || lb > lbx1)
{
return;
}
ib = 0;
for (k = lbx0; k < lb; k++)
{
for (i=nd2_txy[6+4*k]; i <= nd2_txy[7+4*k]; i++)
{
ib++;
}
}
// for (j = nd2_txy[0]; j <= nd2_txy[5]; j++)
// //do j=nd2_txy(1),nd2_txy(6)
// {
kodd=2*((j+nyb2)&1)+1;
//kodd=2*mod(j+nyb2,2)+1
// ib=0;
// for (lb = lbx[0]; lb <= lbx[1]; lb++)
// //do lb=lbx(1),lbx(2)
// {
kb=0;
for (i=nd2_txy[6+4*lb]; i <= nd2_txy[7+4*lb]; i++)
//do i=nd2_txy(7+4*lb),nd2_txy(8+4*lb)
{
kb=kb+1;
ib=ib+1;
rth=drth2(kb,lb);
jkq=((i+nxb2)&1)+kodd;
//jkq=mod(i+nxb2,2)+kodd
for (k = nd2_txy[12]; k <= nd2_txy[17]; k++)
//do k=nd2_txy(13),nd2_txy(18)
{
damp2=1./(1.+damp2_x(k,j,lb)*rth);
damp1=damp2*2.0-1.;
inod=idmat2(k,i,j);
sm=2./(1./cmu(inod)+1./cmu(idmat2(k,i+1,j+1)));
irw=jkq+4*((k+nztop)&1);
//irw=jkq+4*mod(k+nztop,2)
et=epdt(irw);
et1=1.0-et;
dmws=qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw));
taoxy=t2xy(k,i,j)-t2xy_px(k,ib,j);
if(j > nd2_txy[1] && j<nd2_txy[4]) {
//if(j>nd2_txy(2) .and. j<nd2_txy(5)) then
cusxy=(dyi2(1,j)*v2x(k,i,j-1)+dyi2(2,j)*v2x(k,i,j)+
dyi2(3,j)*v2x(k,i,j+1)+dyi2(4,j)*v2x(k,i,j+2))*sm;
qxy=qt2xy(k,i,j);
qt2xy(k,i,j)=qxy*et+dmws*cusxy*et1;
taoxy=taoxy+cusxy-qxy-qt2xy(k,i,j);
}
cusxy=sm*dxi2(2,i)/ca*(v2y(k,i,j)-v2y(k,i+1,j));
qxy=qt2xy_px(k,ib,j);
qt2xy_px(k,ib,j)=qxy*et+dmws*cusxy*et1;
t2xy_px(k,ib,j)=damp1*t2xy_px(k,ib,j)+
damp2*(cusxy-qxy-qt2xy_px(k,ib,j));
t2xy(k,i,j)=taoxy+t2xy_px(k,ib,j);
}
}
// }
// }
return;
}
__global__ void stress_xy_PmlY_IIC(int nxb2,
int nyb2,
int mw2_pml1,
int nztop,
int nxbtm,
int nzbtm,
int lby0,
int lby1,
int *nd2_txy,
int *idmat2M,
float ca,
float *drth2M,
float *damp2_yM,
float *cmuM,
float *epdtM,
float *qwsM,
float *qwt1M,
float *qwt2M,
float *dxi2M,
float *dyi2M,
float *t2xyM,
float *qt2xyM,
float *t2xy_pyM,
float *qt2xy_pyM,
float *v2xM,
float *v2yM)
//Compute the Stress-xy at region of PML-y-II
//use grid_node_comm
//use wave_field_comm
//implicit NONE
//integer:: i,j,k,lb,jb,kb,kodd,jkq,inod,irw
//real:: taoxy,cusxy,qxy,rth,damp2,damp1,sm,dmws,et,et1
{
int i,j,k,lb,jb,kb,kodd,jkq,inod,irw;
float taoxy,cusxy,qxy,rth,damp2,damp1,sm,dmws,et,et1;
//if(lby[0] > lby[1]) return;
//if( lby(1)>lby(2) ) return
i = blockIdx.x * blockDim.x + threadIdx.x + nd2_txy[6];
lb = blockIdx.y * blockDim.y + threadIdx.y + lby0;
if (i > nd2_txy[11] || lb > lby1)
{
return;
}
jb = 0;
for (k = lby0; k < lb; k++)
{
for (j=nd2_txy[4*k]; j <= nd2_txy[1+4*k]; j++)
{
jb++;
}
}
// for (i = nd2_txy[6]; i <= nd2_txy[11]; i++)
// //do i=nd2_txy(7),nd2_txy(12)
// {
// jb=0;
// for (lb=lby[0]; lb <= lby[1]; lb++)
// //do lb=lby(1),lby(2)
// {
kb=0;
for (j=nd2_txy[4*lb]; j <= nd2_txy[1+4*lb]; j++)
//do j=nd2_txy(1+4*lb),nd2_txy(2+4*lb)
{
kb=kb+1;
jb=jb+1;
rth=drth2(kb,lb);
kodd=2*((j+nyb2)&1)+1;
//kodd=2*mod(j+nyb2,2)+1
jkq=((i+nxb2)&1)+kodd;
//jkq=mod(i+nxb2,2)+kodd
for (k = nd2_txy[12]; k <= nd2_txy[17]; k++)
//do k=nd2_txy(13),nd2_txy(18)
{
damp2=1./(1.+damp2_y(k,i,lb)*rth);
damp1=damp2*2.-1.;
inod=idmat2(k,i,j);
sm=2./(1./cmu(inod)+1./cmu(idmat2(k,i+1,j+1)));
irw=jkq+4*((k+nztop)&1);
//irw=jkq+4*mod(k+nztop,2)
et=epdt(irw);
et1=1.0-et;
dmws=qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw));
taoxy=t2xy(k,i,j)-t2xy_py(k,i,jb);
if(i>nd2_txy[7] && i<nd2_txy[10]) {
//if(i>nd2_txy(8) .and. i<nd2_txy(11)) then
cusxy=(dxi2(1,i)*v2y(k,i-1,j)+dxi2(2,i)*v2y(k,i,j)+
dxi2(3,i)*v2y(k,i+1,j)+dxi2(4,i)*v2y(k,i+2,j))*sm;
qxy=qt2xy(k,i,j);
qt2xy(k,i,j)=qxy*et+dmws*cusxy*et1;
taoxy=taoxy+cusxy-qxy-qt2xy(k,i,j);
}
cusxy=sm*dyi2(2,j)/ca*(v2x(k,i,j)-v2x(k,i,j+1));
qxy=qt2xy_py(k,i,jb);
qt2xy_py(k,i,jb)=qxy*et+dmws*cusxy*et1;
t2xy_py(k,i,jb)=damp1*t2xy_py(k,i,jb)+
damp2*(cusxy-qxy-qt2xy_py(k,i,jb));
t2xy(k,i,j)=taoxy+t2xy_py(k,i,jb);
}
}
// }
// }
return;
}
__global__ void stress_xy_PmlZ_II(int nxb2,
int nyb2,
int nxbtm,
int nzbtm,
int nztop,
int *nd2_txy,
int *idmat2M,
float *cmuM,
float *epdtM,
float *qwsM,
float *qwt1M,
float *qwt2M,
float *dxi2M,
float *dyi2M,
float *t2xyM,
float *qt2xyM,
float *v2xM,
float *v2yM)
//Compute the Stress-xy at region of PML-z-II
//use grid_node_comm
//use wave_field_comm
//implicit NONE
//integer:: i,j,k,lb,kodd,jkq,inod,irw
//real:: cusxy,qxy,sm,dmws,et,et1
{
int i,j,k,lb,kodd,jkq,inod,irw;
float cusxy,qxy,sm,dmws,et,et1;
j = blockIdx.x * blockDim.x + threadIdx.x + nd2_txy[2];
i = blockIdx.y * blockDim.y + threadIdx.y + nd2_txy[8];
if (j > nd2_txy[3] || i > nd2_txy[9])
{
return;
}
// for (j = nd2_txy[2]; j <= nd2_txy[3]; j++)
// //do j=nd2_txy(3),nd2_txy(4)
// {
kodd=2*((j+nyb2)&1)+1;
//kodd=2*mod(j+nyb2,2)+1
// for (i = nd2_txy[8]; i <= nd2_txy[9]; i++)
// //do i=nd2_txy(9),nd2_txy(10)
// {
jkq=((i+nxb2)&1)+kodd;
//jkq=mod(i+nxb2,2)+kodd
for (k=nd2_txy[16]; k <= nd2_txy[17]; k++)
//do k=nd2_txy(17),nd2_txy(18)
{
inod=idmat2(k,i,j);
sm=2./(1./cmu(inod)+1./cmu(idmat2(k,i+1,j+1)));
irw=jkq+4*((k+nztop)&1);
//irw=jkq+4*mod(k+nztop,2);
et=epdt(irw);
et1=1.0-et;
dmws=qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw));
cusxy=(dxi2(1,i)*v2y(k,i-1,j)+dxi2(2,i)*v2y(k,i, j)+
dxi2(3,i)*v2y(k,i+1,j)+dxi2(4,i)*v2y(k,i+2,j)+
dyi2(1,j)*v2x(k,i,j-1)+dyi2(2,j)*v2x(k,i,j )+
dyi2(3,j)*v2x(k,i,j+1)+dyi2(4,j)*v2x(k,i,j+2))*sm;
qxy=qt2xy(k,i,j);
qt2xy(k,i,j)=qxy*et+dmws*cusxy*et1;
t2xy(k,i,j)=t2xy(k,i,j)+cusxy-qxy-qt2xy(k,i,j);
}
// }
// }
return;
}
__global__ void stress_xz_PmlX_IIC(int nxb2,
int nyb2,
int mw2_pml,
int mw2_pml1,
int nxbtm,
int nybtm,
int nzbtm,
int nztop,
int lbx0,
int lbx1,
int *nd2_txz,
int *idmat2M,
float ca,
float *drth2M,
float *damp2_xM,
float *cmuM,
float *epdtM,
float *qwsM,
float *qwt1M,
float *qwt2M,
float *dxi2M,
float *dzh2M,
float *t2xzM,
float *qt2xzM,
float *t2xz_pxM,
float *qt2xz_pxM,
float *v2xM,
float *v2zM)
//Compute the stress-xz at region of PML-x-II
//use grid_node_comm
//use wave_field_comm
//implicit NONE
//integer:: i,j,k,lb,ib,kb,kodd,jkq,inod,irw
//real:: taoxz,cusxz,qxz,rth,damp2,damp1,sm,dmws,et,et1
{
int i,j,k,lb,ib,kb,kodd,jkq,inod,irw;
float taoxz,cusxz,qxz,rth,damp2,damp1,sm,dmws,et,et1;
int nth;
//if(lbx[0] > lbx[1]) return;
nth = (lbx1 - lbx0 + 1) * mw2_pml + 1 - lbx0;
j = blockIdx.x * blockDim.x + threadIdx.x + nd2_txz[0];
lb = blockIdx.y * blockDim.y + threadIdx.y + lbx0;
if (j > nd2_txz[5] || lb > lbx1)
{
return;
}
ib=0;
for (k = lbx0; k < lb; k++)
{
for (i=nd2_txz[6+4*k]; i <= nd2_txz[7+4*k]; i++)
{
ib++;
}
}
// for (j = nd2_txz[0]; j <= nd2_txz[5]; j++)
// //do j=nd2_txz(1),nd2_txz(6)
// {
kodd=2*((j+nyb2)&1)+1;
//kodd=2*mod(j+nyb2,2)+1
// ib=0;
// for (lb=lbx[0]; lb <= lbx[1]; lb++)
// //do lb=lbx(1),lbx(2)
// {
kb=0;
for (i=nd2_txz[6+4*lb]; i <= nd2_txz[7+4*lb]; i++)
//do i=nd2_txz(7+4*lb),nd2_txz(8+4*lb)
{
kb=kb+1;
ib=ib+1;
rth=drth2(kb,lb);
jkq=((i+nxb2)&1)+kodd;
//jkq=mod(i+nxb2,2)+kodd
for (k = nd2_txz[12]; k <= nd2_txz[17]; k++)
//do k=nd2_txz(13),nd2_txz(18)
{
damp2=1./(1.+damp2_x(k,j,lb)*rth);
damp1=damp2*2.-1.;
inod=idmat2(k,i,j);
sm=2./(1./cmu(inod)+1./cmu(idmat2(k-1,i+1,j)));
irw=jkq+4*((k+nztop)&1);
//irw=jkq+4*mod(k+nztop,2)
et=epdt(irw);
et1=1.0-et;
dmws=qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw));
taoxz=t2xz(k,i,j)-t2xz_px(k,ib,j);
if(k < nd2_txz[16]) {
//if(k<nd2_txz(17)) then
cusxz=(dzh2(1,k)*v2x(k-2,i,j)+dzh2(2,k)*v2x(k-1,i,j)+
dzh2(3,k)*v2x(k,i,j)+dzh2(4,k)*v2x(k+1,i,j))*sm;
qxz=qt2xz(k,i,j);
qt2xz(k,i,j)=qxz*et+dmws*cusxz*et1;
taoxz=taoxz+cusxz-qxz-qt2xz(k,i,j);
}
cusxz=sm*dxi2(2,i)/ca*(v2z(k,i,j)-v2z(k,i+1,j));
qxz=qt2xz_px(k,ib,j);
qt2xz_px(k,ib,j)=qxz*et+dmws*cusxz*et1;
t2xz_px(k,ib,j)=damp1*t2xz_px(k,ib,j)+
damp2*(cusxz-qxz-qt2xz_px(k,ib,j));
t2xz(k,i,j)=taoxz+t2xz_px(k,ib,j);
}
}
// }
// }
return;
}
__global__ void stress_xz_PmlY_IIC(int nxb2,
int nyb2,
int nxbtm,
int nzbtm,
int nztop,
int lby0,
int lby1,
int *nd2_txz,
int *idmat2M,
float *cmuM,
float *epdtM,
float *qwsM,
float *qwt1M,
float *qwt2M,
float *dxi2M,
float *dzh2M,
float *v2xM,
float *v2zM,
float *t2xzM,
float *qt2xzM)
//Compute the stress-xz at region of PML-y-II
//use grid_node_comm
//use wave_field_comm
//implicit NONE
//integer:: i,j,k,lb,kodd,jkq,inod,irw
//real:: dvxz,dvzx,cusxz,qxz,sm,dmws,et,et1
{
int i,j,k,lb,kodd,jkq,inod,irw;
float dvxz,dvzx,cusxz,qxz,sm,dmws,et,et1;
//if(lby[0] > lby[1]) return;
//if( lby(1)>lby(2) ) return
i = blockIdx.x * blockDim.x + threadIdx.x + nd2_txz[8];
lb = blockIdx.y * blockDim.y + threadIdx.y + lby0;
if (i > nd2_txz[9] || lb > lby1)
{
return;
}
// for (i = nd2_txz[8]; i <= nd2_txz[9]; i++)
// //do i=nd2_txz(9),nd2_txz(10)
// {
// for (lb = lby[0]; lb <= lby[1]; lb++)
// //do lb=lby(1),lby(2)
// {
for (j=nd2_txz[4*lb]; j <= nd2_txz[1+4*lb]; j++)
//do j=nd2_txz(1+4*lb),nd2_txz(2+4*lb)
{
kodd=2*((j+nyb2)&1)+1;
//kodd=2*mod(j+nyb2,2)+1
jkq=((i+nxb2)&1)+kodd;
//jkq=mod(i+nxb2,2)+kodd
for (k = nd2_txz[12]; k <= nd2_txz[15]; k++)
//do k=nd2_txz(13),nd2_txz(16)
{
inod=idmat2(k,i,j);
sm=2./(1./cmu(inod)+1./cmu(idmat2(k-1,i+1,j)));
irw=jkq+4*((k+nztop)&1);
//irw=jkq+4*mod(k+nztop,2)
et=epdt(irw);
et1=1.0-et;
dmws=qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw));
dvzx=dxi2(1,i)*v2z(k,i-1,j)+dxi2(2,i)*v2z(k,i, j)+
dxi2(3,i)*v2z(k,i+1,j)+dxi2(4,i)*v2z(k,i+2,j);
dvxz=dzh2(1,k)*v2x(k-2,i,j)+dzh2(2,k)*v2x(k-1,i,j)+
dzh2(3,k)*v2x(k, i,j)+dzh2(4,k)*v2x(k+1,i,j);
cusxz=(dvzx+dvxz)*sm;
qxz=qt2xz(k,i,j);
qt2xz(k,i,j)=qxz*et+dmws*cusxz*et1;
t2xz(k,i,j)=t2xz(k,i,j)+cusxz-qxz-qt2xz(k,i,j);
}
}
// }
// }
return;
}
__global__ void stress_xz_PmlZ_IIC(int nxb2,
int nyb2,
int mw2_pml1,
int nxbtm,
int nzbtm,
int nztop,
int *nd2_txz,
int *idmat2M,
float ca,
float *drti2M,
float *damp2_zM,
float *cmuM,
float *epdtM,
float *qwsM,
float *qwt1M,
float *qwt2M,
float *dxi2M,
float *dzh2M,
float *t2xzM,
float *qt2xzM,
float *t2xz_pzM,
float *qt2xz_pzM,
float *v2xM,
float *v2zM)
//Compute the stress-xz at region of PML-z-II
//use grid_node_comm
//use wave_field_comm
//implicit NONE
//integer:: i,j,k,lb,kb,kodd,jkq,inod,irw
//real:: taoxz,cusxz,qxz,damp2,damp1,sm,dmws,et,et1
{
int i,j,k,lb,kb,kodd,jkq,inod,irw;
float taoxz,cusxz,qxz,damp2,damp1,sm,dmws,et,et1;
j = blockIdx.x * blockDim.x + threadIdx.x + nd2_txz[0];
i = blockIdx.y * blockDim.y + threadIdx.y + nd2_txz[6];
if (j > nd2_txz[5] || i > nd2_txz[11])
{
return;
}
// for (j = nd2_txz[0]; j <= nd2_txz[5]; j++)
// //do j=nd2_txz(1),nd2_txz(6)
// {
kodd = 2*((j+nyb2)&1)+1;
//kodd=2*mod(j+nyb2,2)+1
// for (i = nd2_txz[6]; i <= nd2_txz[11]; i++)
// //do i=nd2_txz(7),nd2_txz(12)
// {
jkq=((i+nxb2)&1)+kodd;
//jkq=mod(i+nxb2,2)+kodd
kb=0;
for (k = nd2_txz[16]; k <= nd2_txz[17]; k++)
//do k=nd2_txz(17),nd2_txz(18)
{
kb=kb+1;
damp2=1./(1.+damp2_z(i,j)*drti2(kb,1));
damp1=damp2*2.-1.;
inod=idmat2(k,i,j);
sm=2./(1./cmu(inod)+1./cmu(idmat2(k-1,i+1,j)));
irw=jkq+4*((k+nztop)&1);
//irw=jkq+4*mod(k+nztop,2)
et=epdt(irw);
et1=1.0-et;
dmws=qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw));
taoxz=t2xz(k,i,j)-t2xz_pz(kb,i,j);
if(i > nd2_txz[7] && i<nd2_txz[10]) {
//if(i>nd2_txz(8) .and. i<nd2_txz(11)) then
cusxz=(dxi2(1,i)*v2z(k,i-1,j)+dxi2(2,i)*v2z(k,i, j)+
dxi2(3,i)*v2z(k,i+1,j)+dxi2(4,i)*v2z(k,i+2,j))*sm;
qxz=qt2xz(k,i,j);
qt2xz(k,i,j)=qxz*et+dmws*cusxz*et1;
taoxz=taoxz+cusxz-qxz-qt2xz(k,i,j);
}
cusxz=sm*dzh2(2,k)/ca*(v2x(k-1,i,j)-v2x(k,i,j));
qxz=qt2xz_pz(kb,i,j);
qt2xz_pz(kb,i,j)=qxz*et+dmws*cusxz*et1;
t2xz_pz(kb,i,j)=damp1*t2xz_pz(kb,i,j)+
damp2*(cusxz-qxz-qt2xz_pz(kb,i,j));
t2xz(k,i,j)=taoxz+t2xz_pz(kb,i,j);
}
// }
// }
return;
}
//call stress_yz_PmlX_II
__global__ void stress_yz_PmlX_IIC(int nxb2,
int nyb2,
int nxbtm,
int nzbtm,
int nztop,
int lbx0,
int lbx1,
int *nd2_tyz,
int *idmat2M,
float *cmuM,
float *epdtM,
float *qwsM,
float *qwt1M,
float *qwt2M,
float *dyi2M,
float *dzh2M,
float *t2yzM,
float *qt2yzM,
float *v2yM,
float *v2zM)
//Compute the stress-yz at region of PML-x-II
//use grid_node_comm
//use wave_field_comm
//implicit NONE
//integer:: i,j,k,lb,kodd,jkq,inod,irw
//real:: cusyz,qyz,sm,dmws,et,et1
{
int i,j,k,lb,kodd,jkq,inod,irw;
float cusyz,qyz,sm,dmws,et,et1;
//if(lbx[0] > lbx[1]) return;
//if( lbx(1)>lbx(2) ) return
j = blockIdx.x * blockDim.x + threadIdx.x + nd2_tyz[2];
lb = blockIdx.y * blockDim.y + threadIdx.y + lbx0;
if (j > nd2_tyz[3] || lb > lbx1)
{
return;
}
// for (j=nd2_tyz[2]; j <= nd2_tyz[3]; j++)
// //do j=nd2_tyz(3),nd2_tyz(4)
// {
kodd=2*((j+nyb2)&1)+1;
//kodd=2*mod(j+nyb2,2)+1
// for (lb = lbx[0]; lb <= lbx[1]; lb++)
// //do lb=lbx(1),lbx(2)
// {
for (i = nd2_tyz[6+4*lb]; i <= nd2_tyz[7+4*lb]; i++)
//do i=nd2_tyz(7+4*lb),nd2_tyz(8+4*lb)
{
jkq=((i+nxb2)&1)+kodd;
//jkq=mod(i+nxb2,2)+kodd
for (k = nd2_tyz[12]; k <= nd2_tyz[15]; k++)
//do k=nd2_tyz(13),nd2_tyz(16)
{
inod=idmat2(k,i,j);
irw=jkq+4*((k+nztop)&1);
//irw=jkq+4*mod(k+nztop,2)
et=epdt(irw);
et1=1.0-et;
dmws=qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw));
cusyz=(dyi2(1,j)*v2z(k,i,j-1)+dyi2(2,j)*v2z(k,i,j )+
dyi2(3,j)*v2z(k,i,j+1)+dyi2(4,j)*v2z(k,i,j+2)+
dzh2(1,k)*v2y(k-2,i,j)+dzh2(2,k)*v2y(k-1,i,j)+
dzh2(3,k)*v2y(k, i,j)+dzh2(4,k)*v2y(k+1,i,j))/
(.5/cmu(inod)+.5/cmu(idmat2(k-1,i,j+1)));
qyz=qt2yz(k,i,j);
qt2yz(k,i,j)=qyz*et+dmws*cusyz*et1;
t2yz(k,i,j)=t2yz(k,i,j)+cusyz-qyz-qt2yz(k,i,j);
}
}
// }
// }
return;
}
//call stress_yz_PmlY_II
__global__ void stress_yz_PmlY_IIC(int nxb2,
int nyb2,
int mw2_pml1,
int nxbtm,
int nzbtm,
int nztop,
int lby0,
int lby1,
int *nd2_tyz,
int *idmat2M,
float ca,
float *drth2M,
float *damp2_yM,
float *cmuM,
float *epdtM,
float *qwsM,
float *qwt1M,
float *qwt2M,
float *dyi2M,
float *dzh2M,
float *t2yzM,
float *qt2yzM,
float *t2yz_pyM,
float *qt2yz_pyM,
float *v2yM,
float *v2zM)
//Compute the stress-yz at region of PML-y-II
//use grid_node_comm
//use wave_field_comm
//implicit NONE
//integer:: i,j,k,lb,jb,kb,kodd,jkq,inod,irw
//real:: taoyz,cusyz,qyz,rth,damp2,damp1,sm,dmws,et,et1
{
int i,j,k,lb,jb,kb,kodd,jkq,inod,irw;
float taoyz,cusyz,qyz,rth,damp2,damp1,sm,dmws,et,et1;
//if(lby[0] > lby[1]) return;
//if( lby(1)>lby(2) ) return
i = blockIdx.x * blockDim.x + threadIdx.x + nd2_tyz[6];
lb = blockIdx.y * blockDim.y + threadIdx.y + lby0;
if (i > nd2_tyz[11] || lb > lby1)
{
return;
}
jb = 0;
for (k = lby0; k < lb; k++)
{
for (j = nd2_tyz[4*k]; j <= nd2_tyz[1+4*k]; j++)
{
jb++;
}
}
// for (i = nd2_tyz[6]; i <= nd2_tyz[11]; i++)
// //do i=nd2_tyz(7),nd2_tyz(12)
// {
// jb=0;
// for (lb = lby[0]; lb <= lby[1]; lb++)
// //do lb=lby(1),lby(2)
// {
kb=0;
for (j = nd2_tyz[4*lb]; j <= nd2_tyz[1+4*lb]; j++)
//do j=nd2_tyz(1+4*lb),nd2_tyz(2+4*lb)
{
kb=kb+1;
jb=jb+1;
rth=drth2(kb,lb);
kodd=2*((j+nyb2)&1)+1;
//kodd=2*mod(j+nyb2,2)+1
jkq = ((i+nxb2)&1)+kodd;
//jkq=mod(i+nxb2,2)+kodd
for (k = nd2_tyz[12]; k <= nd2_tyz[17]; k++)
//do k=nd2_tyz(13),nd2_tyz(18)
{
damp2=1./(1.+damp2_y(k,i,lb)*rth);
damp1=damp2*2.-1.;
inod=idmat2(k,i,j);
sm=2./(1./cmu(inod)+1./cmu(idmat2(k-1,i,j+1)));
irw=jkq+4*((k+nztop)&1);
//irw=jkq+4*mod(k+nztop,2)
et=epdt(irw);
et1=1.0-et;
dmws=qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw));
taoyz=t2yz(k,i,j)-t2yz_py(k,i,jb);
if(k<nd2_tyz[16]) {
//if(k<nd2_tyz(17)) {
cusyz=(dzh2(1,k)*v2y(k-2,i,j)+dzh2(2,k)*v2y(k-1,i,j)+
dzh2(3,k)*v2y(k, i,j)+dzh2(4,k)*v2y(k+1,i,j))*sm;
qyz=qt2yz(k,i,j);
qt2yz(k,i,j)=qyz*et+dmws*cusyz*et1;
taoyz=taoyz+cusyz-qyz-qt2yz(k,i,j);
}
cusyz=sm*dyi2(2,j)/ca*(v2z(k,i,j)-v2z(k,i,j+1));
qyz=qt2yz_py(k,i,jb);
qt2yz_py(k,i,jb)=qyz*et+dmws*cusyz*et1;
t2yz_py(k,i,jb)=damp1*t2yz_py(k,i,jb)+
damp2*(cusyz-qyz-qt2yz_py(k,i,jb));
t2yz(k,i,j)=taoyz+t2yz_py(k,i,jb);
}
}
// }
// }
return;
}
//call stress_yz_PmlZ_II
__global__ void stress_yz_PmlZ_IIC(int nxb2,
int nyb2,
int mw2_pml1,
int nxbtm,
int nzbtm,
int nztop,
int *nd2_tyz,
int *idmat2M,
float ca,
float *drti2M,
float *damp2_zM,
float *cmuM,
float *epdtM,
float *qwsM,
float *qwt1M,
float *qwt2M,
float *dyi2M,
float *dzh2M,
float *t2yzM,
float *qt2yzM,
float *t2yz_pzM,
float *qt2yz_pzM,
float *v2yM,
float *v2zM)
//Compute the stress-yz at region of PML-z-II
//use grid_node_comm
//use wave_field_comm
//implicit NONE
//integer:: i,j,k,lb,kb,kodd,jkq,inod,irw
//real:: taoyz,cusyz,qyz,damp2,damp1,sm,dmws,et,et1
{
int i,j,k,lb,kb,kodd,jkq,inod,irw;
float taoyz,cusyz,qyz,damp2,damp1,sm,dmws,et,et1;
j = blockIdx.x * blockDim.x + threadIdx.x + nd2_tyz[0];
i = blockIdx.y * blockDim.y + threadIdx.y + nd2_tyz[6];
if (j > nd2_tyz[5] || i > nd2_tyz[11])
{
return;
}
// for (j = nd2_tyz[0]; j <= nd2_tyz[5]; j++)
// //do j=nd2_tyz(1),nd2_tyz(6)
// {
kodd=2*((j+nyb2)&1)+1;
//kodd=2*mod(j+nyb2,2)+1
// for (i = nd2_tyz[6]; i <= nd2_tyz[11]; i++)
// //do i=nd2_tyz(7),nd2_tyz(12)
// {
jkq = ((i+nxb2)&1)+kodd;
//jkq=mod(i+nxb2,2)+kodd
kb=0;
for (k = nd2_tyz[16]; k <= nd2_tyz[17]; k++)
//do k=nd2_tyz(17),nd2_tyz(18)
{
kb=kb+1;
damp2=1./(1.+damp2_z(i,j)*drti2(kb,1));
damp1=damp2*2.-1.;
inod=idmat2(k,i,j);
sm=2./(1./cmu(inod)+1./cmu(idmat2(k-1,i,j+1)));
irw=jkq+4*((k+nztop)&1);
//irw=jkq+4*mod(k+nztop,2);
et=epdt(irw);
et1=1.0-et;
dmws=qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw));
taoyz=t2yz(k,i,j)-t2yz_pz(kb,i,j);
if (j > nd2_tyz[1] && j<nd2_tyz[4]) {
//if(j>nd2_tyz(2) .and. j<nd2_tyz(5)) then
cusyz=(dyi2(1,j)*v2z(k,i,j-1)+dyi2(2,j)*v2z(k,i,j)+
dyi2(3,j)*v2z(k,i,j+1)+dyi2(4,j)*v2z(k,i,j+2))*sm;
qyz=qt2yz(k,i,j);
qt2yz(k,i,j)=qyz*et+dmws*cusyz*et1;
taoyz=taoyz+cusyz-qyz-qt2yz(k,i,j);
}
cusyz=sm*dzh2(2,k)/ca*(v2y(k-1,i,j)-v2y(k,i,j));
qyz=qt2yz_pz(kb,i,j);
qt2yz_pz(kb,i,j)=qyz*et+dmws*cusyz*et1;
t2yz_pz(kb,i,j)=damp1*t2yz_pz(kb,i,j)+
damp2*(cusyz-qyz-qt2yz_pz(kb,i,j));
t2yz(k,i,j)=taoyz+t2yz_pz(kb,i,j);
}
// }
// }
return;
}
#ifdef __cplusplus
extern "C" {
#endif
void compute_stressC(int *nxb1, int *nyb1, int *nx1p1, int *ny1p1, int *nxtop, int *nytop, int *nztop, int *mw1_pml,
int *mw1_pml1, int *nmat, int *nll, int *lbx, int *lby, int *nd1_txy, int *nd1_txz,
int *nd1_tyy, int *nd1_tyz, int *idmat1M, float *ca, float *drti1M, float *drth1M, float *damp1_xM, float *damp1_yM,
float *clamdaM, float *cmuM, float *epdtM, float *qwpM, float *qwsM, float *qwt1M, float *qwt2M, float *dxh1M,
float *dyh1M, float *dzh1M, float *dxi1M, float *dyi1M, float *dzi1M, float *t1xxM, float *t1xyM, float *t1xzM,
float *t1yyM, float *t1yzM, float *t1zzM, float *qt1xxM, float *qt1xyM, float *qt1xzM, float *qt1yyM, float *qt1yzM,
float *qt1zzM, float *t1xx_pxM, float *t1xy_pxM, float *t1xz_pxM, float *t1yy_pxM, float *qt1xx_pxM, float *qt1xy_pxM,
float *qt1xz_pxM, float *qt1yy_pxM, float *t1xx_pyM, float *t1xy_pyM, float *t1yy_pyM, float *t1yz_pyM, float *qt1xx_pyM,
float *qt1xy_pyM, float *qt1yy_pyM, float *qt1yz_pyM, void **v1xMp, void **v1yMp, void **v1zMp,
int *nxb2, int *nyb2, int *nxbtm, int *nybtm, int *nzbtm, int *mw2_pml, int *mw2_pml1, int *nd2_txy, int *nd2_txz,
int *nd2_tyy, int *nd2_tyz, int *idmat2M,
float *drti2M, float *drth2M, float *damp2_xM, float *damp2_yM, float *damp2_zM,
float *t2xxM, float *t2xyM, float *t2xzM, float *t2yyM, float *t2yzM, float *t2zzM,
float *qt2xxM, float *qt2xyM, float *qt2xzM, float *qt2yyM, float *qt2yzM, float *qt2zzM,
float *dxh2M, float *dyh2M, float *dzh2M, float *dxi2M, float *dyi2M, float *dzi2M,
float *t2xx_pxM, float *t2xy_pxM, float *t2xz_pxM, float *t2yy_pxM, float *t2xx_pyM, float *t2xy_pyM,
float *t2yy_pyM, float *t2yz_pyM, float *t2xx_pzM, float *t2xz_pzM, float *t2yz_pzM, float *t2zz_pzM,
float *qt2xx_pxM, float *qt2xy_pxM, float *qt2xz_pxM, float *qt2yy_pxM, float *qt2xx_pyM, float *qt2xy_pyM,
float *qt2yy_pyM, float *qt2yz_pyM, float *qt2xx_pzM, float *qt2xz_pzM, float *qt2yz_pzM, float *qt2zz_pzM,
void **v2xMp, void **v2yMp, void **v2zMp, int *myid)
{
//printf("[CUDA] stress computation:\n");
float *v1xM, *v1yM, *v1zM, *v2xM, *v2yM, *v2zM;
int blockSizeX = 8;
int blockSizeY = 8;
dim3 dimBlock(blockSizeX, blockSizeY);
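// All kernels share an 8x8 block; grids are sized as (extent)/blockSize + 1
// so the inclusive Fortran-style index ranges are covered, with out-of-range
// threads filtered by the guards at the top of each kernel.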
v1xM = (float *) *v1xMp;
v1yM = (float *) *v1yMp;
v1zM = (float *) *v1zMp;
v2xM = (float *) *v2xMp;
v2yM = (float *) *v2yMp;
v2zM = (float *) *v2zMp;
gettimeofday(&t1, NULL);
cpy_h2d_stressInputsC(v1xM, v1yM, v1zM, v2xM, v2yM, v2zM, nxtop, nytop, nztop, nxbtm, nybtm, nzbtm);
cpy_h2d_stressOutputsC(t1xxM, t1xyM, t1xzM, t1yyM, t1yzM, t1zzM, t2xxM, t2xyM, t2xzM,
t2yyM, t2yzM, t2zzM, nxtop, nytop, nztop, nxbtm, nybtm, nzbtm);
gettimeofday(&t2, NULL);
tmpTime = 1000.0 * (t2.tv_sec - t1.tv_sec) + (t2.tv_usec - t1.tv_usec) / 1000.0;
totalTimeH2DS += tmpTime;
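// Accumulate host-to-device transfer time (t1, t2, tmpTime and
// totalTimeH2DS are timing globals declared elsewhere in this file).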
gettimeofday(&t1, NULL);
int gridSizeX1 = (nd1_tyy[3] - nd1_tyy[2])/blockSizeX + 1;
int gridSizeY1 = (nd1_tyy[9] - nd1_tyy[8])/blockSizeY + 1;
dim3 dimGrid1(gridSizeX1, gridSizeY1);
//int size = (*nztop) * (*nxtop + 3) * (*nytop);
//cpy_d2h_stressOutputsC(t1xxM, t1xyM, t1xzM, t1yyM, t1yzM, t1zzM, t2xxM, t2xyM, t2xzM, t2yyM, t2yzM, t2zzM, nxtop, nytop, nztop, nxbtm, nybtm, nzbtm);
//write_output(t1xxM, size, "OUTPUT_ARRAYS/t1xxM0.txt");
hipLaunchKernelGGL(( stress_norm_xy_IC), dim3(dimGrid1), dim3(dimBlock), 0, 0, *nxb1,
*nyb1,
*nxtop,
*nztop,
nd1_tyyD,
idmat1D,
*ca,
clamdaD,
cmuD,
epdtD,
qwpD,
qwsD,
qwt1D,
qwt2D,
dxh1D,
dyh1D,
dxi1D,
dyi1D,
dzi1D,
t1xxD,
t1xyD,
t1yyD,
t1zzD,
qt1xxD,
qt1xyD,
qt1yyD,
qt1zzD,
v1xD,
v1yD,
v1zD);
int gridSizeX2 = (nd1_tyz[3] - nd1_tyz[2])/blockSizeX + 1;
int gridSizeY2 = (nd1_tyz[9] - nd1_tyz[8])/blockSizeY + 1;
dim3 dimGrid2(gridSizeX2, gridSizeY2);
// for debug
//cpy_d2h_stressOutputsC(t1xxM, t1xyM, t1xzM, t1yyM, t1yzM, t1zzM, t2xxM, t2xyM, t2xzM, t2yyM, t2yzM, t2zzM, nxtop, nytop, nztop, nxbtm, nybtm, nzbtm);
//write_output(t1xxM, size, "OUTPUT_ARRAYS/t1xxM1.txt");
hipLaunchKernelGGL(( stress_xz_yz_IC), dim3(dimGrid2), dim3(dimBlock), 0, 0, *nxb1,
*nyb1,
*nxtop,
*nytop,
*nztop,
nd1_tyzD,
idmat1D,
*ca,
cmuD,
epdtD,
qwsD,
qwt1D,
qwt2D,
dxi1D,
dyi1D,
dzh1D,
v1xD,
v1yD,
v1zD,
t1xzD,
t1yzD,
qt1xzD,
qt1yzD);
// for debug
//cpy_d2h_stressOutputsC(t1xxM, t1xyM, t1xzM, t1yyM, t1yzM, t1zzM, t2xxM, t2xyM, t2xzM, t2yyM, t2yzM, t2zzM, nxtop, nytop, nztop, nxbtm, nybtm, nzbtm);
//write_output(t1xxM, size, "OUTPUT_ARRAYS/t1xxM2.txt");
int gridSizeX3Temp1 = ((*ny1p1) + 1)/blockSizeX + 1;
int gridSizeX3Temp2 = ((*nytop) - 1)/blockSizeX + 1;
int gridSizeY3Temp1 = ((*nxtop) - 1)/blockSizeY + 1;
int gridSizeY3Temp2 = ((*nx1p1) + 1)/blockSizeY + 1;
int gridSizeX3 = (gridSizeX3Temp1 > gridSizeX3Temp2) ? gridSizeX3Temp1 : gridSizeX3Temp2;
int gridSizeY3 = (gridSizeY3Temp1 > gridSizeY3Temp2) ? gridSizeY3Temp1 : gridSizeY3Temp2;
dim3 dimGrid3(gridSizeX3, gridSizeY3);
hipLaunchKernelGGL(( stress_resetVars), dim3(dimGrid3), dim3(dimBlock), 0, 0, *ny1p1,
*nx1p1,
*nxtop,
*nytop,
*nztop,
t1xzD,
t1yzD);
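// Each PML kernel is launched only when its region is active; the lbx/lby
// guards below mirror the Fortran checks if( lbx(1)>lbx(2) ) return.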
if (lbx[1] >= lbx[0])
{
int gridSizeX4 = (nd1_tyy[5] - nd1_tyy[0])/blockSizeX + 1;
int gridSizeY4 = (lbx[1] - lbx[0])/blockSizeY + 1;
dim3 dimGrid4(gridSizeX4, gridSizeY4);
//debug
/*float *t1xx_px=(float*)malloc(sizeof(float) * (*nztop) * ((lbx[1] - lbx[0] + 1) * (*mw1_pml) + lbx[1]) * (*nytop));
hipMemcpy(t1xx_px, t1xx_pxD, sizeof(float) * (*nztop) * ((lbx[1] - lbx[0] + 1) * (*mw1_pml) + lbx[1]) * (*nytop), hipMemcpyDeviceToHost);
write_output(t1xx_px, (*nztop) * ((lbx[1] - lbx[0] + 1) * (*mw1_pml) + lbx[1]) * (*nytop), "OUTPUT_ARRAYS/t1xx_px_cuda.txt");*/
hipLaunchKernelGGL(( stress_norm_PmlX_IC), dim3(dimGrid4), dim3(dimBlock), 0, 0, *nxb1,
*nyb1,
*nxtop,
*nytop,
*nztop,
*mw1_pml,
*mw1_pml1,
lbx[0],
lbx[1],
nd1_tyyD,
idmat1D,
*ca,
drti1D,
damp1_xD,
clamdaD,
cmuD,
epdtD,
qwpD,
qwsD,
qwt1D,
qwt2D,
dzi1D,
dxh1D,
dyh1D,
v1xD,
v1yD,
v1zD,
t1xxD,
t1yyD,
t1zzD,
t1xx_pxD,
t1yy_pxD,
qt1xxD,
qt1yyD,
qt1zzD,
qt1xx_pxD,
qt1yy_pxD);
// for debug
//cpy_d2h_stressOutputsC(t1xxM, t1xyM, t1xzM, t1yyM, t1yzM, t1zzM, t2xxM, t2xyM, t2xzM, t2yyM, t2yzM, t2zzM, nxtop, nytop, nztop, nxbtm, nybtm, nzbtm);
//write_output(t1xxM, size, "OUTPUT_ARRAYS/t1xxM3.txt");
}
if (lby[1] >= lby[0])
{
int gridSizeX5 = (nd1_tyy[11] - nd1_tyy[6])/blockSizeX + 1;
int gridSizeY5 = (lby[1] - lby[0])/blockSizeY + 1;
dim3 dimGrid5(gridSizeX5, gridSizeY5);
hipLaunchKernelGGL(( stress_norm_PmlY_IC), dim3(dimGrid5), dim3(dimBlock), 0, 0, *nxb1,
*nyb1,
*mw1_pml1,
*nxtop,
*nztop,
lby[0],
lby[1],
nd1_tyyD,
idmat1D,
*ca,
drti1D,
damp1_yD,
clamdaD,
cmuD,
epdtD,
qwpD,
qwsD,
qwt1D,
qwt2D,
dxh1D,
dyh1D,
dzi1D,
t1xxD,
t1yyD,
t1zzD,
qt1xxD,
qt1yyD,
qt1zzD,
t1xx_pyD,
t1yy_pyD,
qt1xx_pyD,
qt1yy_pyD,
v1xD,
v1yD,
v1zD);
// for debug
//cpy_d2h_stressOutputsC(t1xxM, t1xyM, t1xzM, t1yyM, t1yzM, t1zzM, t2xxM, t2xyM, t2xzM, t2yyM, t2yzM, t2zzM, nxtop, nytop, nztop, nxbtm, nybtm, nzbtm);
//write_output(t1xxM, size, "OUTPUT_ARRAYS/t1xxM4.txt");
}
if (lbx[1] >= lbx[0])
{
int gridSizeX6 = (nd1_txy[5] - nd1_txy[0])/blockSizeX + 1;
int gridSizeY6 = (lbx[1] - lbx[0])/blockSizeY + 1;
dim3 dimGrid6(gridSizeX6, gridSizeY6);
hipLaunchKernelGGL(( stress_xy_PmlX_IC), dim3(dimGrid6), dim3(dimBlock), 0, 0, *nxb1,
*nyb1,
*mw1_pml,
*mw1_pml1,
*nxtop,
*nytop,
*nztop,
lbx[0],
lbx[1],
nd1_txyD,
idmat1D,
*ca,
drth1D,
damp1_xD,
cmuD,
epdtD,
qwsD,
qwt1D,
qwt2D,
dxi1D,
dyi1D,
t1xyD,
qt1xyD,
t1xy_pxD,
qt1xy_pxD,
v1xD,
v1yD);
// for debug
//cpy_d2h_stressOutputsC(t1xxM, t1xyM, t1xzM, t1yyM, t1yzM, t1zzM, t2xxM, t2xyM, t2xzM, t2yyM, t2yzM, t2zzM, nxtop, nytop, nztop, nxbtm, nybtm, nzbtm);
//write_output(t1xxM, size, "OUTPUT_ARRAYS/t1xxM5.txt");
}
if (lby[1] >= lby[0])
{
int gridSizeX7 = (nd1_txy[11] - nd1_txy[6])/blockSizeX + 1;
int gridSizeY7 = (lby[1] - lby[0])/blockSizeY + 1;
dim3 dimGrid7(gridSizeX7, gridSizeY7);
hipLaunchKernelGGL(( stress_xy_PmlY_IC), dim3(dimGrid7), dim3(dimBlock), 0, 0, *nxb1,
*nyb1,
*mw1_pml1,
*nxtop,
*nztop,
lby[0],
lby[1],
nd1_txyD,
idmat1D,
*ca,
drth1D,
damp1_yD,
cmuD,
epdtD,
qwsD,
qwt1D,
qwt2D,
dxi1D,
dyi1D,
t1xyD,
qt1xyD,
t1xy_pyD,
qt1xy_pyD,
v1xD,
v1yD);
// for debug
//cpy_d2h_stressOutputsC(t1xxM, t1xyM, t1xzM, t1yyM, t1yzM, t1zzM, t2xxM, t2xyM, t2xzM, t2yyM, t2yzM, t2zzM, nxtop, nytop, nztop, nxbtm, nybtm, nzbtm);
//write_output(t1xxM, size, "OUTPUT_ARRAYS/t1xxM6.txt");
}
if (lbx[1] >= lbx[0])
{
int gridSizeX8 = (nd1_txz[5] - nd1_txz[0])/blockSizeX + 1;
int gridSizeY8 = (lbx[1] - lbx[0])/blockSizeY + 1;
dim3 dimGrid8(gridSizeX8, gridSizeY8);
hipLaunchKernelGGL(( stress_xz_PmlX_IC), dim3(dimGrid8), dim3(dimBlock), 0, 0, *nxb1,
*nyb1,
*nxtop,
*nytop,
*nztop,
*mw1_pml,
*mw1_pml1,
lbx[0],
lbx[1],
nd1_txzD,
idmat1D,
*ca,
drth1D,
damp1_xD,
cmuD,
epdtD,
qwsD,
qwt1D,
qwt2D,
dxi1D,
dzh1D,
t1xzD,
qt1xzD,
t1xz_pxD,
qt1xz_pxD,
v1xD,
v1zD);
// for debug
//cpy_d2h_stressOutputsC(t1xxM, t1xyM, t1xzM, t1yyM, t1yzM, t1zzM, t2xxM, t2xyM, t2xzM, t2yyM, t2yzM, t2zzM, nxtop, nytop, nztop, nxbtm, nybtm, nzbtm);
//write_output(t1xxM, size, "OUTPUT_ARRAYS/t1xxM7.txt");
}
if (lby[1] >= lby[0])
{
int gridSizeX9 = (nd1_txz[9] - nd1_txz[8])/blockSizeX + 1;
int gridSizeY9 = (lby[1] - lby[0])/blockSizeY + 1;
dim3 dimGrid9(gridSizeX9, gridSizeY9);
hipLaunchKernelGGL(( stress_xz_PmlY_IC), dim3(dimGrid9), dim3(dimBlock), 0, 0, *nxb1,
*nyb1,
*nxtop,
*nztop,
lby[0],
lby[1],
nd1_txzD,
idmat1D,
*ca,
cmuD,
epdtD,
qwsD,
qwt1D,
qwt2D,
dxi1D,
dzh1D,
t1xzD,
qt1xzD,
v1xD,
v1zD);
// for debug
//cpy_d2h_stressOutputsC(t1xxM, t1xyM, t1xzM, t1yyM, t1yzM, t1zzM, t2xxM, t2xyM, t2xzM, t2yyM, t2yzM, t2zzM, nxtop, nytop, nztop, nxbtm, nybtm, nzbtm);
//write_output(t1xxM, size, "OUTPUT_ARRAYS/t1xxM8.txt");
}
if (lbx[1] >= lbx[0])
{
int gridSizeX10 = (nd1_tyz[3] - nd1_tyz[2])/blockSizeX + 1;
int gridSizeY10 = (lbx[1] - lbx[0])/blockSizeY + 1;
dim3 dimGrid10(gridSizeX10, gridSizeY10);
hipLaunchKernelGGL(( stress_yz_PmlX_IC), dim3(dimGrid10), dim3(dimBlock), 0, 0, *nxb1,
*nyb1,
*nztop,
*nxtop,
lbx[0],
lbx[1],
nd1_tyzD,
idmat1D,
*ca,
cmuD,
epdtD,
qwsD,
qwt1D,
qwt2D,
dyi1D,
dzh1D,
t1yzD,
qt1yzD,
v1yD,
v1zD);
// for debug
//cpy_d2h_stressOutputsC(t1xxM, t1xyM, t1xzM, t1yyM, t1yzM, t1zzM, t2xxM, t2xyM, t2xzM, t2yyM, t2yzM, t2zzM, nxtop, nytop, nztop, nxbtm, nybtm, nzbtm);
//write_output(t1xxM, size, "OUTPUT_ARRAYS/t1xxM9.txt");
}
if (lby[1] >= lby[0])
{
int gridSizeX11 = (nd1_tyz[11] - nd1_tyz[6])/blockSizeX + 1;
int gridSizeY11 = (lby[1] - lby[0])/blockSizeY + 1;
dim3 dimGrid11(gridSizeX11, gridSizeY11);
hipLaunchKernelGGL(( stress_yz_PmlY_IC), dim3(dimGrid11),dim3(dimBlock), 0, 0, *nxb1,
*nyb1,
*mw1_pml1,
*nxtop,
*nztop,
lby[0],
lby[1],
nd1_tyzD,
idmat1D,
*ca,
drth1D,
damp1_yD,
cmuD,
epdtD,
qwsD,
qwt1D,
qwt2D,
dyi1D,
dzh1D,
t1yzD,
qt1yzD,
t1yz_pyD,
qt1yz_pyD,
v1yD,
v1zD);
// for debug
//cpy_d2h_stressOutputsC(t1xxM, t1xyM, t1xzM, t1yyM, t1yzM, t1zzM, t2xxM, t2xyM, t2xzM, t2yyM, t2yzM, t2zzM, nxtop, nytop, nztop, nxbtm, nybtm, nzbtm);
//write_output(t1xxM, size, "OUTPUT_ARRAYS/t1xxM10.txt");
}
int gridSizeX12 = (nd2_tyy[3] - nd2_tyy[2])/blockSizeX + 1;
int gridSizeY12 = (nd2_tyy[9] - nd2_tyy[8])/blockSizeY + 1;
dim3 dimGrid12(gridSizeX12, gridSizeY12);
hipLaunchKernelGGL(( stress_norm_xy_II), dim3(dimGrid12), dim3(dimBlock), 0, 0, *nxb2,
*nyb2,
*nxbtm,
*nzbtm,
*nztop,
nd2_tyyD,
idmat2D,
clamdaD,
cmuD,
epdtD,
qwpD,
qwsD,
qwt1D,
qwt2D,
t2xxD,
t2xyD,
t2yyD,
t2zzD,
qt2xxD,
qt2xyD,
qt2yyD,
qt2zzD,
dxh2D,
dyh2D,
dxi2D,
dyi2D,
dzi2D,
v2xD,
v2yD,
v2zD);
// for debug
//cpy_d2h_stressOutputsC(t1xxM, t1xyM, t1xzM, t1yyM, t1yzM, t1zzM, t2xxM, t2xyM, t2xzM, t2yyM, t2yzM, t2zzM, nxtop, nytop, nztop, nxbtm, nybtm, nzbtm);
//write_output(t1xxM, size, "OUTPUT_ARRAYS/t1xxM11.txt");
int gridSizeX13 = (nd2_tyz[3] - nd2_tyz[2])/blockSizeX + 1;
int gridSizeY13 = (nd2_tyz[9] - nd2_tyz[8])/blockSizeY + 1;
dim3 dimGrid13(gridSizeX13, gridSizeY13);
hipLaunchKernelGGL(( stress_xz_yz_IIC), dim3(dimGrid13), dim3(dimBlock), 0, 0, *nxb2,
*nyb2,
*nztop,
*nxbtm,
*nzbtm,
nd2_tyzD,
idmat2D,
cmuD,
epdtD,
qwsD,
qwt1D,
qwt2D,
dxi2D,
dyi2D,
dzh2D,
t2xzD,
t2yzD,
qt2xzD,
qt2yzD,
v2xD,
v2yD,
v2zD);
// for debug
//cpy_d2h_stressOutputsC(t1xxM, t1xyM, t1xzM, t1yyM, t1yzM, t1zzM, t2xxM, t2xyM, t2xzM, t2yyM, t2yzM, t2zzM, nxtop, nytop, nztop, nxbtm, nybtm, nzbtm);
//write_output(t1xxM, size, "OUTPUT_ARRAYS/t1xxM12.txt");
if (lbx[1] >= lbx[0])
{
int gridSizeX14 = (nd2_tyy[5] - nd2_tyy[0])/blockSizeX + 1;
int gridSizeY14 = (lbx[1] - lbx[0])/blockSizeY + 1;
dim3 dimGrid14(gridSizeX14, gridSizeY14);
hipLaunchKernelGGL(( stress_norm_PmlX_IIC), dim3(dimGrid14), dim3(dimBlock), 0, 0, *nxb2,
*nyb2,
*mw2_pml,
*mw2_pml1,
*nztop,
*nxbtm,
*nybtm,
*nzbtm,
lbx[0],
lbx[1],
nd2_tyyD,
idmat2D,
*ca,
drti2D,
damp2_xD,
clamdaD,
cmuD,
epdtD,
qwpD,
qwsD,
qwt1D,
qwt2D,
dxh2D,
dyh2D,
dzi2D,
t2xxD,
t2yyD,
t2zzD,
qt2xxD,
qt2yyD,
qt2zzD,
t2xx_pxD,
t2yy_pxD,
qt2xx_pxD,
qt2yy_pxD,
v2xD,
v2yD,
v2zD);
// for debug
//cpy_d2h_stressOutputsC(t1xxM, t1xyM, t1xzM, t1yyM, t1yzM, t1zzM, t2xxM, t2xyM, t2xzM, t2yyM, t2yzM, t2zzM, nxtop, nytop, nztop, nxbtm, nybtm, nzbtm);
//write_output(t1xxM, size, "OUTPUT_ARRAYS/t1xxM13.txt");
}
if (lby[1] >= lby[0])
{
int gridSizeX15 = (nd2_tyy[11] - nd2_tyy[6])/blockSizeX + 1;
int gridSizeY15 = (lby[1] - lby[0])/blockSizeY + 1;
dim3 dimGrid15(gridSizeX15, gridSizeY15);
hipLaunchKernelGGL(( stress_norm_PmlY_II), dim3(dimGrid15), dim3(dimBlock), 0, 0, *nxb2,
*nyb2,
*nztop,
*nxbtm,
*nzbtm,
*mw2_pml1,
lby[0],
lby[1],
nd2_tyyD,
idmat2D,
*ca,
drti2D,
damp2_yD,
clamdaD,
cmuD,
epdtD,
qwpD,
qwsD,
qwt1D,
qwt2D,
dxh2D,
dyh2D,
dzi2D,
t2xxD,
t2yyD,
t2zzD,
qt2xxD,
qt2yyD,
qt2zzD,
t2xx_pyD,
t2yy_pyD,
qt2xx_pyD,
qt2yy_pyD,
v2xD,
v2yD,
v2zD);
// for debug
//cpy_d2h_stressOutputsC(t1xxM, t1xyM, t1xzM, t1yyM, t1yzM, t1zzM, t2xxM, t2xyM, t2xzM, t2yyM, t2yzM, t2zzM, nxtop, nytop, nztop, nxbtm, nybtm, nzbtm);
//write_output(t1xxM, size, "OUTPUT_ARRAYS/t1xxM14.txt");
}
int gridSizeX16 = (nd2_tyy[5] - nd2_tyy[0])/blockSizeX + 1;
int gridSizeY16 = (nd2_tyy[11] - nd2_tyy[6])/blockSizeY + 1;
dim3 dimGrid16(gridSizeX16, gridSizeY16);
hipLaunchKernelGGL(( stress_norm_PmlZ_IIC), dim3(dimGrid16), dim3(dimBlock), 0, 0, *nxb2,
*nyb2,
*mw2_pml,
*mw2_pml1,
*nztop,
*nxbtm,
*nzbtm,
nd2_tyyD,
idmat2D,
*ca,
damp2_zD,
drth2D,
clamdaD,
cmuD,
epdtD,
qwpD,
qwsD,
qwt1D,
qwt2D,
dxh2D,
dyh2D,
dzi2D,
t2xxD,
t2yyD,
t2zzD,
qt2xxD,
qt2yyD,
qt2zzD,
t2xx_pzD,
t2zz_pzD,
qt2xx_pzD,
qt2zz_pzD,
v2xD,
v2yD,
v2zD);
// for debug
//cpy_d2h_stressOutputsC(t1xxM, t1xyM, t1xzM, t1yyM, t1yzM, t1zzM, t2xxM, t2xyM, t2xzM, t2yyM, t2yzM, t2zzM, nxtop, nytop, nztop, nxbtm, nybtm, nzbtm);
//write_output(t1xxM, size, "OUTPUT_ARRAYS/t1xxM15.txt");
if (lbx[1] >= lbx[0])
{
int gridSizeX17 = (nd2_txy[5] - nd2_txy[0])/blockSizeX + 1;
int gridSizeY17 = (lbx[1] - lbx[0])/blockSizeY + 1;
dim3 dimGrid17(gridSizeX17, gridSizeY17);
hipLaunchKernelGGL(( stress_xy_PmlX_IIC), dim3(dimGrid17), dim3(dimBlock), 0, 0, *nxb2,
*nyb2,
*mw2_pml,
*mw2_pml1,
*nxbtm,
*nybtm,
*nzbtm,
*nztop,
lbx[0],
lbx[1],
nd2_txyD,
idmat2D,
*ca,
drth2D,
damp2_xD,
cmuD,
epdtD,
qwsD,
qwt1D,
qwt2D,
dxi2D,
dyi2D,
t2xyD,
qt2xyD,
t2xy_pxD,
qt2xy_pxD,
v2xD,
v2yD);
}
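// xy shear stress in the y-direction PML strips of region II.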
if (lby[1] >= lby[0])
{
int gridSizeX18 = (nd2_txy[11] - nd2_txy[6])/blockSizeX + 1;
int gridSizeY18 = (lby[1] - lby[0])/blockSizeY + 1;
dim3 dimGrid18(gridSizeX18, gridSizeY18);
hipLaunchKernelGGL(( stress_xy_PmlY_IIC), dim3(dimGrid18), dim3(dimBlock), 0, 0, *nxb2,
*nyb2,
*mw2_pml1,
*nztop,
*nxbtm,
*nzbtm,
lby[0],
lby[1],
nd2_txyD,
idmat2D,
*ca,
drth2D,
damp2_yD,
cmuD,
epdtD,
qwsD,
qwt1D,
qwt2D,
dxi2D,
dyi2D,
t2xyD,
qt2xyD,
t2xy_pyD,
qt2xy_pyD,
v2xD,
v2yD);
}
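// xy shear stress in the z-direction PML of region II.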
int gridSizeX19 = (nd2_txy[3] - nd2_txy[2])/blockSizeX + 1;
int gridSizeY19 = (nd2_txy[9] - nd2_txy[8])/blockSizeY + 1;
dim3 dimGrid19(gridSizeX19, gridSizeY19);
hipLaunchKernelGGL(( stress_xy_PmlZ_II), dim3(dimGrid19), dim3(dimBlock), 0, 0, *nxb2,
*nyb2,
*nxbtm,
*nzbtm,
*nztop,
nd2_txyD,
idmat2D,
cmuD,
epdtD,
qwsD,
qwt1D,
qwt2D,
dxi2D,
dyi2D,
t2xyD,
qt2xyD,
v2xD,
v2yD);
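// xz shear stress in the x-direction PML strips of region II.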
if (lbx[1] >= lbx[0])
{
int gridSizeX20 = (nd2_txz[5] - nd2_txz[0])/blockSizeX + 1;
int gridSizeY20 = (lbx[1] - lbx[0])/blockSizeY + 1;
dim3 dimGrid20(gridSizeX20, gridSizeY20);
hipLaunchKernelGGL(( stress_xz_PmlX_IIC), dim3(dimGrid20), dim3(dimBlock), 0, 0, *nxb2,
*nyb2,
*mw2_pml,
*mw2_pml1,
*nxbtm,
*nybtm,
*nzbtm,
*nztop,
lbx[0],
lbx[1],
nd2_txzD,
idmat2D,
*ca,
drth2D,
damp2_xD,
cmuD,
epdtD,
qwsD,
qwt1D,
qwt2D,
dxi2D,
dzh2D,
t2xzD,
qt2xzD,
t2xz_pxD,
qt2xz_pxD,
v2xD,
v2zD);
}
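// xz shear stress in the y-direction PML strips of region II.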
if (lby[1] >= lby[0])
{
int gridSizeX21 = (nd2_txz[9] - nd2_txz[8])/blockSizeX + 1;
int gridSizeY21 = (lby[1] - lby[0])/blockSizeY + 1;
dim3 dimGrid21(gridSizeX21, gridSizeY21);
hipLaunchKernelGGL(( stress_xz_PmlY_IIC), dim3(dimGrid21), dim3(dimBlock), 0, 0, *nxb2,
*nyb2,
*nxbtm,
*nzbtm,
*nztop,
lby[0],
lby[1],
nd2_txzD,
idmat2D,
cmuD,
epdtD,
qwsD,
qwt1D,
qwt2D,
dxi2D,
dzh2D,
v2xD,
v2zD,
t2xzD,
qt2xzD);
}
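// xz shear stress in the z-direction PML of region II.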
int gridSizeX22 = (nd2_txz[5] - nd2_txz[0])/blockSizeX + 1;
int gridSizeY22 = (nd2_txz[11] - nd2_txz[6])/blockSizeY + 1;
dim3 dimGrid22(gridSizeX22, gridSizeY22);
hipLaunchKernelGGL(( stress_xz_PmlZ_IIC), dim3(dimGrid22), dim3(dimBlock), 0, 0, *nxb2,
*nyb2,
*mw2_pml1,
*nxbtm,
*nzbtm,
*nztop,
nd2_txzD,
idmat2D,
*ca,
drti2D,
damp2_zD,
cmuD,
epdtD,
qwsD,
qwt1D,
qwt2D,
dxi2D,
dzh2D,
t2xzD,
qt2xzD,
t2xz_pzD,
qt2xz_pzD,
v2xD,
v2zD);
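// yz shear stress in the x-direction PML strips of region II.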
if (lbx[1] >= lbx[0])
{
int gridSizeX23 = (nd2_tyz[3] - nd2_tyz[2])/blockSizeX + 1;
int gridSizeY23 = (lbx[1] - lbx[0])/blockSizeY + 1;
dim3 dimGrid23(gridSizeX23, gridSizeY23);
hipLaunchKernelGGL(( stress_yz_PmlX_IIC), dim3(dimGrid23), dim3(dimBlock), 0, 0, *nxb2,
*nyb2,
*nxbtm,
*nzbtm,
*nztop,
lbx[0],
lbx[1],
nd2_tyzD,
idmat2D,
cmuD,
epdtD,
qwsD,
qwt1D,
qwt2D,
dyi2D,
dzh2D,
t2yzD,
qt2yzD,
v2yD,
v2zD);
}
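// yz shear stress in the y-direction PML strips of region II.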
if (lby[1] >= lby[0])
{
int gridSizeX24 = (nd2_tyz[11] - nd2_tyz[6])/blockSizeX + 1;
int gridSizeY24 = (lby[1] - lby[0])/blockSizeY + 1;
dim3 dimGrid24(gridSizeX24, gridSizeY24);
hipLaunchKernelGGL(( stress_yz_PmlY_IIC), dim3(dimGrid24), dim3(dimBlock), 0, 0, *nxb2,
*nyb2,
*mw2_pml1,
*nxbtm,
*nzbtm,
*nztop,
lby[0],
lby[1],
nd2_tyzD,
idmat2D,
*ca,
drth2D,
damp2_yD,
cmuD,
epdtD,
qwsD,
qwt1D,
qwt2D,
dyi2D,
dzh2D,
t2yzD,
qt2yzD,
t2yz_pyD,
qt2yz_pyD,
v2yD,
v2zD);
}
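// yz shear stress in the z-direction PML of region II.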
int gridSizeX25 = (nd2_tyz[5] - nd2_tyz[0])/blockSizeX + 1;
int gridSizeY25 = (nd2_tyz[11] - nd2_tyz[6])/blockSizeY + 1;
dim3 dimGrid25(gridSizeX25, gridSizeY25);
hipLaunchKernelGGL(( stress_yz_PmlZ_IIC), dim3(dimGrid25), dim3(dimBlock), 0, 0, *nxb2,
*nyb2,
*mw2_pml1,
*nxbtm,
*nzbtm,
*nztop,
nd2_tyzD,
idmat2D,
*ca,
drti2D,
damp2_zD,
cmuD,
epdtD,
qwsD,
qwt1D,
qwt2D,
dyi2D,
dzh2D,
t2yzD,
qt2yzD,
t2yz_pzD,
qt2yz_pzD,
v2yD,
v2zD);
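// All stress kernels have been issued; synchronize so the compute-time
// measurement below covers their completion.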
hipDeviceSynchronize();
gettimeofday(&t2, NULL);
tmpTime = 1000.0 * (t2.tv_sec - t1.tv_sec) + (t2.tv_usec - t1.tv_usec) / 1000.0;
totalTimeCompS += tmpTime;
gettimeofday(&t1, NULL);
cpy_d2h_stressOutputsC(t1xxM, t1xyM, t1xzM, t1yyM, t1yzM, t1zzM, t2xxM, t2xyM, t2xzM, t2yyM,
t2yzM, t2zzM, nxtop, nytop, nztop, nxbtm, nybtm, nzbtm);
gettimeofday(&t2, NULL);
tmpTime = 1000.0 * (t2.tv_sec - t1.tv_sec) + (t2.tv_usec - t1.tv_usec) / 1000.0;
totalTimeD2HS += tmpTime;
// for debug
// int size = (*nztop) * (*nxtop + 3) * (*nytop);
// write_output(t1xxM, size, "OUTPUT_ARRAYS/t1xxM.txt");
// size = (*nztop) * (*nxtop + 3) * (*nytop + 3);
// write_output(t1xyM, size, "OUTPUT_ARRAYS/t1xyM.txt");
// size = (*nztop + 1) * (*nxtop + 3) * (*nytop);
// write_output(t1xzM, size, "OUTPUT_ARRAYS/t1xzM.txt");
// size = (*nztop) * (*nxtop) * (*nytop + 3);
// write_output(t1yyM, size, "OUTPUT_ARRAYS/t1yyM.txt");
// size = (*nztop + 1) * (*nxtop) * (*nytop + 3);
// write_output(t1yzM, size, "OUTPUT_ARRAYS/t1yzM.txt");
// size = (*nztop) * (*nxtop) * (*nytop);
// write_output(t1zzM, size, "OUTPUT_ARRAYS/t1zzM.txt");
// size = (*nzbtm) * (*nxbtm + 3) * (*nybtm);
// write_output(t2xxM, size, "OUTPUT_ARRAYS/t2xxM.txt");
// size = (*nzbtm) * (*nxbtm + 3) * (*nybtm + 3);
// write_output(t2xyM, size, "OUTPUT_ARRAYS/t2xyM.txt");
// size = (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm);
// write_output(t2xzM, size, "OUTPUT_ARRAYS/t2xzM.txt");
// size = (*nzbtm) * (*nxbtm) * (*nybtm + 3);
// write_output(t2yyM, size, "OUTPUT_ARRAYS/t2yyM.txt");
// size = (*nzbtm + 1) * (*nxbtm) * (*nybtm + 3);
// write_output(t2yzM, size, "OUTPUT_ARRAYS/t2yzM.txt");
// size = (*nzbtm + 1) * (*nxbtm) * (*nybtm);
// write_output(t2zzM, size, "OUTPUT_ARRAYS/t2zzM.txt");
/*************** correctness *******************/
/*
FILE *fp;
// cudaRes = hipMalloc((void **)&v1xD, sizeof(float) * (*nztop + 2) * (*nxtop + 3) * (*nytop + 3));
// CHECK_ERROR(cudaRes, "Allocate Device Memory1, v1x");
// cudaRes = hipMalloc((void **)&v1yD, sizeof(float) * (*nztop + 2) * (*nxtop + 3) * (*nytop + 3));
// CHECK_ERROR(cudaRes, "Allocate Device Memory1, v1y");
// cudaRes = hipMalloc((void **)&v1zD, sizeof(float) * (*nztop + 2) * (*nxtop + 3) * (*nytop + 3));
// CHECK_ERROR(cudaRes, "Allocate Device Memory1, v1z");
const char* filename = "v1x.txt";
const char* filename1 = "v1y.txt";
const char* filename2 = "v1z.txt";
int i;
if((fp = fopen(filename, "w+")) == NULL)
fprintf(stderr, "File write error!\n");
for(i = 0; i< (*nztop + 2) * (*nxtop + 3) * (*nytop + 3); i++ )
{
fprintf(fp, "%f ", v1xM[i]);
}
fprintf(fp, "\n");
fclose(fp);
if((fp = fopen(filename1, "w+")) == NULL)
fprintf(stderr, "File write error!\n");
for(i = 0; i< (*nztop + 2) * (*nxtop + 3) * (*nytop + 3); i++ )
{
fprintf(fp, "%f ", v1yM[i]);
}
fprintf(fp, "\n");
fclose(fp);
if((fp = fopen(filename2, "w+")) == NULL)
fprintf(stderr, "File write error!\n");
for(i = 0; i< (*nztop + 2) * (*nxtop + 3) * (*nytop + 3); i++ )
{
fprintf(fp, "%f ", v1zM[i]);
}
fprintf(fp, "\n");
fclose(fp);
// cudaRes = hipMalloc((void **)&t1xxD, sizeof(float) * (*nztop) * (*nxtop + 3) * (*nytop));
// CHECK_ERROR(cudaRes, "Allocate Device Memory1, t1xx");
// cudaRes = hipMalloc((void **)&t1xyD, sizeof(float) * (*nztop) * (*nxtop + 3) * (*nytop + 3));
// CHECK_ERROR(cudaRes, "Allocate Device Memory1, t1xy");
// cudaRes = hipMalloc((void **)&t1xzD, sizeof(float) * (*nztop + 1) * (*nxtop + 3) * (*nytop));
// CHECK_ERROR(cudaRes, "Allocate Device Memory1, t1xz");
// cudaRes = hipMalloc((void **)&t1yyD, sizeof(float) * (*nztop) * (*nxtop) * (*nytop + 3));
// CHECK_ERROR(cudaRes, "Allocate Device Memory1, t1yy");
// cudaRes = hipMalloc((void **)&t1yzD, sizeof(float) * (*nztop + 1) * (*nxtop) * (*nytop + 3));
// CHECK_ERROR(cudaRes, "Allocate Device Memory1, t1yz");
// cudaRes = hipMalloc((void **)&t1zzD, sizeof(float) * (*nztop) * (*nxtop) * (*nytop));
// CHECK_ERROR(cudaRes, "Allocate Device Memory1, t1zz");
const char* filename3 = "x_t1xx.txt";
const char* filename4 = "x_t1xy.txt";
const char* filename5 = "x_t1xz.txt";
if((fp = fopen(filename3, "w+")) == NULL)
fprintf(stderr, "File write error!\n");
for(i = 0; i< (*nztop) * (*nxtop + 3) * (*nytop); i++ )
{
fprintf(fp, "%f ", t1xxM[i]);
}
fprintf(fp, "\n");
fclose(fp);
if((fp = fopen(filename4, "w+")) == NULL)
fprintf(stderr, "File write error!\n");
for(i = 0; i< (*nztop) * (*nxtop + 3) * (*nytop+3); i++ )
{
fprintf(fp, "%f ", t1xyM[i]);
}
fprintf(fp, "\n");
fclose(fp);
if((fp = fopen(filename5, "w+")) == NULL)
fprintf(stderr, "File write error!\n");
for(i = 0; i< (*nztop+1) * (*nxtop + 3) * (*nytop); i++ )
{
fprintf(fp, "%f ", t1xzM[i]);
}
fprintf(fp, "\n");
fclose(fp);
*/
return;
}
#ifdef __cplusplus
}
#endif
| 11c258dbbaa5cb022b779baa2c04d103f7af3790.cu | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
/***********************************************/
/* for debug: check the output */
/***********************************************/
void write_output(float *arr, int size, const char *filename)
{
FILE *fp;
if ((fp = fopen(filename, "w+")) == NULL)
{
fprintf(stderr, "File write error!\n");
return; // bail out: writing through a NULL FILE* would crash
}
int i;
for(i = 0; i < size; i++)
{
fprintf(fp, "%f ", arr[i]);
if (i % 10 == 0)
fprintf(fp, "\n");
}
fprintf(fp, "\n");
fclose(fp);
}
//device memory pointers
static int *nd1_velD;
static int *nd1_txyD;
static int *nd1_txzD;
static int *nd1_tyyD;
static int *nd1_tyzD;
static float *rhoD;
static float *drvh1D;
static float *drti1D;
static float *drth1D;
static float *damp1_xD;
static float *damp1_yD;
static int *idmat1D;
static float *dxi1D;
static float *dyi1D;
static float *dzi1D;
static float *dxh1D;
static float *dyh1D;
static float *dzh1D;
static float *t1xxD;
static float *t1xyD;
static float *t1xzD;
static float *t1yyD;
static float *t1yzD;
static float *t1zzD;
static float *t1xx_pxD;
static float *t1xy_pxD;
static float *t1xz_pxD;
static float *t1yy_pxD;
static float *qt1xx_pxD;
static float *qt1xy_pxD;
static float *qt1xz_pxD;
static float *qt1yy_pxD;
static float *t1xx_pyD;
static float *t1xy_pyD;
static float *t1yy_pyD;
static float *t1yz_pyD;
static float *qt1xx_pyD;
static float *qt1xy_pyD;
static float *qt1yy_pyD;
static float *qt1yz_pyD;
static float *qt1xxD;
static float *qt1xyD;
static float *qt1xzD;
static float *qt1yyD;
static float *qt1yzD;
static float *qt1zzD;
static float *clamdaD;
static float *cmuD;
static float *epdtD;
static float *qwpD;
static float *qwsD;
static float *qwt1D;
static float *qwt2D;
static float *v1xD; //output
static float *v1yD;
static float *v1zD;
static float *v1x_pxD;
static float *v1y_pxD;
static float *v1z_pxD;
static float *v1x_pyD;
static float *v1y_pyD;
static float *v1z_pyD;
//for inner_II---------------------------------------------------------
static int *nd2_velD;
static int *nd2_txyD; //int[18]
static int *nd2_txzD; //int[18]
static int *nd2_tyyD; //int[18]
static int *nd2_tyzD; //int[18]
static float *drvh2D;
static float *drti2D;
static float *drth2D; //float[mw2_pml1,0:1]
static int *idmat2D;
static float *damp2_xD;
static float *damp2_yD;
static float *damp2_zD;
static float *dxi2D;
static float *dyi2D;
static float *dzi2D;
static float *dxh2D;
static float *dyh2D;
static float *dzh2D;
static float *t2xxD;
static float *t2xyD;
static float *t2xzD;
static float *t2yyD;
static float *t2yzD;
static float *t2zzD;
static float *qt2xxD;
static float *qt2xyD;
static float *qt2xzD;
static float *qt2yyD;
static float *qt2yzD;
static float *qt2zzD;
static float *t2xx_pxD;
static float *t2xy_pxD;
static float *t2xz_pxD;
static float *t2yy_pxD;
static float *qt2xx_pxD;
static float *qt2xy_pxD;
static float *qt2xz_pxD;
static float *qt2yy_pxD;
static float *t2xx_pyD;
static float *t2xy_pyD;
static float *t2yy_pyD;
static float *t2yz_pyD;
static float *qt2xx_pyD;
static float *qt2xy_pyD;
static float *qt2yy_pyD;
static float *qt2yz_pyD;
static float *t2xx_pzD;
static float *t2xz_pzD;
static float *t2yz_pzD;
static float *t2zz_pzD;
static float *qt2xx_pzD;
static float *qt2xz_pzD;
static float *qt2yz_pzD;
static float *qt2zz_pzD;
static float *v2xD; //output
static float *v2yD;
static float *v2zD;
static float *v2x_pxD;
static float *v2y_pxD;
static float *v2z_pxD;
static float *v2x_pyD;
static float *v2y_pyD;
static float *v2z_pyD;
static float *v2x_pzD;
static float *v2y_pzD;
static float *v2z_pzD;
#define CHECK_ERROR(err, str) \
do { \
if ((err) != cudaSuccess) \
{ \
printf("Error in \"%s\", %s\n", str, cudaGetErrorString(err)); \
} \
} while (0)
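/* Usage sketch (hypothetical, not in the original source): kernel launches
return no status of their own, so a launch error would typically be picked
up afterwards, e.g.
someKernel<<<grid, block>>>(...);
CHECK_ERROR(cudaGetLastError(), "someKernel launch"); */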
//debug----------------------
double totalTimeH2DV, totalTimeD2HV;
double totalTimeH2DS, totalTimeD2HS;
double totalTimeCompV, totalTimeCompS;
double tmpTime;
struct timeval t1, t2;
int procID;
//--------------------------------
//!XSC--------------------------------------------------------------------
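// The macros below map the solver's 1-based Fortran-style (i, j, k) indexing
// onto the flat device arrays; the strides match the padded dimensions used
// in the cudaMalloc calls further down.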
#define drvh1(i, j) drvh1M[(i) - 1 + (j) * mw1_pml1]
#define drti1(i, j) drti1M[(i) - 1 + (j) * mw1_pml1]
#define drth1(i, j) drth1M[(i) - 1 + (j) * mw1_pml1]
#define damp1_x(i, j, k) damp1_xM[(i) - 1 + (nztop + 1) * ((j) - 1 + ((k) - lbx0) * nytop)]
#define damp1_y(i, j, k) damp1_yM[(i) - 1 + (nztop + 1) * ((j) - 1 + ((k) - lby0) * nxtop)]
#define idmat1(i, j, k) idmat1M[(i) + (nztop + 2) * ((j) - 1 + ((k) - 1) * (nxtop + 1))]
#define v1x(i, j, k) v1xM[(i) + (nztop + 2) * ((j) + 1 + (k) * (nxtop + 3))]
#define v1y(i, j, k) v1yM[(i) + (nztop + 2) * ((j) + ((k) + 1) * (nxtop + 3))]
#define v1z(i, j, k) v1zM[(i) + (nztop + 2) * ((j) + (k) * (nxtop + 3))]
//nv2x=(lbx(2) - lbx(1) + 1) * mw1_pml
#define v1x_px(i, j, k) v1x_pxM[(i) - 1 + nztop * ((j) - 1 + nv2x * ((k) - 1))]
#define v1y_px(i, j, k) v1y_pxM[(i) - 1 + nztop * ((j) - 1 + nv2x * ((k) - 1))]
#define v1z_px(i, j, k) v1z_pxM[(i) - 1 + nztop * ((j) - 1 + nv2x * ((k) - 1))]
#define v1x_py(i, j, k) v1x_pyM[(i) - 1 + nztop * ((j) - 1 + nxtop * ((k) - 1))]
#define v1y_py(i, j, k) v1y_pyM[(i) - 1 + nztop * ((j) - 1 + nxtop * ((k) - 1))]
#define v1z_py(i, j, k) v1z_pyM[(i) - 1 + nztop * ((j) - 1 + nxtop * ((k) - 1))]
#define dxi1(i, j) dxi1M[((j) - 1) * 4 + (i) - 1]
#define dyi1(i, j) dyi1M[((j) - 1) * 4 + (i) - 1]
#define dzi1(i, j) dzi1M[((j) - 1) * 4 + (i) - 1]
#define dxh1(i, j) dxh1M[((j) - 1) * 4 + (i) - 1]
#define dyh1(i, j) dyh1M[((j) - 1) * 4 + (i) - 1]
#define dzh1(i, j) dzh1M[((j) - 1) * 4 + (i) - 1]
#define t1xx(i, j, k) t1xxM[(i) - 1 + nztop * ((j) + ((k) - 1) * (nxtop + 3))]
#define t1xy(i, j, k) t1xyM[(i) - 1 + nztop * ((j) + 1 + ((k) + 1) * (nxtop + 3))]
#define t1xz(i, j, k) t1xzM[(i) - 1 + (nztop + 1) * ((j) + 1 + ((k) - 1) * (nxtop + 3))]
#define t1yy(i, j, k) t1yyM[(i) - 1 + nztop * (((j) - 1) + (k) * nxtop)]
#define t1yz(i, j, k) t1yzM[(i) - 1 + (nztop + 1) * ((j) - 1 + ((k) + 1) * nxtop)]
#define t1zz(i, j, k) t1zzM[(i) - 1 + nztop * ((j) - 1 + ((k) - 1) * nxtop)]
//nti = (lbx(2) - lbx(1) + 1) * mw1_pml + lbx(2)
//nth = (lbx(2) - lbx(1) + 1) * mw1_pml + 1 - lbx(1)
#define t1xx_px(i, j, k) t1xx_pxM[(i) - 1 + nztop * ((j) - 1 + nti * ((k) - 1))]
#define t1xy_px(i, j, k) t1xy_pxM[(i) - 1 + nztop * ((j) - 1 + nth * ((k) - 1))]
#define t1xz_px(i, j, k) t1xz_pxM[(i) - 1 + (nztop + 1) * ((j) - 1 + nth * ((k) - 1))]
#define t1yy_px(i, j, k) t1yy_pxM[(i) - 1 + nztop * ((j) - 1 + nti * ((k) - 1))]
#define qt1xx_px(i, j, k) qt1xx_pxM[(i) - 1 + nztop * ((j) - 1 + nti * ((k) - 1))]
#define qt1xy_px(i, j, k) qt1xy_pxM[(i) - 1 + nztop * ((j) - 1 + nth * ((k) - 1))]
#define qt1xz_px(i, j, k) qt1xz_pxM[(i) - 1 + (nztop + 1) * ((j) - 1 + nth * ((k) - 1))]
#define qt1yy_px(i, j, k) qt1yy_pxM[(i) - 1 + nztop * ((j) - 1 + nti * ((k) - 1))]
//nti = (lby(2) - lby(1) + 1) * mw1_pml + lby(2)
//nth = (lby(2) - lby(1) + 1) * mw1_pml + 1 - lby(1)
#define t1xx_py(i, j, k) t1xx_pyM[(i) - 1 + nztop * ((j) - 1 + nxtop * ((k) - 1))]
#define t1xy_py(i, j, k) t1xy_pyM[(i) - 1 + nztop * ((j) - 1 + nxtop * ((k) - 1))]
#define t1yy_py(i, j, k) t1yy_pyM[(i) - 1 + nztop * ((j) - 1 + nxtop * ((k) - 1))]
#define t1yz_py(i, j, k) t1yz_pyM[(i) - 1 + (nztop + 1) * ((j) - 1 + nxtop * ((k) - 1))]
#define qt1xx_py(i, j, k) qt1xx_pyM[(i) - 1 + nztop * ((j) - 1 + nxtop * ((k) - 1))]
#define qt1xy_py(i, j, k) qt1xy_pyM[(i) - 1 + nztop * ((j) - 1 + nxtop * ((k) - 1))]
#define qt1yy_py(i, j, k) qt1yy_pyM[(i) - 1 + nztop * ((j) - 1 + nxtop * ((k) - 1))]
#define qt1yz_py(i, j, k) qt1yz_pyM[(i) - 1 + (nztop + 1) * ((j) - 1 + nxtop * ((k) - 1))]
#define qt1xx(i, j, k) qt1xxM[(i) - 1 + nztop * ((j) - 1 + nxtop * ((k) - 1))]
#define qt1xy(i, j, k) qt1xyM[(i) - 1 + nztop * ((j) - 1 + nxtop * ((k) - 1))]
#define qt1xz(i, j, k) qt1xzM[(i) - 1 + (nztop + 1) * ((j) - 1 + nxtop * ((k) - 1))]
#define qt1yy(i, j, k) qt1yyM[(i) - 1 + nztop * ((j) - 1 + nxtop * ((k) - 1))]
#define qt1yz(i, j, k) qt1yzM[(i) - 1 + (nztop + 1) * ((j) - 1 + nxtop * ((k) - 1))]
#define qt1zz(i, j, k) qt1zzM[(i) - 1 + nztop * ((j) - 1 + nxtop * ((k) - 1))]
#define rho(i) rhoM[(i) - 1]
#define clamda(i) clamdaM[(i) - 1]
#define cmu(i) cmuM[(i) - 1]
#define epdt(i) epdtM[(i) - 1]
#define qwp(i) qwpM[(i) - 1]
#define qws(i) qwsM[(i) - 1]
#define qwt1(i) qwt1M[(i) - 1]
#define qwt2(i) qwt2M[(i) - 1]
//for inner_II
#define drvh2(i, j) drvh2M[(i) - 1 + (j) * mw2_pml1]
#define drti2(i, j) drti2M[(i) - 1 + (j) * mw2_pml1]
#define drth2(i, j) drth2M[(i) - 1 + (j) * mw2_pml1]
#define idmat2(i, j, k) idmat2M[(i) + (nzbtm + 1) * ((j) - 1 + ((k) - 1) * (nxbtm + 1))]
#define damp2_x(i, j, k) damp2_xM[(i) - 1 + nzbtm * ((j) - 1 + ((k) - lbx0) * nybtm)]
#define damp2_y(i, j, k) damp2_yM[(i) - 1 + nzbtm * ((j) - 1 + ((k) - lby0) * nxbtm)]
#define damp2_z(i, j) damp2_zM[(i) - 1 + nxbtm * ((j) - 1)]
#define dxi2(i, j) dxi2M[(i) - 1 + 4 * ((j) - 1)]
#define dyi2(i, j) dyi2M[(i) - 1 + 4 * ((j) - 1)]
#define dzi2(i, j) dzi2M[(i) - 1 + 4 * ((j) - 1)]
#define dxh2(i, j) dxh2M[(i) - 1 + 4 * ((j) - 1)]
#define dyh2(i, j) dyh2M[(i) - 1 + 4 * ((j) - 1)]
#define dzh2(i, j) dzh2M[(i) - 1 + 4 * ((j) - 1)]
#define t2xx(i, j, k) t2xxM[(i) - 1 + nzbtm * ((j) + ((k) - 1) * (nxbtm + 3))]
#define t2xy(i, j, k) t2xyM[(i) - 1 + nzbtm * ((j) + 1 + ((k) + 1) * (nxbtm + 3))]
#define t2xz(i, j, k) t2xzM[(i) + (nzbtm + 1) * ((j) + 1 + ((k) - 1) * (nxbtm + 3))]
#define t2yy(i, j, k) t2yyM[(i) - 1 + nzbtm * (((j) - 1) + (k) * nxbtm)]
#define t2yz(i, j, k) t2yzM[(i) + (nzbtm + 1) * ((j) - 1 + ((k) + 1) * nxbtm)]
#define t2zz(i, j, k) t2zzM[(i) + (nzbtm + 1) * ((j) - 1 + ((k) - 1) * nxbtm)]
#define qt2xx(i, j, k) qt2xxM[(i) - 1 + nzbtm * ((j) - 1 + nxbtm * ((k) - 1))]
#define qt2xy(i, j, k) qt2xyM[(i) - 1 + nzbtm * ((j) - 1 + nxbtm * ((k) - 1))]
#define qt2xz(i, j, k) qt2xzM[(i) - 1 + nzbtm * ((j) - 1 + nxbtm * ((k) - 1))]
#define qt2yy(i, j, k) qt2yyM[(i) - 1 + nzbtm * ((j) - 1 + nxbtm * ((k) - 1))]
#define qt2yz(i, j, k) qt2yzM[(i) - 1 + nzbtm * ((j) - 1 + nxbtm * ((k) - 1))]
#define qt2zz(i, j, k) qt2zzM[(i) - 1 + nzbtm * ((j) - 1 + nxbtm * ((k) - 1))]
//nti = (lbx(2) - lbx(1) + 1) * mw2_pml + lbx(2)
//nth = (lbx(2) - lbx(1) + 1) * mw2_pml + 1 - lbx(1)
#define t2xx_px(i, j, k) t2xx_pxM[(i) - 1 + nzbtm * ((j) - 1 + nti * ((k) - 1))]
#define t2xy_px(i, j, k) t2xy_pxM[(i) - 1 + nzbtm * ((j) - 1 + nth * ((k) - 1))]
#define t2xz_px(i, j, k) t2xz_pxM[(i) - 1 + nzbtm * ((j) - 1 + nth * ((k) - 1))]
#define t2yy_px(i, j, k) t2yy_pxM[(i) - 1 + nzbtm * ((j) - 1 + nti * ((k) - 1))]
#define t2xx_py(i, j, k) t2xx_pyM[(i) - 1 + nzbtm * ((j) - 1 + nxbtm * ((k) - 1))]
#define t2xy_py(i, j, k) t2xy_pyM[(i) - 1 + nzbtm * ((j) - 1 + nxbtm * ((k) - 1))]
#define t2yy_py(i, j, k) t2yy_pyM[(i) - 1 + nzbtm * ((j) - 1 + nxbtm * ((k) - 1))]
#define t2yz_py(i, j, k) t2yz_pyM[(i) - 1 + nzbtm * ((j) - 1 + nxbtm * ((k) - 1))]
#define t2xx_pz(i, j, k) t2xx_pzM[(i) - 1 + mw2_pml * ((j) - 1 + nxbtm * ((k) - 1))]
#define t2xz_pz(i, j, k) t2xz_pzM[(i) - 1 + mw2_pml1 * ((j) - 1 + nxbtm * ((k) - 1))]
#define t2yz_pz(i, j, k) t2yz_pzM[(i) - 1 + mw2_pml1 * ((j) - 1 + nxbtm * ((k) - 1))]
#define t2zz_pz(i, j, k) t2zz_pzM[(i) - 1 + mw2_pml * ((j) - 1 + nxbtm * ((k) - 1))]
#define qt2xx_px(i, j, k) qt2xx_pxM[(i) - 1 + nzbtm * ((j) - 1 + nti * ((k) - 1))]
#define qt2xy_px(i, j, k) qt2xy_pxM[(i) - 1 + nzbtm * ((j) - 1 + nth * ((k) - 1))]
#define qt2xz_px(i, j, k) qt2xz_pxM[(i) - 1 + nzbtm * ((j) - 1 + nth * ((k) - 1))]
#define qt2yy_px(i, j, k) qt2yy_pxM[(i) - 1 + nzbtm * ((j) - 1 + nti * ((k) - 1))]
#define qt2xx_py(i, j, k) qt2xx_pyM[(i) - 1 + nzbtm * ((j) - 1 + nxbtm * ((k) - 1))]
#define qt2xy_py(i, j, k) qt2xy_pyM[(i) - 1 + nzbtm * ((j) - 1 + nxbtm * ((k) - 1))]
#define qt2yy_py(i, j, k) qt2yy_pyM[(i) - 1 + nzbtm * ((j) - 1 + nxbtm * ((k) - 1))]
#define qt2yz_py(i, j, k) qt2yz_pyM[(i) - 1 + nzbtm * ((j) - 1 + nxbtm * ((k) - 1))]
#define qt2xx_pz(i, j, k) qt2xx_pzM[(i) - 1 + mw2_pml * ((j) - 1 + nxbtm * ((k) - 1))]
#define qt2xz_pz(i, j, k) qt2xz_pzM[(i) - 1 + mw2_pml1 * ((j) - 1 + nxbtm * ((k) - 1))]
#define qt2yz_pz(i, j, k) qt2yz_pzM[(i) - 1 + mw2_pml1 * ((j) - 1 + nxbtm * ((k) - 1))]
#define qt2zz_pz(i, j, k) qt2zz_pzM[(i) - 1 + mw2_pml * ((j) - 1 + nxbtm * ((k) - 1))]
#define v2x(i, j, k) v2xM[(i) + (nzbtm + 1) * ((j) + 1 + (nxbtm + 3) * (k))]
#define v2y(i, j, k) v2yM[(i) + (nzbtm + 1) * ((j) + (nxbtm + 3) * ((k) + 1))]
#define v2z(i, j, k) v2zM[(i) + (nzbtm + 1) * ((j) + (nxbtm + 3) * (k))]
//nv2y = (lbx(2) - lbx(1) + 1) * mw2_pml
#define v2x_px(i, j, k) v2x_pxM[(i) - 1 + nzbtm * ((j) - 1 + nv2y * ((k) - 1))]
#define v2y_px(i, j, k) v2y_pxM[(i) - 1 + nzbtm * ((j) - 1 + nv2y * ((k) - 1))]
#define v2z_px(i, j, k) v2z_pxM[(i) - 1 + nzbtm * ((j) - 1 + nv2y * ((k) - 1))]
#define v2x_py(i, j, k) v2x_pyM[(i) - 1 + nzbtm * ((j) - 1 + nxbtm * ((k) - 1))]
#define v2y_py(i, j, k) v2y_pyM[(i) - 1 + nzbtm * ((j) - 1 + nxbtm * ((k) - 1))]
#define v2z_py(i, j, k) v2z_pyM[(i) - 1 + nzbtm * ((j) - 1 + nxbtm * ((k) - 1))]
#define v2x_pz(i, j, k) v2x_pzM[(i) - 1 + mw2_pml * ((j) - 1 + nxbtm * ((k) - 1))]
#define v2y_pz(i, j, k) v2y_pzM[(i) - 1 + mw2_pml * ((j) - 1 + nxbtm * ((k) - 1))]
#define v2z_pz(i, j, k) v2z_pzM[(i) - 1 + mw2_pml * ((j) - 1 + nxbtm * ((k) - 1))]
__global__ void velocity_inner_IC(int nztop,
int nztm1,
float ca,
int *nd1_vel,
float *rhoM,
int *idmat1M,
float *dxi1M,
float *dyi1M,
float *dzi1M,
float *dxh1M,
float *dyh1M,
float *dzh1M,
float *t1xxM,
float *t1xyM,
float *t1xzM,
float *t1yyM,
float *t1yzM,
float *t1zzM,
int nxtop, //dimension #
int nytop,
float *v1xM, //output
float *v1yM,
float *v1zM);
__global__ void velocity_inner_IIC(float ca,
int *nd2_vel,
float *rhoM,
float *dxi2,
float *dyi2,
float *dzi2,
float *dxh2,
float *dyh2,
float *dzh2,
int *idmat2,
float *t2xx,
float *t2xy,
float *t2xz,
float *t2yy,
float *t2yz,
float *t2zz,
int nxbtm, //dimension #s
int nybtm,
int nzbtm,
float *v2x, //output
float *v2y,
float *v2z);
__global__ void vel_PmlX_IC(float ca,
int lbx0,
int lbx1,
int *nd1_vel,
float *rhoM,
float *drvh1,
float *drti1,
float *damp1_x,
int *idmat1,
float *dxi1,
float *dyi1,
float *dzi1,
float *dxh1,
float *dyh1,
float *dzh1,
float *t1xx,
float *t1xy,
float *t1xz,
float *t1yy,
float *t1yz,
float *t1zz,
int mw1_pml1, //dimension #
int mw1_pml,
int nxtop,
int nytop,
int nztop,
float *v1x, //output
float *v1y,
float *v1z,
float *v1x_px,
float *v1y_px,
float *v1z_px);
__global__ void vel_PmlY_IC(int nztop,
float ca,
int lby0,
int lby1,
int *nd1_vel,
float *rhoM,
float *drvh1,
float *drti1,
int *idmat1,
float *damp1_y,
float *dxi1,
float *dyi1,
float *dzi1,
float *dxh1,
float *dyh1,
float *dzh1,
float *t1xx,
float *t1xy,
float *t1xz,
float *t1yy,
float *t1yz,
float *t1zz,
int mw1_pml1, //dimension #s
int mw1_pml,
int nxtop,
int nytop,
float *v1x, //output
float *v1y,
float *v1z,
float *v1x_py,
float *v1y_py,
float *v1z_py);
__global__ void vel_PmlX_IIC(int nzbm1,
float ca,
int lbx0,
int lbx1,
int *nd2_vel,
float *drvh2,
float *drti2,
float *rhoM,
float *damp2_x,
int *idmat2,
float *dxi2,
float *dyi2,
float *dzi2,
float *dxh2,
float *dyh2,
float *dzh2,
float *t2xx,
float *t2xy,
float *t2xz,
float *t2yy,
float *t2yz,
float *t2zz,
int mw2_pml1, //dimension #s
int mw2_pml,
int nxbtm,
int nybtm,
int nzbtm,
float *v2x, //output
float *v2y,
float *v2z,
float *v2x_px,
float *v2y_px,
float *v2z_px);
__global__ void vel_PmlY_IIC(int nzbm1,
float ca,
int lby0,
int lby1,
int *nd2_vel,
float *drvh2,
float *drti2,
float *rhoM,
float *damp2_y,
int *idmat2,
float *dxi2,
float *dyi2,
float *dzi2,
float *dxh2,
float *dyh2,
float *dzh2,
float *t2xx,
float *t2xy,
float *t2xz,
float *t2yy,
float *t2yz,
float *t2zz,
int mw2_pml1,
int mw2_pml,
int nxbtm,
int nybtm,
int nzbtm,
float *v2x, //output
float *v2y,
float *v2z,
float *v2x_py,
float *v2y_py,
float *v2z_py);
__global__ void vel_PmlZ_IIC(int nzbm1,
float ca,
int *nd2_vel,
float *drvh2,
float *drti2,
float *rhoM,
float *damp2_z,
int *idmat2,
float *dxi2,
float *dyi2,
float *dzi2,
float *dxh2,
float *dyh2,
float *dzh2,
float *t2xx,
float *t2xy,
float *t2xz,
float *t2yy,
float *t2yz,
float *t2zz,
int mw2_pml1, //dimension #s
int mw2_pml,
int nxbtm,
int nybtm,
int nzbtm,
float *v2x, //output
float *v2y,
float *v2z,
float *v2x_pz,
float *v2y_pz,
float *v2z_pz);
#ifdef __cplusplus
extern "C" {
#endif
extern void compute_velocityCDebug( int *nztop, int *nztm1, float *ca, int *lbx,
int *lby, int *nd1_vel, float *rhoM, float *drvh1M, float *drti1M,
float *damp1_xM, float *damp1_yM, int *idmat1M,float *dxi1M, float *dyi1M,
float *dzi1M, float *dxh1M, float *dyh1M, float *dzh1M, float *t1xxM,
float *t1xyM, float *t1xzM, float *t1yyM, float *t1yzM, float *t1zzM,
void **v1xMp, void **v1yMp, void **v1zMp, float *v1x_pxM, float *v1y_pxM,
float *v1z_pxM, float *v1x_pyM, float *v1y_pyM, float *v1z_pyM,
int *nzbm1, int *nd2_vel, float *drvh2M, float *drti2M,
int *idmat2M, float *damp2_xM, float *damp2_yM, float *damp2_zM,
float *dxi2M, float *dyi2M, float *dzi2M, float *dxh2M, float *dyh2M,
float *dzh2M, float *t2xxM, float *t2xyM, float *t2xzM, float *t2yyM,
float *t2yzM, float *t2zzM, void **v2xMp, void **v2yMp, void **v2zMp,
float *v2x_pxM, float *v2y_pxM, float *v2z_pxM, float *v2x_pyM,
float *v2y_pyM, float *v2z_pyM, float *v2x_pzM, float *v2y_pzM,
float *v2z_pzM, int *nmat, int *mw1_pml1, int *mw2_pml1,
int *nxtop, int *nytop, int *mw1_pml, int *mw2_pml,
int *nxbtm, int *nybtm, int *nzbtm);
extern void compute_stressCDebug(int *nxb1, int *nyb1, int *nx1p1, int *ny1p1, int *nxtop, int *nytop, int *nztop, int *mw1_pml,
int *mw1_pml1, int *lbx, int *lby, int *nd1_txy, int *nd1_txz,
int *nd1_tyy, int *nd1_tyz, int *idmat1M, float *ca, float *drti1M, float *drth1M, float *damp1_xM, float *damp1_yM,
float *clamdaM, float *cmuM, float *epdtM, float *qwpM, float *qwsM, float *qwt1M, float *qwt2M, float *dxh1M,
float *dyh1M, float *dzh1M, float *dxi1M, float *dyi1M, float *dzi1M, float *t1xxM, float *t1xyM, float *t1xzM,
float *t1yyM, float *t1yzM, float *t1zzM, float *qt1xxM, float *qt1xyM, float *qt1xzM, float *qt1yyM, float *qt1yzM,
float *qt1zzM, float *t1xx_pxM, float *t1xy_pxM, float *t1xz_pxM, float *t1yy_pxM, float *qt1xx_pxM, float *qt1xy_pxM,
float *qt1xz_pxM, float *qt1yy_pxM, float *t1xx_pyM, float *t1xy_pyM, float *t1yy_pyM, float *t1yz_pyM, float *qt1xx_pyM,
float *qt1xy_pyM, float *qt1yy_pyM, float *qt1yz_pyM, void **v1xMp, void **v1yMp, void **v1zMp,
int *nxb2, int *nyb2, int *nxbtm, int *nybtm, int *nzbtm, int *mw2_pml, int *mw2_pml1, int *nd2_txy, int *nd2_txz,
int *nd2_tyy, int *nd2_tyz, int *idmat2M,
float *drti2M, float *drth2M, float *damp2_xM, float *damp2_yM, float *damp2_zM,
float *t2xxM, float *t2xyM, float *t2xzM, float *t2yyM, float *t2yzM, float *t2zzM,
float *qt2xxM, float *qt2xyM, float *qt2xzM, float *qt2yyM, float *qt2yzM, float *qt2zzM,
float *dxh2M, float *dyh2M, float *dzh2M, float *dxi2M, float *dyi2M, float *dzi2M,
float *t2xx_pxM, float *t2xy_pxM, float *t2xz_pxM, float *t2yy_pxM, float *t2xx_pyM, float *t2xy_pyM,
float *t2yy_pyM, float *t2yz_pyM, float *t2xx_pzM, float *t2xz_pzM, float *t2yz_pzM, float *t2zz_pzM,
float *qt2xx_pxM, float *qt2xy_pxM, float *qt2xz_pxM, float *qt2yy_pxM, float *qt2xx_pyM, float *qt2xy_pyM,
float *qt2yy_pyM, float *qt2yz_pyM, float *qt2xx_pzM, float *qt2xz_pzM, float *qt2yz_pzM, float *qt2zz_pzM,
void **v2xMp, void **v2yMp, void **v2zMp, int *myid);
void set_deviceC(int *deviceID)
{
cudaSetDevice(*deviceID);
//printf("[CUDA] device set success!\n");
}
//===========================================================================
void allocate_gpu_memC(int *lbx,
int *lby,
int *nmat, //dimension #, int
int *mw1_pml1, //int
int *mw2_pml1, //int
int *nxtop, //int
int *nytop, //int
int *nztop,
int *mw1_pml, //int
int *mw2_pml, //int
int *nxbtm, //int
int *nybtm, //int
int *nzbtm,
int *nzbm1,
int *nll)
{
//printf("[CUDA] allocation ...............");
int nv2, nti, nth;
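// nv2, nti, and nth hold the widths (in grid points) of the PML strips,
// derived from the lbx/lby index ranges and the mw*_pml layer counts; they
// size the *_px/*_py split-field arrays allocated below.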
cudaError_t cudaRes;
// printf("lbx[1] = %d, lbx[0] = %d\n", lbx[1], lbx[0]);
// printf("lby[1] = %d, lby[0] = %d\n", lby[1], lby[0]);
// printf("nmat = %d\n", *nmat);
// printf("mw1_pml1 = %d, mw2_pml1 = %d\n", *mw1_pml1, *mw2_pml1);
// printf("mw1_pml = %d, mw2_pml = %d\n", *mw1_pml, *mw2_pml);
// printf("nxtop = %d, nytop = %d, nztop = %d\n", *nxtop, *nytop, *nztop);
// printf("nxbtm = %d, nybtm = %d, nzbtm = %d\n", *nxbtm, *nybtm, *nzbtm);
// printf("nzbm1 = %d, nll = %d\n", *nzbm1, *nll);
//debug-----------------
totalTimeH2DV = 0.0f;
totalTimeD2HV = 0.0f;
totalTimeH2DS = 0.0f;
totalTimeD2HS = 0.0f;
totalTimeCompV = 0.0f;
totalTimeCompS = 0.0f;
//for inner_I
cudaRes = cudaMalloc((void **)&nd1_velD, sizeof(int) * 18);
CHECK_ERROR(cudaRes, "Allocate Device Memory1, nd1_vel");
cudaRes = cudaMalloc((void **)&nd1_txyD, sizeof(int) * 18);
CHECK_ERROR(cudaRes, "Allocate Device Memory1, nd1_txy");
cudaRes = cudaMalloc((void **)&nd1_txzD, sizeof(int) * 18);
CHECK_ERROR(cudaRes, "Allocate Device Memory1, nd1_txz");
cudaRes = cudaMalloc((void **)&nd1_tyyD, sizeof(int) * 18);
CHECK_ERROR(cudaRes, "Allocate Device Memory1, nd1_tyy");
cudaRes = cudaMalloc((void **)&nd1_tyzD, sizeof(int) * 18);
CHECK_ERROR(cudaRes, "Allocate Device Memory1, nd1_tyz");
cudaRes = cudaMalloc((void **)&rhoD, sizeof(float) * (*nmat));
CHECK_ERROR(cudaRes, "Allocate Device Memory1, rho");
cudaRes = cudaMalloc((void **)&drvh1D, sizeof(float) * (*mw1_pml1) * 2);
CHECK_ERROR(cudaRes, "Allocate Device Memory1, drvh1");
cudaRes = cudaMalloc((void **)&drti1D, sizeof(float) * (*mw1_pml1) * 2);
CHECK_ERROR(cudaRes, "Allocate Device Memory1, drti1");
cudaRes = cudaMalloc((void **)&drth1D, sizeof(float) * (*mw1_pml1) * 2);
CHECK_ERROR(cudaRes, "Allocate Device Memory1, drth1");
if (lbx[1] >= lbx[0])
{
cudaRes = cudaMalloc((void **)&damp1_xD, sizeof(float) * (*nztop + 1) * (*nytop) * (lbx[1] - lbx[0] + 1));
CHECK_ERROR(cudaRes, "Allocate Device Memory1, damp1_x");
}
if (lby[1] >= lby[0])
{
cudaRes = cudaMalloc((void **)&damp1_yD, sizeof(float) * (*nztop + 1) * (*nxtop) * (lby[1] - lby[0] + 1));
CHECK_ERROR(cudaRes, "Allocate Device Memory1, damp1_y");
}
cudaRes = cudaMalloc((void **)&idmat1D, sizeof(int) * (*nztop + 2) * (*nxtop + 1) * (*nytop + 1));
CHECK_ERROR(cudaRes, "Allocate Device Memory1, idmat1");
cudaRes = cudaMalloc((void **)&dxi1D, sizeof(float) * 4 * (*nxtop));
CHECK_ERROR(cudaRes, "Allocate Device Memory1, dxi1");
cudaRes = cudaMalloc((void **)&dyi1D, sizeof(float) * 4 * (*nytop));
CHECK_ERROR(cudaRes, "Allocate Device Memory1, dyi1");
cudaRes = cudaMalloc((void **)&dzi1D, sizeof(float) * 4 * (*nztop + 1));
CHECK_ERROR(cudaRes, "Allocate Device Memory1, dzi1");
cudaRes = cudaMalloc((void **)&dxh1D, sizeof(float) * 4 * (*nxtop));
CHECK_ERROR(cudaRes, "Allocate Device Memory1, dxh1");
cudaRes = cudaMalloc((void **)&dyh1D, sizeof(float) * 4 * (*nytop));
CHECK_ERROR(cudaRes, "Allocate Device Memory1, dyh1");
cudaRes = cudaMalloc((void **)&dzh1D, sizeof(float) * 4 * (*nztop + 1));
CHECK_ERROR(cudaRes, "Allocate Device Memory1, dzh1");
cudaRes = cudaMalloc((void **)&t1xxD, sizeof(float) * (*nztop) * (*nxtop + 3) * (*nytop));
CHECK_ERROR(cudaRes, "Allocate Device Memory1, t1xx");
cudaRes = cudaMalloc((void **)&t1xyD, sizeof(float) * (*nztop) * (*nxtop + 3) * (*nytop + 3));
CHECK_ERROR(cudaRes, "Allocate Device Memory1, t1xy");
cudaRes = cudaMalloc((void **)&t1xzD, sizeof(float) * (*nztop + 1) * (*nxtop + 3) * (*nytop));
CHECK_ERROR(cudaRes, "Allocate Device Memory1, t1xz");
cudaRes = cudaMalloc((void **)&t1yyD, sizeof(float) * (*nztop) * (*nxtop) * (*nytop + 3));
CHECK_ERROR(cudaRes, "Allocate Device Memory1, t1yy");
cudaRes = cudaMalloc((void **)&t1yzD, sizeof(float) * (*nztop + 1) * (*nxtop) * (*nytop + 3));
CHECK_ERROR(cudaRes, "Allocate Device Memory1, t1yz");
cudaRes = cudaMalloc((void **)&t1zzD, sizeof(float) * (*nztop) * (*nxtop) * (*nytop));
CHECK_ERROR(cudaRes, "Allocate Device Memory1, t1zz");
if (lbx[1] >= lbx[0])
{
nti = (lbx[1] - lbx[0] + 1) * (*mw1_pml) + lbx[1];
nth = (lbx[1] - lbx[0] + 1) * (*mw1_pml) + 1 - lbx[0];
cudaMalloc((void **)&t1xx_pxD, sizeof(float) * (*nztop) * (nti) * (*nytop));
cudaMalloc((void **)&t1xy_pxD, sizeof(float) * (*nztop) * nth * (*nytop));
cudaMalloc((void **)&t1xz_pxD, sizeof(float) * (*nztop+1) * nth * (*nytop));
cudaMalloc((void **)&t1yy_pxD, sizeof(float) * (*nztop) * nti * (*nytop));
cudaMalloc((void **)&qt1xx_pxD, sizeof(float) * (*nztop) * (nti) * (*nytop));
cudaMalloc((void **)&qt1xy_pxD, sizeof(float) * (*nztop) * nth * (*nytop));
cudaMalloc((void **)&qt1xz_pxD, sizeof(float) * (*nztop+1) * nth * (*nytop));
cudaMalloc((void **)&qt1yy_pxD, sizeof(float) * (*nztop) * nti * (*nytop));
}
if (lby[1] >= lby[0])
{
nti = (lby[1] - lby[0] + 1) * (*mw1_pml) + lby[1];
nth = (lby[1] - lby[0] + 1) * (*mw1_pml) + 1 - lby[0];
cudaMalloc((void **)&t1xx_pyD, sizeof(float) * (*nztop) * (*nxtop) * nti);
cudaMalloc((void **)&t1xy_pyD, sizeof(float) * (*nztop) * (*nxtop) * nth);
cudaMalloc((void **)&t1yy_pyD, sizeof(float) * (*nztop) * (*nxtop) * nti);
cudaMalloc((void **)&t1yz_pyD, sizeof(float) * (*nztop+1) * (*nxtop) * nth);
cudaMalloc((void **)&qt1xx_pyD, sizeof(float) * (*nztop) * (*nxtop) * nti);
cudaMalloc((void **)&qt1xy_pyD, sizeof(float) * (*nztop) * (*nxtop) * nth);
cudaMalloc((void **)&qt1yy_pyD, sizeof(float) * (*nztop) * (*nxtop) * nti);
cudaMalloc((void **)&qt1yz_pyD, sizeof(float) * (*nztop+1) * (*nxtop) * nth);
}
cudaMalloc((void **)&qt1xxD, sizeof(float) * (*nztop) * (*nxtop) * (*nytop));
cudaMalloc((void **)&qt1xyD, sizeof(float) * (*nztop) * (*nxtop) * (*nytop));
cudaMalloc((void **)&qt1xzD, sizeof(float) * (*nztop+1) * (*nxtop) * (*nytop));
cudaMalloc((void **)&qt1yyD, sizeof(float) * (*nztop) * (*nxtop) * (*nytop));
cudaMalloc((void **)&qt1yzD, sizeof(float) * (*nztop+1) * (*nxtop) * (*nytop));
cudaMalloc((void **)&qt1zzD, sizeof(float) * (*nztop) * (*nxtop) * (*nytop));
cudaMalloc((void **)&clamdaD, sizeof(float) * (*nmat));
cudaMalloc((void **)&cmuD, sizeof(float) * (*nmat));
cudaMalloc((void **)&epdtD, sizeof(float) * (*nll));
cudaMalloc((void **)&qwpD, sizeof(float) * (*nmat));
cudaMalloc((void **)&qwsD, sizeof(float) * (*nmat));
cudaMalloc((void **)&qwt1D, sizeof(float) * (*nll));
cudaMalloc((void **)&qwt2D, sizeof(float) * (*nll));
cudaRes = cudaMalloc((void **)&v1xD, sizeof(float) * (*nztop + 2) * (*nxtop + 3) * (*nytop + 3));
CHECK_ERROR(cudaRes, "Allocate Device Memory1, v1x");
cudaRes = cudaMalloc((void **)&v1yD, sizeof(float) * (*nztop + 2) * (*nxtop + 3) * (*nytop + 3));
CHECK_ERROR(cudaRes, "Allocate Device Memory1, v1y");
cudaRes = cudaMalloc((void **)&v1zD, sizeof(float) * (*nztop + 2) * (*nxtop + 3) * (*nytop + 3));
CHECK_ERROR(cudaRes, "Allocate Device Memory1, v1z");
if (lbx[1] >= lbx[0])
{
nv2 = (lbx[1] - lbx[0] + 1) * (*mw1_pml);
cudaRes = cudaMalloc((void **)&v1x_pxD, sizeof(float) * (*nztop) * nv2 * (*nytop));
CHECK_ERROR(cudaRes, "Allocate Device Memory1, v1x_px");
cudaRes = cudaMalloc((void **)&v1y_pxD, sizeof(float) * (*nztop) * nv2 * (*nytop));
CHECK_ERROR(cudaRes, "Allocate Device Memory1, v1y_px");
cudaRes = cudaMalloc((void **)&v1z_pxD, sizeof(float) * (*nztop) * nv2 * (*nytop));
CHECK_ERROR(cudaRes, "Allocate Device Memory1, v1z_px");
}
if (lby[1] >= lby[0])
{
nv2 = (lby[1] - lby[0] + 1) * (*mw1_pml);
cudaRes = cudaMalloc((void **)&v1x_pyD, sizeof(float) * (*nztop) * (*nxtop) * nv2);
CHECK_ERROR(cudaRes, "Allocate Device Memory1, v1x_py");
cudaRes = cudaMalloc((void **)&v1y_pyD, sizeof(float) * (*nztop) * (*nxtop) * nv2);
CHECK_ERROR(cudaRes, "Allocate Device Memory1, v1y_py");
cudaRes = cudaMalloc((void **)&v1z_pyD, sizeof(float) * (*nztop) * (*nxtop) * nv2);
CHECK_ERROR(cudaRes, "Allocate Device Memory1, v1z_py");
}
//for inner_II-----------------------------------------------------------------------------------------
cudaRes = cudaMalloc((void **)&nd2_velD, sizeof(int) * 18);
CHECK_ERROR(cudaRes, "Allocate Device Memory, nd2_vel");
cudaRes = cudaMalloc((void **)&nd2_txyD, sizeof(int) * 18);
CHECK_ERROR(cudaRes, "Allocate Device Memory, nd2_txy");
cudaRes = cudaMalloc((void **)&nd2_txzD, sizeof(int) * 18);
CHECK_ERROR(cudaRes, "Allocate Device Memory, nd2_txz");
cudaRes = cudaMalloc((void **)&nd2_tyyD, sizeof(int) * 18);
CHECK_ERROR(cudaRes, "Allocate Device Memory, nd2_tyy");
cudaRes = cudaMalloc((void **)&nd2_tyzD, sizeof(int) * 18);
CHECK_ERROR(cudaRes, "Allocate Device Memory, nd2_tyz");
cudaRes = cudaMalloc((void **)&drvh2D, sizeof(float) * (*mw2_pml1) * 2);
CHECK_ERROR(cudaRes, "Allocate Device Memory, drvh2");
cudaRes = cudaMalloc((void **)&drti2D, sizeof(float) * (*mw2_pml1) * 2);
CHECK_ERROR(cudaRes, "Allocate Device Memory, drti2");
cudaRes = cudaMalloc((void **)&drth2D, sizeof(float) * (*mw2_pml1) * 2);
CHECK_ERROR(cudaRes, "Allocate Device Memory, drth2");
cudaRes = cudaMalloc((void **)&idmat2D, sizeof(int) * (*nzbtm + 1) * (*nxbtm + 1) * (*nybtm + 1));
CHECK_ERROR(cudaRes, "Allocate Device Memory, idmat2");
if (lbx[1] >= lbx[0])
{
cudaRes = cudaMalloc((void **)&damp2_xD, sizeof(float) * (*nzbtm) * (*nybtm) * (lbx[1] - lbx[0] + 1));
CHECK_ERROR(cudaRes, "Allocate Device Memory, damp2_x");
}
if (lby[1] >= lby[0])
{
cudaRes = cudaMalloc((void **)&damp2_yD, sizeof(float) * (*nzbtm) * (*nxbtm) * (lby[1] - lby[0] + 1));
CHECK_ERROR(cudaRes, "Allocate Device Memory, damp2_y");
}
cudaRes = cudaMalloc((void **)&damp2_zD, sizeof(float) * (*nxbtm) * (*nybtm));
CHECK_ERROR(cudaRes, "Allocate Device Memory, damp2_z");
cudaRes = cudaMalloc((void **)&dxi2D, sizeof(float) * 4 * (*nxbtm));
CHECK_ERROR(cudaRes, "Allocate Device Memory, dxi2");
cudaRes = cudaMalloc((void **)&dyi2D, sizeof(float) * 4 * (*nybtm));
CHECK_ERROR(cudaRes, "Allocate Device Memory, dyi2");
cudaRes = cudaMalloc((void **)&dzi2D, sizeof(float) * 4 * (*nzbtm));
CHECK_ERROR(cudaRes, "Allocate Device Memory, dzi2");
cudaRes = cudaMalloc((void **)&dxh2D, sizeof(float) * 4 * (*nxbtm));
CHECK_ERROR(cudaRes, "Allocate Device Memory, dxh2");
cudaRes = cudaMalloc((void **)&dyh2D, sizeof(float) * 4 * (*nybtm));
CHECK_ERROR(cudaRes, "Allocate Device Memory, dyh2");
cudaRes = cudaMalloc((void **)&dzh2D, sizeof(float) * 4 * (*nzbtm));
CHECK_ERROR(cudaRes, "Allocate Device Memory, dzh2");
cudaRes = cudaMalloc((void **)&t2xxD, sizeof(float) * (*nzbtm) * (*nxbtm + 3) * (*nybtm));
CHECK_ERROR(cudaRes, "Allocate Device Memory, t2xx");
cudaRes = cudaMalloc((void **)&t2xyD, sizeof(float) * (*nzbtm) * (*nxbtm + 3) * (*nybtm + 3));
CHECK_ERROR(cudaRes, "Allocate Device Memory, t2xy");
cudaRes = cudaMalloc((void **)&t2xzD, sizeof(float) * (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm));
CHECK_ERROR(cudaRes, "Allocate Device Memory, t2xz");
cudaRes = cudaMalloc((void **)&t2yyD, sizeof(float) * (*nzbtm) * (*nxbtm) * (*nybtm + 3));
CHECK_ERROR(cudaRes, "Allocate Device Memory, t2yy");
cudaRes = cudaMalloc((void **)&t2yzD, sizeof(float) * (*nzbtm + 1) * (*nxbtm) * (*nybtm + 3));
CHECK_ERROR(cudaRes, "Allocate Device Memory, t2yz");
cudaRes = cudaMalloc((void **)&t2zzD, sizeof(float) * (*nzbtm + 1) * (*nxbtm) * (*nybtm));
CHECK_ERROR(cudaRes, "Allocate Device Memory, t2zz");
cudaMalloc((void **)&qt2xxD, sizeof(float) * (*nzbtm) * (*nxbtm) * (*nybtm));
cudaMalloc((void **)&qt2xyD, sizeof(float) * (*nzbtm) * (*nxbtm) * (*nybtm));
cudaMalloc((void **)&qt2xzD, sizeof(float) * (*nzbtm) * (*nxbtm) * (*nybtm));
cudaMalloc((void **)&qt2yyD, sizeof(float) * (*nzbtm) * (*nxbtm) * (*nybtm));
cudaMalloc((void **)&qt2yzD, sizeof(float) * (*nzbtm) * (*nxbtm) * (*nybtm));
cudaMalloc((void **)&qt2zzD, sizeof(float) * (*nzbtm) * (*nxbtm) * (*nybtm));
if (lbx[1] >= lbx[0])
{
nti = (lbx[1] - lbx[0] + 1) * (*mw2_pml) + lbx[1];
nth = (lbx[1] - lbx[0] + 1) * (*mw2_pml) + 1 - lbx[0];
cudaMalloc((void **)&t2xx_pxD, sizeof(float) * (*nzbtm) * nti * (*nybtm));
cudaMalloc((void **)&t2xy_pxD, sizeof(float) * (*nzbtm) * nth * (*nybtm));
cudaMalloc((void **)&t2xz_pxD, sizeof(float) * (*nzbtm) * nth * (*nybtm));
cudaMalloc((void **)&t2yy_pxD, sizeof(float) * (*nzbtm) * nti * (*nybtm));
cudaMalloc((void **)&qt2xx_pxD, sizeof(float) * (*nzbtm) * nti * (*nybtm));
cudaMalloc((void **)&qt2xy_pxD, sizeof(float) * (*nzbtm) * nth * (*nybtm));
cudaMalloc((void **)&qt2xz_pxD, sizeof(float) * (*nzbtm) * nth * (*nybtm));
cudaMalloc((void **)&qt2yy_pxD, sizeof(float) * (*nzbtm) * nti * (*nybtm));
}
if (lby[1] >= lby[0])
{
nti = (lby[1] - lby[0] + 1) * (*mw2_pml) + lby[1];
nth = (lby[1] - lby[0] + 1) * (*mw2_pml) + 1 - lby[0];
cudaMalloc((void **)&t2xx_pyD, sizeof(float) * (*nzbtm) * (*nxbtm) * nti);
cudaMalloc((void **)&t2xy_pyD, sizeof(float) * (*nzbtm) * (*nxbtm) * nth);
cudaMalloc((void **)&t2yy_pyD, sizeof(float) * (*nzbtm) * (*nxbtm) * nti);
cudaMalloc((void **)&t2yz_pyD, sizeof(float) * (*nzbtm) * (*nxbtm) * nth);
cudaMalloc((void **)&qt2xx_pyD, sizeof(float) * (*nzbtm) * (*nxbtm) * nti);
cudaMalloc((void **)&qt2xy_pyD, sizeof(float) * (*nzbtm) * (*nxbtm) * nth);
cudaMalloc((void **)&qt2yy_pyD, sizeof(float) * (*nzbtm) * (*nxbtm) * nti);
cudaMalloc((void **)&qt2yz_pyD, sizeof(float) * (*nzbtm) * (*nxbtm) * nth);
}
cudaMalloc((void **)&t2xx_pzD, sizeof(float) * (*mw2_pml) * (*nxbtm) * (*nybtm));
cudaMalloc((void **)&t2xz_pzD, sizeof(float) * (*mw2_pml1) * (*nxbtm) * (*nybtm));
cudaMalloc((void **)&t2yz_pzD, sizeof(float) * (*mw2_pml1) * (*nxbtm) * (*nybtm));
cudaMalloc((void **)&t2zz_pzD, sizeof(float) * (*mw2_pml) * (*nxbtm) * (*nybtm));
cudaMalloc((void **)&qt2xx_pzD, sizeof(float) * (*mw2_pml) * (*nxbtm) * (*nybtm));
cudaMalloc((void **)&qt2xz_pzD, sizeof(float) * (*mw2_pml1) * (*nxbtm) * (*nybtm));
cudaMalloc((void **)&qt2yz_pzD, sizeof(float) * (*mw2_pml1) * (*nxbtm) * (*nybtm));
cudaMalloc((void **)&qt2zz_pzD, sizeof(float) * (*mw2_pml) * (*nxbtm) * (*nybtm));
cudaMalloc((void **)&v2xD, sizeof(float) * (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm + 3));
cudaMalloc((void **)&v2yD, sizeof(float) * (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm + 3));
cudaMalloc((void **)&v2zD, sizeof(float) * (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm + 3));
if (lbx[1] >= lbx[0])
{
nv2 = (lbx[1] - lbx[0] + 1) * (*mw2_pml);
cudaRes = cudaMalloc((void **)&v2x_pxD, sizeof(float) * (*nzbtm) * nv2 * (*nybtm));
CHECK_ERROR(cudaRes, "Allocate Device Memory, v2x_px");
cudaRes = cudaMalloc((void **)&v2y_pxD, sizeof(float) * (*nzbtm) * nv2 * (*nybtm));
CHECK_ERROR(cudaRes, "Allocate Device Memory, v2y_px");
cudaRes = cudaMalloc((void **)&v2z_pxD, sizeof(float) * (*nzbtm) * nv2 * (*nybtm));
CHECK_ERROR(cudaRes, "Allocate Device Memory, v2z_px");
}
if (lby[1] >= lby[0])
{
nv2 = (lby[1] - lby[0] + 1) * (*mw2_pml);
cudaRes = cudaMalloc((void **)&v2x_pyD, sizeof(float) * (*nzbtm) * (*nxbtm) * nv2);
CHECK_ERROR(cudaRes, "Allocate Device Memory, v2x_py");
cudaRes = cudaMalloc((void **)&v2y_pyD, sizeof(float) * (*nzbtm) * (*nxbtm) * nv2);
CHECK_ERROR(cudaRes, "Allocate Device Memory, v2y_py");
cudaRes = cudaMalloc((void **)&v2z_pyD, sizeof(float) * (*nzbtm) * (*nxbtm) * nv2);
CHECK_ERROR(cudaRes, "Allocate Device Memory, v2z_py");
}
cudaRes = cudaMalloc((void **)&v2x_pzD, sizeof(float) * (*mw2_pml) * (*nxbtm) * (*nybtm));
CHECK_ERROR(cudaRes, "Allocate Device Memory, v2x_pz");
cudaRes = cudaMalloc((void **)&v2y_pzD, sizeof(float) * (*mw2_pml) * (*nxbtm) * (*nybtm));
CHECK_ERROR(cudaRes, "Allocate Device Memory, v2y_pz");
cudaRes = cudaMalloc((void **)&v2z_pzD, sizeof(float) * (*mw2_pml) * (*nxbtm) * (*nybtm));
CHECK_ERROR(cudaRes, "Allocate Device Memory, v2z_pz");
//printf("done!\n");
return;
}
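// One-time host-to-device copy of the velocity-kernel inputs: index ranges,
// material table, damping profiles, derivative coefficients, stress tensors,
// and the PML split-field velocity arrays.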
void cpy_h2d_velocityInputsCOneTime(int *lbx,
int *lby,
int *nd1_vel,
float *rho,
float *drvh1,
float *drti1,
float *damp1_x,
float *damp1_y,
int *idmat1,
float *dxi1,
float *dyi1,
float *dzi1,
float *dxh1,
float *dyh1,
float *dzh1,
float *t1xx,
float *t1xy,
float *t1xz,
float *t1yy,
float *t1yz,
float *t1zz,
float *v1x_px,
float *v1y_px,
float *v1z_px,
float *v1x_py,
float *v1y_py,
float *v1z_py,
int *nd2_vel,
float *drvh2,
float *drti2,
int *idmat2,
float *damp2_x,
float *damp2_y,
float *damp2_z,
float *dxi2,
float *dyi2,
float *dzi2,
float *dxh2,
float *dyh2,
float *dzh2,
float *t2xx,
float *t2xy,
float *t2xz,
float *t2yy,
float *t2yz,
float *t2zz,
float *v2x_px,
float *v2y_px,
float *v2z_px,
float *v2x_py,
float *v2y_py,
float *v2z_py,
float *v2x_pz,
float *v2y_pz,
float *v2z_pz,
int *nmat, //dimension #, int
int *mw1_pml1, //int
int *mw2_pml1, //int
int *nxtop, //int
int *nytop, //int
int *nztop,
int *mw1_pml, //int
int *mw2_pml, //int
int *nxbtm, //int
int *nybtm, //int
int *nzbtm,
int *nzbm1)
{
//printf("[CUDA] initial h2d cpy for velocity ........");
cudaError_t cudaRes;
int nv2;
// int i;
// for(i=0; i<(*nzbtm) * (*nxbtm + 3) * (*nybtm); i++)
// {
// printf("%f ", t2xy[i]);
// }
// printf("\n");
//for inner_I
cudaRes = cudaMemcpy(nd1_velD, nd1_vel, sizeof(int) * 18, cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, nd1_vel");
cudaRes = cudaMemcpy(rhoD, rho, sizeof(float) * (*nmat), cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, rho");
cudaRes = cudaMemcpy(drvh1D, drvh1, sizeof(float) * (*mw1_pml1) * 2, cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, drvh1");
cudaRes = cudaMemcpy(drti1D, drti1, sizeof(float) * (*mw1_pml1) * 2, cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, drti1");
if (lbx[1] >= lbx[0])
{
cudaRes = cudaMemcpy(damp1_xD, damp1_x,
sizeof(float) * (*nztop + 1) * (*nytop) * (lbx[1] - lbx[0] + 1),
cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, damp1_x");
}
if (lby[1] >= lby[0])
{
cudaRes = cudaMemcpy(damp1_yD, damp1_y,
sizeof(float) * (*nztop + 1) * (*nxtop) * (lby[1] - lby[0] + 1),
cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, damp1_y");
}
cudaRes = cudaMemcpy(idmat1D, idmat1, sizeof(int) * (*nztop + 2) * (*nxtop + 1) * (*nytop + 1), cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, idmat1");
cudaRes = cudaMemcpy(dxi1D, dxi1, sizeof(float) * 4 * (*nxtop), cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, dxi1");
cudaRes = cudaMemcpy(dyi1D, dyi1, sizeof(float) * 4 * (*nytop), cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, dyi1");
cudaRes = cudaMemcpy(dzi1D, dzi1, sizeof(float) * 4 * (*nztop + 1), cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, dzi1");
cudaRes = cudaMemcpy(dxh1D, dxh1, sizeof(float) * 4 * (*nxtop), cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, dxh1");
cudaRes = cudaMemcpy(dyh1D, dyh1, sizeof(float) * 4 * (*nytop), cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, dyh1");
cudaRes = cudaMemcpy(dzh1D, dzh1, sizeof(float) * 4 * (*nztop + 1), cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, dzh1");
cudaRes = cudaMemcpy(t1xxD, t1xx, sizeof(float) * (*nztop) * (*nxtop + 3) * (*nytop), cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t1xx");
cudaRes = cudaMemcpy(t1xyD, t1xy, sizeof(float) * (*nztop) * (*nxtop + 3) * (*nytop + 3), cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t1xy");
cudaRes = cudaMemcpy(t1xzD, t1xz, sizeof(float) * (*nztop + 1) * (*nxtop + 3) * (*nytop), cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t1xz");
cudaRes = cudaMemcpy(t1yyD, t1yy, sizeof(float) * (*nztop) * (*nxtop) * (*nytop + 3), cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t1yy");
cudaRes = cudaMemcpy(t1yzD, t1yz, sizeof(float) * (*nztop + 1) * (*nxtop) * (*nytop + 3), cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t1yz");
cudaRes = cudaMemcpy(t1zzD, t1zz, sizeof(float) * (*nztop) * (*nxtop) * (*nytop), cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t1zz");
if (lbx[1] >= lbx[0])
{
nv2 = (lbx[1] - lbx[0] + 1) * (*mw1_pml);
cudaRes = cudaMemcpy(v1x_pxD, v1x_px, sizeof(float) * (*nztop) * nv2 * (*nytop), cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice1, v1x_px");
cudaRes = cudaMemcpy(v1y_pxD, v1y_px, sizeof(float) * (*nztop) * nv2 * (*nytop), cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice1, v1y_px");
cudaRes = cudaMemcpy(v1z_pxD, v1z_px, sizeof(float) * (*nztop) * nv2 * (*nytop), cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice1, v1z_px");
}
if (lby[1] >= lby[0])
{
nv2 = (lby[1] - lby[0] + 1) * (*mw1_pml);
cudaRes = cudaMemcpy(v1x_pyD, v1x_py, sizeof(float) * (*nztop) * (*nxtop) * nv2, cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice1, v1x_py");
cudaRes = cudaMemcpy(v1y_pyD, v1y_py, sizeof(float) * (*nztop) * (*nxtop) * nv2, cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice1, v1y_py");
cudaRes = cudaMemcpy(v1z_pyD, v1z_py, sizeof(float) * (*nztop) * (*nxtop) * nv2, cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice1, v1z_py");
}
//for inner_II
cudaRes = cudaMemcpy(nd2_velD, nd2_vel, sizeof(int) * 18, cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, nd2_vel");
cudaRes = cudaMemcpy(drvh2D, drvh2, sizeof(float) * (*mw2_pml1) * 2, cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, drvh2");
cudaRes = cudaMemcpy(drti2D, drti2, sizeof(float) * (*mw2_pml1) * 2, cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, drti2");
cudaRes = cudaMemcpy(idmat2D, idmat2, sizeof(int) * (*nzbtm + 1) * (*nxbtm + 1) * (*nybtm +1), cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, idmat2");
if (lbx[1] >= lbx[0])
{
cudaRes = cudaMemcpy(damp2_xD, damp2_x,
sizeof(float) * (*nzbtm) * (*nybtm) * (lbx[1] - lbx[0] + 1),
cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, damp2_x");
}
if (lby[1] >= lby[0])
{
cudaRes = cudaMemcpy(damp2_yD, damp2_y,
sizeof(float) * (*nzbtm) * (*nxbtm) * (lby[1] - lby[0] + 1),
cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, damp2_y");
}
cudaRes = cudaMemcpy(damp2_zD, damp2_z, sizeof(float) * (*nxbtm) * (*nybtm), cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, damp2_z");
cudaRes = cudaMemcpy(dxi2D, dxi2, sizeof(float) * 4 * (*nxbtm), cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, dxi2");
cudaRes = cudaMemcpy(dyi2D, dyi2, sizeof(float) * 4 * (*nybtm), cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, dyi2");
cudaRes = cudaMemcpy(dzi2D, dzi2, sizeof(float) * 4 * (*nzbtm), cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, dzi2");
cudaRes = cudaMemcpy(dxh2D, dxh2, sizeof(float) * 4 * (*nxbtm), cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, dxh2");
cudaRes = cudaMemcpy(dyh2D, dyh2, sizeof(float) * 4 * (*nybtm), cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, dyh2");
cudaRes = cudaMemcpy(dzh2D, dzh2, sizeof(float) * 4 * (*nzbtm), cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, dzh2");
cudaRes = cudaMemcpy(t2xxD, t2xx, sizeof(float) * (*nzbtm) * (*nxbtm + 3) * (*nybtm), cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t2xx");
cudaRes = cudaMemcpy(t2xyD, t2xy, sizeof(float) * (*nzbtm) * (*nxbtm + 3) * (*nybtm + 3), cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t2xy");
cudaRes = cudaMemcpy(t2xzD, t2xz, sizeof(float) * (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm), cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t2xz");
cudaRes = cudaMemcpy(t2yyD, t2yy, sizeof(float) * (*nzbtm) * (*nxbtm) * (*nybtm + 3), cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t2yy");
cudaRes = cudaMemcpy(t2yzD, t2yz, sizeof(float) * (*nzbtm + 1) * (*nxbtm) * (*nybtm + 3), cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t2yz");
cudaRes = cudaMemcpy(t2zzD, t2zz, sizeof(float) * (*nzbtm + 1) * (*nxbtm) * (*nybtm), cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t2zz");
if (lbx[1] >= lbx[0])
{
nv2 = (lbx[1] - lbx[0] + 1) * (*mw2_pml);
cudaRes = cudaMemcpy(v2x_pxD, v2x_px, sizeof(float) * (*nzbtm) * nv2 * (*nybtm), cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice1, v2x_px");
cudaRes = cudaMemcpy(v2y_pxD, v2y_px, sizeof(float) * (*nzbtm) * nv2 * (*nybtm), cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice1, v2y_px");
cudaRes = cudaMemcpy(v2z_pxD, v2z_px, sizeof(float) * (*nzbtm) * nv2 * (*nybtm), cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice1, v2z_px");
}
if (lby[1] >= lby[0])
{
nv2 = (lby[1] - lby[0] + 1) * (*mw2_pml);
cudaRes = cudaMemcpy(v2x_pyD, v2x_py, sizeof(float) * (*nzbtm) * (*nxbtm) * nv2, cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice1, v2x_py");
cudaRes = cudaMemcpy(v2y_pyD, v2y_py, sizeof(float) * (*nzbtm) * (*nxbtm) * nv2, cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice1, v2y_py");
cudaRes = cudaMemcpy(v2z_pyD, v2z_py, sizeof(float) * (*nzbtm) * (*nxbtm) * nv2, cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice1, v2z_py");
}
cudaRes = cudaMemcpy(v2x_pzD, v2x_pz, sizeof(float) * (*mw2_pml) * (*nxbtm) * (*nybtm), cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice1, v2x_pz");
cudaRes = cudaMemcpy(v2y_pzD, v2y_pz, sizeof(float) * (*mw2_pml) * (*nxbtm) * (*nybtm), cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice1, v2y_pz");
cudaRes = cudaMemcpy(v2z_pzD, v2z_pz, sizeof(float) * (*mw2_pml) * (*nxbtm) * (*nybtm), cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice1, v2z_pz");
//printf("done!\n");
return;
}
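// Per-timestep host-to-device refresh of the stress tensors consumed by the
// velocity kernels.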
void cpy_h2d_velocityInputsC(float *t1xx,
float *t1xy,
float *t1xz,
float *t1yy,
float *t1yz,
float *t1zz,
float *t2xx,
float *t2xy,
float *t2xz,
float *t2yy,
float *t2yz,
float *t2zz,
int *nxtop,
int *nytop,
int *nztop,
int *nxbtm,
int *nybtm,
int *nzbtm)
{
//printf("[CUDA] h2d cpy for input ..........");
cudaError_t cudaRes;
//for inner_I
cudaRes = cudaMemcpy(t1xxD, t1xx, sizeof(float) * (*nztop) * (*nxtop + 3) * (*nytop), cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t1xx");
cudaRes = cudaMemcpy(t1xyD, t1xy, sizeof(float) * (*nztop) * (*nxtop + 3) * (*nytop + 3), cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t1xy");
cudaRes = cudaMemcpy(t1xzD, t1xz, sizeof(float) * (*nztop + 1) * (*nxtop + 3) * (*nytop), cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t1xz");
cudaRes = cudaMemcpy(t1yyD, t1yy, sizeof(float) * (*nztop) * (*nxtop) * (*nytop + 3), cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t1yy");
cudaRes = cudaMemcpy(t1yzD, t1yz, sizeof(float) * (*nztop + 1) * (*nxtop) * (*nytop + 3), cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t1yz");
cudaRes = cudaMemcpy(t1zzD, t1zz, sizeof(float) * (*nztop) * (*nxtop) * (*nytop), cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t1zz");
//for inner_II
cudaRes = cudaMemcpy(t2xxD, t2xx, sizeof(float) * (*nzbtm) * (*nxbtm + 3) * (*nybtm), cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t2xx");
cudaRes = cudaMemcpy(t2xyD, t2xy, sizeof(float) * (*nzbtm) * (*nxbtm + 3) * (*nybtm + 3), cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t2xy");
cudaRes = cudaMemcpy(t2xzD, t2xz, sizeof(float) * (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm), cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t2xz");
cudaRes = cudaMemcpy(t2yyD, t2yy, sizeof(float) * (*nzbtm) * (*nxbtm) * (*nybtm + 3), cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t2yy");
cudaRes = cudaMemcpy(t2yzD, t2yz, sizeof(float) * (*nzbtm + 1) * (*nxbtm) * (*nybtm + 3), cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t2yz");
cudaRes = cudaMemcpy(t2zzD, t2zz, sizeof(float) * (*nzbtm + 1) * (*nxbtm) * (*nybtm), cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t2zz");
//printf("done!\n");
return;
}
//=====================================================================
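// One-time host-to-device copy of the stress-kernel inputs: index ranges,
// damping and derivative tables, velocities, attenuation coefficients, and
// the PML split-field stress arrays.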
void cpy_h2d_stressInputsCOneTime(int *lbx,
int *lby,
int *nd1_txy,
int *nd1_txz,
int *nd1_tyy,
int *nd1_tyz,
float *drti1,
float *drth1,
float *damp1_x,
float *damp1_y,
int *idmat1,
float *dxi1,
float *dyi1,
float *dzi1,
float *dxh1,
float *dyh1,
float *dzh1,
float *v1x,
float *v1y,
float *v1z,
float *t1xx_px,
float *t1xy_px,
float *t1xz_px,
float *t1yy_px,
float *qt1xx_px,
float *qt1xy_px,
float *qt1xz_px,
float *qt1yy_px,
float *t1xx_py,
float *t1xy_py,
float *t1yy_py,
float *t1yz_py,
float *qt1xx_py,
float *qt1xy_py,
float *qt1yy_py,
float *qt1yz_py,
float *qt1xx,
float *qt1xy,
float *qt1xz,
float *qt1yy,
float *qt1yz,
float *qt1zz,
float *clamda,
float *cmu,
float *epdt,
float *qwp,
float *qws,
float *qwt1,
float *qwt2,
int *nd2_txy,
int *nd2_txz,
int *nd2_tyy,
int *nd2_tyz,
float *drti2,
float *drth2,
int *idmat2,
float *damp2_x,
float *damp2_y,
float *damp2_z,
float *dxi2,
float *dyi2,
float *dzi2,
float *dxh2,
float *dyh2,
float *dzh2,
float *v2x,
float *v2y,
float *v2z,
float *qt2xx,
float *qt2xy,
float *qt2xz,
float *qt2yy,
float *qt2yz,
float *qt2zz,
float *t2xx_px,
float *t2xy_px,
float *t2xz_px,
float *t2yy_px,
float *qt2xx_px,
float *qt2xy_px,
float *qt2xz_px,
float *qt2yy_px,
float *t2xx_py,
float *t2xy_py,
float *t2yy_py,
float *t2yz_py,
float *qt2xx_py,
float *qt2xy_py,
float *qt2yy_py,
float *qt2yz_py,
float *t2xx_pz,
float *t2xz_pz,
float *t2yz_pz,
float *t2zz_pz,
float *qt2xx_pz,
float *qt2xz_pz,
float *qt2yz_pz,
float *qt2zz_pz,
int *nmat, //dimension #, int
int *mw1_pml1, //int
int *mw2_pml1, //int
int *nxtop, //int
int *nytop, //int
int *nztop,
int *mw1_pml, //int
int *mw2_pml, //int
int *nxbtm, //int
int *nybtm, //int
int *nzbtm,
int *nll)
{
//printf("[CUDA] initial h2d cpy for stress ...........");
cudaError_t cudaRes;
int nti, nth;
//for inner_I
cudaRes = cudaMemcpy(nd1_txyD, nd1_txy, sizeof(int) * 18, cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, nd1_txy");
cudaRes = cudaMemcpy(nd1_txzD, nd1_txz, sizeof(int) * 18, cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, nd1_txz");
cudaRes = cudaMemcpy(nd1_tyyD, nd1_tyy, sizeof(int) * 18, cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, nd1_tyy");
cudaRes = cudaMemcpy(nd1_tyzD, nd1_tyz, sizeof(int) * 18, cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, nd1_tyz");
cudaRes = cudaMemcpy(drti1D, drti1, sizeof(float) * (*mw1_pml1) * 2, cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, drti1");
cudaRes = cudaMemcpy(drth1D, drth1, sizeof(float) * (*mw1_pml1) * 2, cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, drth1");
if (lbx[1] >= lbx[0])
{
cudaRes = cudaMemcpy(damp1_xD, damp1_x,
sizeof(float) * (*nztop + 1) * (*nytop) * (lbx[1] - lbx[0] + 1),
cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, damp1_x");
}
if (lby[1] >= lby[0])
{
cudaRes = cudaMemcpy(damp1_yD, damp1_y,
sizeof(float) * (*nztop + 1) * (*nxtop) * (lby[1] - lby[0] + 1),
cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, damp1_y");
}
cudaRes = cudaMemcpy(idmat1D, idmat1, sizeof(int) * (*nztop + 2) * (*nxtop + 1) * (*nytop + 1), cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, idmat1");
cudaRes = cudaMemcpy(dxi1D, dxi1, sizeof(float) * 4 * (*nxtop), cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, dxi1");
cudaRes = cudaMemcpy(dyi1D, dyi1, sizeof(float) * 4 * (*nytop), cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, dyi1");
cudaRes = cudaMemcpy(dzi1D, dzi1, sizeof(float) * 4 * (*nztop + 1), cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, dzi1");
cudaRes = cudaMemcpy(dxh1D, dxh1, sizeof(float) * 4 * (*nxtop), cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, dxh1");
cudaRes = cudaMemcpy(dyh1D, dyh1, sizeof(float) * 4 * (*nytop), cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, dyh1");
cudaRes = cudaMemcpy(dzh1D, dzh1, sizeof(float) * 4 * (*nztop + 1), cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, dzh1");
cudaMemcpy(v1xD, v1x, sizeof(float) * (*nztop + 2) * (*nxtop + 3) * (*nytop + 3), cudaMemcpyHostToDevice);
cudaMemcpy(v1yD, v1y, sizeof(float) * (*nztop + 2) * (*nxtop + 3) * (*nytop + 3), cudaMemcpyHostToDevice);
cudaMemcpy(v1zD, v1z, sizeof(float) * (*nztop + 2) * (*nxtop + 3) * (*nytop + 3), cudaMemcpyHostToDevice);
if (lbx[1] >= lbx[0])
{
nti = (lbx[1] - lbx[0] + 1) * (*mw1_pml) + lbx[1];
nth = (lbx[1] - lbx[0] + 1) * (*mw1_pml) + 1 - lbx[0];
cudaMemcpy(t1xx_pxD, t1xx_px, sizeof(float) * (*nztop) * (nti) * (*nytop), cudaMemcpyHostToDevice);
//debug
//write_output(t1xx_px, (*nztop) * (nti) * (*nytop), "OUTPUT_ARRAYS/t1xx_px_cuda.txt");
cudaMemcpy(t1xy_pxD, t1xy_px, sizeof(float) * (*nztop) * nth * (*nytop), cudaMemcpyHostToDevice);
cudaMemcpy(t1xz_pxD, t1xz_px, sizeof(float) * (*nztop+1) * nth * (*nytop), cudaMemcpyHostToDevice);
cudaMemcpy(t1yy_pxD, t1yy_px, sizeof(float) * (*nztop) * nti * (*nytop), cudaMemcpyHostToDevice);
cudaMemcpy(qt1xx_pxD, qt1xx_px, sizeof(float) * (*nztop) * (nti) * (*nytop), cudaMemcpyHostToDevice);
cudaMemcpy(qt1xy_pxD, qt1xy_px, sizeof(float) * (*nztop) * nth * (*nytop), cudaMemcpyHostToDevice);
cudaMemcpy(qt1xz_pxD, qt1xz_px, sizeof(float) * (*nztop+1) * nth * (*nytop), cudaMemcpyHostToDevice);
cudaMemcpy(qt1yy_pxD, qt1yy_px, sizeof(float) * (*nztop) * nti * (*nytop), cudaMemcpyHostToDevice);
}
if (lby[1] >= lby[0])
{
nti = (lby[1] - lby[0] + 1) * (*mw1_pml) + lby[1];
nth = (lby[1] - lby[0] + 1) * (*mw1_pml) + 1 - lby[0];
cudaMemcpy(t1xx_pyD, t1xx_py, sizeof(float) * (*nztop) * (*nxtop) * nti, cudaMemcpyHostToDevice);
cudaMemcpy(t1xy_pyD, t1xy_py, sizeof(float) * (*nztop) * (*nxtop) * nth, cudaMemcpyHostToDevice);
cudaMemcpy(t1yy_pyD, t1yy_py, sizeof(float) * (*nztop) * (*nxtop) * nti, cudaMemcpyHostToDevice);
cudaMemcpy(t1yz_pyD, t1yz_py, sizeof(float) * (*nztop+1) * (*nxtop) * nth, cudaMemcpyHostToDevice);
cudaMemcpy(qt1xx_pyD, qt1xx_py, sizeof(float) * (*nztop) * (*nxtop) * nti, cudaMemcpyHostToDevice);
cudaMemcpy(qt1xy_pyD, qt1xy_py, sizeof(float) * (*nztop) * (*nxtop) * nth, cudaMemcpyHostToDevice);
cudaMemcpy(qt1yy_pyD, qt1yy_py, sizeof(float) * (*nztop) * (*nxtop) * nti, cudaMemcpyHostToDevice);
cudaMemcpy(qt1yz_pyD, qt1yz_py, sizeof(float) * (*nztop+1) * (*nxtop) * nth, cudaMemcpyHostToDevice);
}
cudaMemcpy(qt1xxD, qt1xx, sizeof(float) * (*nztop) * (*nxtop) * (*nytop), cudaMemcpyHostToDevice);
cudaMemcpy(qt1xyD, qt1xy, sizeof(float) * (*nztop) * (*nxtop) * (*nytop), cudaMemcpyHostToDevice);
cudaMemcpy(qt1xzD, qt1xz, sizeof(float) * (*nztop+1) * (*nxtop) * (*nytop), cudaMemcpyHostToDevice);
cudaMemcpy(qt1yyD, qt1yy, sizeof(float) * (*nztop) * (*nxtop) * (*nytop), cudaMemcpyHostToDevice);
cudaMemcpy(qt1yzD, qt1yz, sizeof(float) * (*nztop+1) * (*nxtop) * (*nytop), cudaMemcpyHostToDevice);
cudaMemcpy(qt1zzD, qt1zz, sizeof(float) * (*nztop) * (*nxtop) * (*nytop), cudaMemcpyHostToDevice);
cudaMemcpy(clamdaD, clamda, sizeof(float) * (*nmat), cudaMemcpyHostToDevice);
cudaMemcpy(cmuD, cmu, sizeof(float) * (*nmat), cudaMemcpyHostToDevice);
cudaMemcpy(epdtD, epdt, sizeof(float) * (*nll), cudaMemcpyHostToDevice);
cudaMemcpy(qwpD, qwp, sizeof(float) * (*nmat), cudaMemcpyHostToDevice);
cudaMemcpy(qwsD, qws, sizeof(float) * (*nmat), cudaMemcpyHostToDevice);
cudaMemcpy(qwt1D, qwt1, sizeof(float) * (*nll), cudaMemcpyHostToDevice);
cudaMemcpy(qwt2D, qwt2, sizeof(float) * (*nll), cudaMemcpyHostToDevice);
//for inner_II
cudaRes = cudaMemcpy(nd2_txyD, nd2_txy, sizeof(int) * 18, cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, nd2_txy");
cudaRes = cudaMemcpy(nd2_txzD, nd2_txz, sizeof(int) * 18, cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, nd2_txz");
cudaRes = cudaMemcpy(nd2_tyyD, nd2_tyy, sizeof(int) * 18, cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, nd2_tyy");
cudaRes = cudaMemcpy(nd2_tyzD, nd2_tyz, sizeof(int) * 18, cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, nd2_tyz");
cudaRes = cudaMemcpy(drti2D, drti2, sizeof(float) * (*mw2_pml1) * 2, cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, drti2");
cudaRes = cudaMemcpy(drth2D, drth2, sizeof(float) * (*mw2_pml1) * 2, cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, drth2");
cudaRes = cudaMemcpy(idmat2D, idmat2, sizeof(int) * (*nzbtm + 1) * (*nxbtm + 1) * (*nybtm +1), cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, idmat2");
if (lbx[1] >= lbx[0])
{
cudaRes = cudaMemcpy(damp2_xD, damp2_x,
sizeof(float) * (*nzbtm) * (*nybtm) * (lbx[1] - lbx[0] + 1),
cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, damp2_x");
}
if (lby[1] >= lby[0])
{
cudaRes = cudaMemcpy(damp2_yD, damp2_y,
sizeof(float) * (*nzbtm) * (*nxbtm) * (lby[1] - lby[0] + 1),
cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, damp2_y");
}
cudaRes = cudaMemcpy(damp2_zD, damp2_z, sizeof(float) * (*nxbtm) * (*nybtm), cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, damp2_z");
cudaRes = cudaMemcpy(dxi2D, dxi2, sizeof(float) * 4 * (*nxbtm), cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, dxi2");
cudaRes = cudaMemcpy(dyi2D, dyi2, sizeof(float) * 4 * (*nybtm), cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, dyi2");
cudaRes = cudaMemcpy(dzi2D, dzi2, sizeof(float) * 4 * (*nzbtm), cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, dzi2");
cudaRes = cudaMemcpy(dxh2D, dxh2, sizeof(float) * 4 * (*nxbtm), cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, dxh2");
cudaRes = cudaMemcpy(dyh2D, dyh2, sizeof(float) * 4 * (*nybtm), cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, dyh2");
cudaRes = cudaMemcpy(dzh2D, dzh2, sizeof(float) * 4 * (*nzbtm), cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, dzh2");
cudaMemcpy(v2xD, v2x, sizeof(float) * (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm + 3), cudaMemcpyHostToDevice);
cudaMemcpy(v2yD, v2y, sizeof(float) * (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm + 3), cudaMemcpyHostToDevice);
cudaMemcpy(v2zD, v2z, sizeof(float) * (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm + 3), cudaMemcpyHostToDevice);
cudaMemcpy(qt2xxD, qt2xx, sizeof(float) * (*nzbtm) * (*nxbtm) * (*nybtm), cudaMemcpyHostToDevice);
cudaMemcpy(qt2xyD, qt2xy, sizeof(float) * (*nzbtm) * (*nxbtm) * (*nybtm), cudaMemcpyHostToDevice);
cudaMemcpy(qt2xzD, qt2xz, sizeof(float) * (*nzbtm) * (*nxbtm) * (*nybtm), cudaMemcpyHostToDevice);
cudaMemcpy(qt2yyD, qt2yy, sizeof(float) * (*nzbtm) * (*nxbtm) * (*nybtm), cudaMemcpyHostToDevice);
cudaMemcpy(qt2yzD, qt2yz, sizeof(float) * (*nzbtm) * (*nxbtm) * (*nybtm), cudaMemcpyHostToDevice);
cudaMemcpy(qt2zzD, qt2zz, sizeof(float) * (*nzbtm) * (*nxbtm) * (*nybtm), cudaMemcpyHostToDevice);
if (lbx[1] >= lbx[0])
{
nti = (lbx[1] - lbx[0] + 1) * (*mw2_pml) + lbx[1];
nth = (lbx[1] - lbx[0] + 1) * (*mw2_pml) + 1 - lbx[0];
cudaMemcpy(t2xx_pxD, t2xx_px, sizeof(float) * (*nzbtm) * nti * (*nybtm), cudaMemcpyHostToDevice);
cudaMemcpy(t2xy_pxD, t2xy_px, sizeof(float) * (*nzbtm) * nth * (*nybtm), cudaMemcpyHostToDevice);
cudaMemcpy(t2xz_pxD, t2xz_px, sizeof(float) * (*nzbtm) * nth * (*nybtm), cudaMemcpyHostToDevice);
cudaMemcpy(t2yy_pxD, t2yy_px, sizeof(float) * (*nzbtm) * nti * (*nybtm), cudaMemcpyHostToDevice);
cudaMemcpy(qt2xx_pxD, qt2xx_px, sizeof(float) * (*nzbtm) * nti * (*nybtm), cudaMemcpyHostToDevice);
cudaMemcpy(qt2xy_pxD, qt2xy_px, sizeof(float) * (*nzbtm) * nth * (*nybtm), cudaMemcpyHostToDevice);
cudaMemcpy(qt2xz_pxD, qt2xz_px, sizeof(float) * (*nzbtm) * nth * (*nybtm), cudaMemcpyHostToDevice);
cudaMemcpy(qt2yy_pxD, qt2yy_px, sizeof(float) * (*nzbtm) * nti * (*nybtm), cudaMemcpyHostToDevice);
}
if (lby[1] >= lby[0])
{
nti = (lby[1] - lby[0] + 1) * (*mw2_pml) + lby[1];
nth = (lby[1] - lby[0] + 1) * (*mw2_pml) + 1 - lby[0];
cudaMemcpy(t2xx_pyD, t2xx_py, sizeof(float) * (*nzbtm) * (*nxbtm) * nti, cudaMemcpyHostToDevice);
cudaMemcpy(t2xy_pyD, t2xy_py, sizeof(float) * (*nzbtm) * (*nxbtm) * nth, cudaMemcpyHostToDevice);
cudaMemcpy(t2yy_pyD, t2yy_py, sizeof(float) * (*nzbtm) * (*nxbtm) * nti, cudaMemcpyHostToDevice);
cudaMemcpy(t2yz_pyD, t2yz_py, sizeof(float) * (*nzbtm) * (*nxbtm) * nth, cudaMemcpyHostToDevice);
cudaMemcpy(qt2xx_pyD, qt2xx_py, sizeof(float) * (*nzbtm) * (*nxbtm) * nti, cudaMemcpyHostToDevice);
cudaMemcpy(qt2xy_pyD, qt2xy_py, sizeof(float) * (*nzbtm) * (*nxbtm) * nth, cudaMemcpyHostToDevice);
cudaMemcpy(qt2yy_pyD, qt2yy_py, sizeof(float) * (*nzbtm) * (*nxbtm) * nti, cudaMemcpyHostToDevice);
cudaMemcpy(qt2yz_pyD, qt2yz_py, sizeof(float) * (*nzbtm) * (*nxbtm) * nth, cudaMemcpyHostToDevice);
}
cudaMemcpy(t2xx_pzD, t2xx_pz, sizeof(float) * (*mw2_pml) * (*nxbtm) * (*nybtm), cudaMemcpyHostToDevice);
cudaMemcpy(t2xz_pzD, t2xz_pz, sizeof(float) * (*mw2_pml1) * (*nxbtm) * (*nybtm), cudaMemcpyHostToDevice);
cudaMemcpy(t2yz_pzD, t2yz_pz, sizeof(float) * (*mw2_pml1) * (*nxbtm) * (*nybtm), cudaMemcpyHostToDevice);
cudaMemcpy(t2zz_pzD, t2zz_pz, sizeof(float) * (*mw2_pml) * (*nxbtm) * (*nybtm), cudaMemcpyHostToDevice);
cudaMemcpy(qt2xx_pzD, qt2xx_pz, sizeof(float) * (*mw2_pml) * (*nxbtm) * (*nybtm), cudaMemcpyHostToDevice);
cudaMemcpy(qt2xz_pzD, qt2xz_pz, sizeof(float) * (*mw2_pml1) * (*nxbtm) * (*nybtm), cudaMemcpyHostToDevice);
cudaMemcpy(qt2yz_pzD, qt2yz_pz, sizeof(float) * (*mw2_pml1) * (*nxbtm) * (*nybtm), cudaMemcpyHostToDevice);
cudaMemcpy(qt2zz_pzD, qt2zz_pz, sizeof(float) * (*mw2_pml) * (*nxbtm) * (*nybtm), cudaMemcpyHostToDevice);
//printf("done!\n");
return;
}
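// Per-step host-to-device refresh of the velocity fields only; all other
// stress inputs stay resident on the device after the one-time upload above.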
void cpy_h2d_stressInputsC(float *v1x,
float *v1y,
float *v1z,
float *v2x,
float *v2y,
float *v2z,
int *nxtop,
int *nytop,
int *nztop,
int *nxbtm,
int *nybtm,
int *nzbtm)
{
//printf("[CUDA] h2d cpy for input ..............");
//for inner_I
cudaMemcpy(v1xD, v1x, sizeof(float) * (*nztop + 2) * (*nxtop + 3) * (*nytop + 3), cudaMemcpyHostToDevice);
cudaMemcpy(v1yD, v1y, sizeof(float) * (*nztop + 2) * (*nxtop + 3) * (*nytop + 3), cudaMemcpyHostToDevice);
cudaMemcpy(v1zD, v1z, sizeof(float) * (*nztop + 2) * (*nxtop + 3) * (*nytop + 3), cudaMemcpyHostToDevice);
//for inner_II
cudaMemcpy(v2xD, v2x, sizeof(float) * (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm + 3), cudaMemcpyHostToDevice);
cudaMemcpy(v2yD, v2y, sizeof(float) * (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm + 3), cudaMemcpyHostToDevice);
cudaMemcpy(v2zD, v2z, sizeof(float) * (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm + 3), cudaMemcpyHostToDevice);
//printf("done!\n");
return;
}
//=====================================================================
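// Seeds the device-side velocity arrays with the current host values, since
// the velocity kernels accumulate into them in place.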
void cpy_h2d_velocityOutputsC(float *v1x,
float *v1y,
float *v1z,
float *v2x,
float *v2y,
float *v2z,
int *nxtop,
int *nytop,
int *nztop,
int *nxbtm,
int *nybtm,
int *nzbtm)
{
//printf("[CUDA] h2d cpy for output .........");
cudaError_t cudaRes;
//for inner_I
cudaRes = cudaMemcpy(v1xD, v1x, sizeof(float) * (*nztop + 2) * (*nxtop + 3) * (*nytop + 3), cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice1, v1x");
cudaRes = cudaMemcpy(v1yD, v1y, sizeof(float) * (*nztop + 2) * (*nxtop + 3) * (*nytop + 3), cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice1, v1y");
cudaRes = cudaMemcpy(v1zD, v1z, sizeof(float) * (*nztop + 2) * (*nxtop + 3) * (*nytop + 3), cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice1, v1z");
//for inner_II
cudaRes = cudaMemcpy(v2xD, v2x, sizeof(float) * (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm + 3), cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice1, v2x");
cudaRes = cudaMemcpy(v2yD, v2y, sizeof(float) * (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm + 3), cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice1, v2y");
cudaRes = cudaMemcpy(v2zD, v2z, sizeof(float) * (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm + 3), cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice1, v2z");
//printf("done!\n");
return;
}
//=====================================================================
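// Copies the updated velocity fields back to the host once the velocity
// kernels have finished.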
void cpy_d2h_velocityOutputsC(float *v1x,
float *v1y,
float *v1z,
float *v2x,
float *v2y,
float *v2z,
int *nxtop,
int *nytop,
int *nztop,
int *nxbtm,
int *nybtm,
int *nzbtm)
{
//printf("[CUDA] d2h cpy for output .........");
cudaError_t cudaRes;
//for inner_I
cudaRes = cudaMemcpy(v1x, v1xD, sizeof(float) * (*nztop + 2) * (*nxtop + 3) * (*nytop + 3), cudaMemcpyDeviceToHost);
CHECK_ERROR(cudaRes, "outputDataCopyDeviceToHost1, v1x");
cudaRes = cudaMemcpy(v1y, v1yD, sizeof(float) * (*nztop + 2) * (*nxtop + 3) * (*nytop + 3), cudaMemcpyDeviceToHost);
CHECK_ERROR(cudaRes, "outputDataCopyDeviceToHost1, v1y");
cudaRes = cudaMemcpy(v1z, v1zD, sizeof(float) * (*nztop + 2) * (*nxtop + 3) * (*nytop + 3), cudaMemcpyDeviceToHost);
CHECK_ERROR(cudaRes, "outputDataCopyDeviceToHost1, v1z");
//for inner_II
cudaRes = cudaMemcpy(v2x, v2xD, sizeof(float) * (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm + 3), cudaMemcpyDeviceToHost);
CHECK_ERROR(cudaRes, "outputDataCopyDeviceToHost1, v2x");
cudaRes = cudaMemcpy(v2y, v2yD, sizeof(float) * (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm + 3), cudaMemcpyDeviceToHost);
CHECK_ERROR(cudaRes, "outputDataCopyDeviceToHost1, v2y");
cudaRes = cudaMemcpy(v2z, v2zD, sizeof(float) * (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm + 3), cudaMemcpyDeviceToHost);
CHECK_ERROR(cudaRes, "outputDataCopyDeviceToHost1, vzz");
//printf("done!\n");
return;
}
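// Seeds the device-side stress arrays with the current host values, since
// the stress kernels accumulate into them in place.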
void cpy_h2d_stressOutputsC(float *t1xx,
float *t1xy,
float *t1xz,
float *t1yy,
float *t1yz,
float *t1zz,
float *t2xx,
float *t2xy,
float *t2xz,
float *t2yy,
float *t2yz,
float *t2zz,
int *nxtop,
int *nytop,
int *nztop,
int *nxbtm,
int *nybtm,
int *nzbtm)
{
//printf("[CUDA] h2d cpy for output ..............");
cudaError_t cudaRes;
cudaRes = cudaMemcpy(t1xxD, t1xx, sizeof(float) * (*nztop) * (*nxtop + 3) * (*nytop), cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice, t1xx");
cudaRes = cudaMemcpy(t1xyD, t1xy, sizeof(float) * (*nztop) * (*nxtop + 3) * (*nytop + 3), cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice, t1xy");
cudaRes = cudaMemcpy(t1xzD, t1xz, sizeof(float) * (*nztop + 1) * (*nxtop + 3) * (*nytop), cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice, t1xz");
cudaRes = cudaMemcpy(t1yyD, t1yy, sizeof(float) * (*nztop) * (*nxtop) * (*nytop + 3), cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice, t1yy");
cudaRes = cudaMemcpy(t1yzD, t1yz, sizeof(float) * (*nztop + 1) * (*nxtop) * (*nytop + 3), cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice, t1yz");
cudaRes = cudaMemcpy(t1zzD, t1zz, sizeof(float) * (*nztop) * (*nxtop) * (*nytop), cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice, t1zz");
//for inner_II
cudaRes = cudaMemcpy(t2xxD, t2xx, sizeof(float) * (*nzbtm) * (*nxbtm + 3) * (*nybtm), cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice, t2xx");
cudaRes = cudaMemcpy(t2xyD, t2xy, sizeof(float) * (*nzbtm) * (*nxbtm + 3) * (*nybtm + 3), cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice, t2xy");
cudaRes = cudaMemcpy(t2xzD, t2xz, sizeof(float) * (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm), cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice, t2xz");
cudaRes = cudaMemcpy(t2yyD, t2yy, sizeof(float) * (*nzbtm) * (*nxbtm) * (*nybtm + 3), cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice, t2yy");
cudaRes = cudaMemcpy(t2yzD, t2yz, sizeof(float) * (*nzbtm + 1) * (*nxbtm) * (*nybtm + 3), cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice, t2yz");
cudaRes = cudaMemcpy(t2zzD, t2zz, sizeof(float) * (*nzbtm + 1) * (*nxbtm) * (*nybtm), cudaMemcpyHostToDevice);
CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice, t2zz");
//printf("done!\n");
return;
}
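// Copies the updated stress fields back to the host once the stress kernels
// have finished.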
void cpy_d2h_stressOutputsC(float *t1xx,
float *t1xy,
float *t1xz,
float *t1yy,
float *t1yz,
float *t1zz,
float *t2xx,
float *t2xy,
float *t2xz,
float *t2yy,
float *t2yz,
float *t2zz,
int *nxtop,
int *nytop,
int *nztop,
int *nxbtm,
int *nybtm,
int *nzbtm)
{
//printf("[CUDA] stress cpy d2h for output .....");
// printf("\nnxtop=%d, nytop=%d, nztop=%d\n", *nxtop, *nytop, *nztop);
// printf("nxbtm=%d, nybtm=%d, nzbtm=%d\n", *nxbtm, *nybtm, *nzbtm);
cudaError_t cudaRes;
cudaRes = cudaMemcpy(t1xx, t1xxD, sizeof(float) * (*nztop) * (*nxtop + 3) * (*nytop), cudaMemcpyDeviceToHost);
CHECK_ERROR(cudaRes, "outputDataCopyDeviceToHost, t1xx");
cudaRes = cudaMemcpy(t1xy, t1xyD, sizeof(float) * (*nztop) * (*nxtop + 3) * (*nytop + 3), cudaMemcpyDeviceToHost);
CHECK_ERROR(cudaRes, "outputDataCopyDeviceToHost, t1xy");
cudaRes = cudaMemcpy(t1xz, t1xzD, sizeof(float) * (*nztop + 1) * (*nxtop + 3) * (*nytop), cudaMemcpyDeviceToHost);
CHECK_ERROR(cudaRes, "outputDataCopyDeviceToHost, t1xz");
cudaRes = cudaMemcpy(t1yy, t1yyD, sizeof(float) * (*nztop) * (*nxtop) * (*nytop + 3), cudaMemcpyDeviceToHost);
CHECK_ERROR(cudaRes, "outputDataCopyDeviceToHost, t1yy");
cudaRes = cudaMemcpy(t1yz, t1yzD, sizeof(float) * (*nztop + 1) * (*nxtop) * (*nytop + 3), cudaMemcpyDeviceToHost);
CHECK_ERROR(cudaRes, "outputDataCopyDeviceToHost, t1yz");
cudaRes = cudaMemcpy(t1zz, t1zzD, sizeof(float) * (*nztop) * (*nxtop) * (*nytop), cudaMemcpyDeviceToHost);
CHECK_ERROR(cudaRes, "outputDataCopyDeviceToHost, t1zz");
cudaRes = cudaMemcpy(t2xx, t2xxD, sizeof(float) * (*nzbtm) * (*nxbtm + 3) * (*nybtm), cudaMemcpyDeviceToHost);
CHECK_ERROR(cudaRes, "outputDataCopyDeviceToHost, t2xx");
cudaRes = cudaMemcpy(t2xy, t2xyD, sizeof(float) * (*nzbtm) * (*nxbtm + 3) * (*nybtm + 3), cudaMemcpyDeviceToHost);
CHECK_ERROR(cudaRes, "outputDataCopyDeviceToHost, t2xy");
cudaRes = cudaMemcpy(t2xz, t2xzD, sizeof(float) * (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm), cudaMemcpyDeviceToHost);
CHECK_ERROR(cudaRes, "outputDataCopyDeviceToHost, t2xz");
cudaRes = cudaMemcpy(t2yy, t2yyD, sizeof(float) * (*nzbtm) * (*nxbtm) * (*nybtm + 3), cudaMemcpyDeviceToHost);
CHECK_ERROR(cudaRes, "outputDataCopyDeviceToHost, t2yy");
cudaRes = cudaMemcpy(t2yz, t2yzD, sizeof(float) * (*nzbtm + 1) * (*nxbtm) * (*nybtm + 3), cudaMemcpyDeviceToHost);
CHECK_ERROR(cudaRes, "outputDataCopyDeviceToHost, t2yz");
cudaRes = cudaMemcpy(t2zz, t2zzD, sizeof(float) * (*nzbtm + 1) * (*nxbtm) * (*nybtm), cudaMemcpyDeviceToHost);
CHECK_ERROR(cudaRes, "outputDataCopyDeviceToHost, t2zz");
//printf("done!\n");
// int i;
// for(i=0; i<(*nzbtm) * (*nxbtm + 3) * (*nybtm); i++)
// {
// //printf("%f ", t2xx[i]);
// }
// printf("\n");
return;
}
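// Frees every device allocation and reports the accumulated H2D, D2H, and
// compute timings. The lbx/lby guards must mirror the ones used at
// allocation time so only buffers that were actually created are freed.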
void free_device_memC(int *lbx, int *lby)
{
//debug---------------------------------------------------
printf("[CUDA] id = %d, vel, H2D =, %.3f, D2H =, %.3f, comp =, %lf\n", procID, totalTimeH2DV, totalTimeD2HV, totalTimeCompV);
printf("[CUDA] id = %d, str, H2D =, %.3f, D2H =, %.3f, comp =, %lf\n", procID, totalTimeH2DS, totalTimeD2HS, totalTimeCompS);
//-------------------------------------------------
cudaFree(nd1_velD);
cudaFree(nd1_txyD);
cudaFree(nd1_txzD);
cudaFree(nd1_tyyD);
cudaFree(nd1_tyzD);
cudaFree(rhoD);
cudaFree(drvh1D);
cudaFree(drti1D);
cudaFree(drth1D);
cudaFree(idmat1D);
cudaFree(dxi1D);
cudaFree(dyi1D);
cudaFree(dzi1D);
cudaFree(dxh1D);
cudaFree(dyh1D);
cudaFree(dzh1D);
cudaFree(t1xxD);
cudaFree(t1xyD);
cudaFree(t1xzD);
cudaFree(t1yyD);
cudaFree(t1yzD);
cudaFree(t1zzD);
cudaFree(v1xD); //output
cudaFree(v1yD);
cudaFree(v1zD);
if (lbx[1] >= lbx[0])
{
cudaFree(damp1_xD);
cudaFree(t1xx_pxD);
cudaFree(t1xy_pxD);
cudaFree(t1xz_pxD);
cudaFree(t1yy_pxD);
cudaFree(qt1xx_pxD);
cudaFree(qt1xy_pxD);
cudaFree(qt1xz_pxD);
cudaFree(qt1yy_pxD);
cudaFree(v1x_pxD);
cudaFree(v1y_pxD);
cudaFree(v1z_pxD);
}
if (lby[1] >= lby[0])
{
cudaFree(damp1_yD);
cudaFree(t1xx_pyD);
cudaFree(t1xy_pyD);
cudaFree(t1yy_pyD);
cudaFree(t1yz_pyD);
cudaFree(qt1xx_pyD);
cudaFree(qt1xy_pyD);
cudaFree(qt1yy_pyD);
cudaFree(qt1yz_pyD);
cudaFree(v1x_pyD);
cudaFree(v1y_pyD);
cudaFree(v1z_pyD);
}
cudaFree(qt1xxD);
cudaFree(qt1xyD);
cudaFree(qt1xzD);
cudaFree(qt1yyD);
cudaFree(qt1yzD);
cudaFree(qt1zzD);
cudaFree(clamdaD);
cudaFree(cmuD);
cudaFree(epdtD);
cudaFree(qwpD);
cudaFree(qwsD);
cudaFree(qwt1D);
cudaFree(qwt2D);
//-------------------------------------
cudaFree(nd2_velD);
cudaFree(nd2_txyD);
cudaFree(nd2_txzD);
cudaFree(nd2_tyyD);
cudaFree(nd2_tyzD);
cudaFree(drvh2D);
cudaFree(drti2D);
cudaFree(drth2D);
cudaFree(idmat2D);
cudaFree(damp2_zD);
cudaFree(dxi2D);
cudaFree(dyi2D);
cudaFree(dzi2D);
cudaFree(dxh2D);
cudaFree(dyh2D);
cudaFree(dzh2D);
cudaFree(t2xxD);
cudaFree(t2xyD);
cudaFree(t2xzD);
cudaFree(t2yyD);
cudaFree(t2yzD);
cudaFree(t2zzD);
cudaFree(qt2xxD);
cudaFree(qt2xyD);
cudaFree(qt2xzD);
cudaFree(qt2yyD);
cudaFree(qt2yzD);
cudaFree(qt2zzD);
if (lbx[1] >= lbx[0])
{
cudaFree(damp2_xD);
cudaFree(t2xx_pxD);
cudaFree(t2xy_pxD);
cudaFree(t2xz_pxD);
cudaFree(t2yy_pxD);
cudaFree(qt2xx_pxD);
cudaFree(qt2xy_pxD);
cudaFree(qt2xz_pxD);
cudaFree(qt2yy_pxD);
cudaFree(v2x_pxD);
cudaFree(v2y_pxD);
cudaFree(v2z_pxD);
}
if (lby[1] >= lby[0])
{
cudaFree(damp2_yD);
cudaFree(t2xx_pyD);
cudaFree(t2xy_pyD);
cudaFree(t2yy_pyD);
cudaFree(t2yz_pyD);
cudaFree(qt2xx_pyD);
cudaFree(qt2xy_pyD);
cudaFree(qt2yy_pyD);
cudaFree(qt2yz_pyD);
cudaFree(v2x_pyD);
cudaFree(v2y_pyD);
cudaFree(v2z_pyD);
}
cudaFree(t2xx_pzD);
cudaFree(t2xz_pzD);
cudaFree(t2yz_pzD);
cudaFree(t2zz_pzD);
cudaFree(qt2xx_pzD);
cudaFree(qt2xz_pzD);
cudaFree(qt2yz_pzD);
cudaFree(qt2zz_pzD);
cudaFree(v2xD); //output
cudaFree(v2yD);
cudaFree(v2zD);
cudaFree(v2x_pzD);
cudaFree(v2y_pzD);
cudaFree(v2z_pzD);
//printf("[CUDA] memory space is freed.\n");
return;
}
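// Host driver for one velocity update: upload stresses and velocities,
// launch the interior and PML velocity kernels for both regions,
// synchronize, and copy the results back, timing each phase. Grid sizes use
// extent/blockSize + 1 blocks (e.g. extent 17, blockSize 8 -> 3 blocks);
// threads past the end of a range return at the bounds check in each kernel.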
void compute_velocityC(int *nztop, int *nztm1, float *ca, int *lbx,
int *lby, int *nd1_vel, float *rhoM, float *drvh1M, float *drti1M,
float *damp1_xM, float *damp1_yM, int *idmat1M, float *dxi1M, float *dyi1M,
float *dzi1M, float *dxh1M, float *dyh1M, float *dzh1M, float *t1xxM,
float *t1xyM, float *t1xzM, float *t1yyM, float *t1yzM, float *t1zzM,
void **v1xMp, void **v1yMp, void **v1zMp, float *v1x_pxM, float *v1y_pxM,
float *v1z_pxM, float *v1x_pyM, float *v1y_pyM, float *v1z_pyM,
int *nzbm1, int *nd2_vel, float *drvh2M, float *drti2M,
int *idmat2M, float *damp2_xM, float *damp2_yM, float *damp2_zM,
float *dxi2M, float *dyi2M, float *dzi2M, float *dxh2M, float *dyh2M,
float *dzh2M, float *t2xxM, float *t2xyM, float *t2xzM, float *t2yyM,
float *t2yzM, float *t2zzM, void **v2xMp, void **v2yMp, void **v2zMp,
float *v2x_pxM, float *v2y_pxM, float *v2z_pxM, float *v2x_pyM,
float *v2y_pyM, float *v2z_pyM, float *v2x_pzM, float *v2y_pzM,
float *v2z_pzM, int *nmat, int *mw1_pml1, int *mw2_pml1,
int *nxtop, int *nytop, int *mw1_pml, int *mw2_pml,
int *nxbtm, int *nybtm, int *nzbtm, int *myid)
{
//printf("[CUDA] velocity computation:\n");
//define the block and grid dimensions for the different kernels
int blockSizeX = 8;
int blockSizeY = 8;
float *v1xM, *v1yM, *v1zM, *v2xM, *v2yM, *v2zM;
// extract specific input/output pointers
v1xM=(float *) *v1xMp;
v1yM=(float *) *v1yMp;
v1zM=(float *) *v1zMp;
v2xM=(float *) *v2xMp;
v2yM=(float *) *v2yMp;
v2zM=(float *) *v2zMp;
procID = *myid;
gettimeofday(&t1, NULL);
cpy_h2d_velocityInputsC(t1xxM, t1xyM, t1xzM, t1yyM, t1yzM, t1zzM, t2xxM, t2xyM, t2xzM,
t2yyM, t2yzM, t2zzM, nxtop, nytop, nztop, nxbtm, nybtm, nzbtm);
cpy_h2d_velocityOutputsC(v1xM, v1yM, v1zM, v2xM, v2yM, v2zM, nxtop, nytop, nztop, nxbtm, nybtm, nzbtm);
gettimeofday(&t2, NULL);
tmpTime = 1000.0 * (t2.tv_sec - t1.tv_sec) + (t2.tv_usec - t1.tv_usec) / 1000.0;
totalTimeH2DV += tmpTime;
gettimeofday(&t1, NULL);
dim3 dimBlock(blockSizeX, blockSizeY);
int gridSizeX1 = (nd1_vel[3] - nd1_vel[2])/blockSizeX + 1;
int gridSizeY1 = (nd1_vel[9] - nd1_vel[8])/blockSizeY + 1;
dim3 dimGrid1(gridSizeX1, gridSizeY1);
// printf("myid = %d, grid1 = (%d, %d)\n", *myid, gridSizeX1, gridSizeY1);
//CUDA code
velocity_inner_IC<<<dimGrid1, dimBlock>>>(*nztop,
*nztm1,
*ca,
nd1_velD,
rhoD,
idmat1D,
dxi1D,
dyi1D,
dzi1D,
dxh1D,
dyh1D,
dzh1D,
t1xxD,
t1xyD,
t1xzD,
t1yyD,
t1yzD,
t1zzD,
*nxtop, //dimension #
*nytop,
v1xD, //output
v1yD,
v1zD);
// printf("velocity_inner_IC called!\n");
int gridSizeX2 = (nd1_vel[5] - nd1_vel[0])/blockSizeX + 1;
int gridSizeY2 = (lbx[1] - lbx[0])/blockSizeY + 1;
dim3 dimGrid2(gridSizeX2, gridSizeY2);
// printf("myid = %d, grid2 = (%d, %d)\n", *myid, gridSizeX2, gridSizeY2);
if (lbx[1] >= lbx[0])
{
vel_PmlX_IC<<<dimGrid2, dimBlock>>>(*ca,
lbx[0],
lbx[1],
nd1_velD,
rhoD,
drvh1D,
drti1D,
damp1_xD,
idmat1D,
dxi1D,
dyi1D,
dzi1D,
dxh1D,
dyh1D,
dzh1D,
t1xxD,
t1xyD,
t1xzD,
t1yyD,
t1yzD,
t1zzD,
*mw1_pml1, //dimension #
*mw1_pml,
*nxtop,
*nytop,
*nztop,
v1xD, //output
v1yD,
v1zD,
v1x_pxD,
v1y_pxD,
v1z_pxD);
// printf("vel_PmlX_IC called!\n");
}
int gridSizeX3 = (lby[1] - lby[0])/blockSizeX + 1;
int gridSizeY3 = (nd1_vel[11] - nd1_vel[6])/blockSizeY + 1;
dim3 dimGrid3(gridSizeX3, gridSizeY3);
// printf("myid = %d, grid3 = (%d, %d)\n", *myid, gridSizeX3, gridSizeY3);
if (lby[1] >= lby[0])
{
vel_PmlY_IC<<<dimGrid3, dimBlock>>>(*nztop,
*ca,
lby[0],
lby[1],
nd1_velD,
rhoD,
drvh1D,
drti1D,
idmat1D,
damp1_yD,
dxi1D,
dyi1D,
dzi1D,
dxh1D,
dyh1D,
dzh1D,
t1xxD,
t1xyD,
t1xzD,
t1yyD,
t1yzD,
t1zzD,
*mw1_pml1, //dimension #s
*mw1_pml,
*nxtop,
*nytop,
v1xD, //output
v1yD,
v1zD,
v1x_pyD,
v1y_pyD,
v1z_pyD);
// printf("vel_PmlY_IC called!\n");
}
int gridSizeX4 = (nd2_vel[3] - nd2_vel[2])/blockSizeX + 1;
int gridSizeY4 = (nd2_vel[9] - nd2_vel[8])/blockSizeY + 1;
dim3 dimGrid4(gridSizeX4, gridSizeY4);
// printf("myid = %d, grid4 = (%d, %d)\n", *myid, gridSizeX4, gridSizeY4);
velocity_inner_IIC<<<dimGrid4, dimBlock>>>(*ca,
nd2_velD,
rhoD,
dxi2D,
dyi2D,
dzi2D,
dxh2D,
dyh2D,
dzh2D,
idmat2D,
t2xxD,
t2xyD,
t2xzD,
t2yyD,
t2yzD,
t2zzD,
*nxbtm,
*nybtm,
*nzbtm,
v2xD, //output
v2yD,
v2zD);
// printf("velocity_inner_IIC called!\n");
int gridSizeX5 = (nd2_vel[5] - nd2_vel[0])/blockSizeX + 1;
int gridSizeY5 = (lbx[1] - lbx[0])/blockSizeY + 1;
dim3 dimGrid5(gridSizeX5, gridSizeY5);
// printf("myid = %d, grid5 = (%d, %d)\n", *myid, gridSizeX5, gridSizeY5);
if (lbx[1] >= lbx[0])
{
vel_PmlX_IIC<<<dimGrid5, dimBlock>>>(*nzbm1,
*ca,
lbx[0],
lbx[1],
nd2_velD,
drvh2D,
drti2D,
rhoD,
damp2_xD,
idmat2D,
dxi2D,
dyi2D,
dzi2D,
dxh2D,
dyh2D,
dzh2D,
t2xxD,
t2xyD,
t2xzD,
t2yyD,
t2yzD,
t2zzD,
*mw2_pml1, //dimension #s
*mw2_pml,
*nxbtm,
*nybtm,
*nzbtm,
v2xD, //output
v2yD,
v2zD,
v2x_pxD,
v2y_pxD,
v2z_pxD);
// printf("vel_PmlX_IIC called!\n");
}
int gridSizeX6 = (lby[1] - lby[0])/blockSizeX + 1;
int gridSizeY6 = (nd2_vel[11] - nd2_vel[6])/blockSizeY + 1;
dim3 dimGrid6(gridSizeX6, gridSizeY6);
// printf("myid = %d, grid = (%d, %d)\n", *myid, gridSizeX6, gridSizeY6);
if (lby[1] >= lby[0])
{
vel_PmlY_IIC<<<dimGrid6, dimBlock>>>(*nzbm1,
*ca,
lby[0],
lby[1],
nd2_velD,
drvh2D,
drti2D,
rhoD,
damp2_yD,
idmat2D,
dxi2D,
dyi2D,
dzi2D,
dxh2D,
dyh2D,
dzh2D,
t2xxD,
t2xyD,
t2xzD,
t2yyD,
t2yzD,
t2zzD,
*mw2_pml1, //dimension #s
*mw2_pml,
*nxbtm,
*nybtm,
*nzbtm,
v2xD, //output
v2yD,
v2zD,
v2x_pyD,
v2y_pyD,
v2z_pyD);
// printf("vel_PmlY_IIC called!\n");
}
int gridSizeX7 = (nd2_vel[5] - nd2_vel[0])/blockSizeX + 1;
int gridSizeY7 = (nd2_vel[11] - nd2_vel[6])/blockSizeY + 1;
dim3 dimGrid7(gridSizeX7, gridSizeY7);
// printf("myid = %d, grid7 = (%d, %d)\n", *myid, gridSizeX7, gridSizeY7);
vel_PmlZ_IIC<<<dimGrid7, dimBlock>>>(*nzbm1,
*ca,
nd2_velD,
drvh2D,
drti2D,
rhoD,
damp2_zD,
idmat2D,
dxi2D,
dyi2D,
dzi2D,
dxh2D,
dyh2D,
dzh2D,
t2xxD,
t2xyD,
t2xzD,
t2yyD,
t2yzD,
t2zzD,
*mw2_pml1, //dimension #s
*mw2_pml,
*nxbtm,
*nybtm,
*nzbtm,
v2xD, //output
v2yD,
v2zD,
v2x_pzD,
v2y_pzD,
v2z_pzD);
// printf("vel_PmlZ_IIC called!\n");
cudaDeviceSynchronize(); //cudaThreadSynchronize() is deprecated; this is its replacement
gettimeofday(&t2, NULL);
tmpTime = 1000.0 * (t2.tv_sec - t1.tv_sec) + (t2.tv_usec - t1.tv_usec) / 1000.0;
totalTimeCompV += tmpTime;
gettimeofday(&t1, NULL);
cpy_d2h_velocityOutputsC(v1xM, v1yM, v1zM, v2xM, v2yM, v2zM, nxtop, nytop, nztop, nxbtm, nybtm, nzbtm);
gettimeofday(&t2, NULL);
tmpTime = 1000.0 * (t2.tv_sec - t1.tv_sec) + (t2.tv_usec - t1.tv_usec) / 1000.0;
totalTimeD2HV += tmpTime;
// for debug
// int size = (*nztop + 2) * (*nxtop + 3) * (*nytop + 3);
// write_output(v1xM, size, "OUTPUT_ARRAYS/v1xM.txt");
// write_output(v1yM, size, "OUTPUT_ARRAYS/v1yM.txt");
// write_output(v1zM, size, "OUTPUT_ARRAYS/v1zM.txt");
// size = (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm + 3);
// write_output(v2xM, size, "OUTPUT_ARRAYS/v2xM.txt");
// write_output(v2yM, size, "OUTPUT_ARRAYS/v2yM.txt");
// write_output(v2zM, size, "OUTPUT_ARRAYS/v2zM.txt");
return;
}
#ifdef __cplusplus
}
#endif
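// Region-I interior velocity update. Each thread owns one (i,j) column and
// marches over k: levels 1, 2, and nztop get special one-sided z-stencils,
// while 3..nztm1 use the full fourth-order stencil.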
__global__ void velocity_inner_IC(int nztop,
int nztm1,
float ca,
int *nd1_vel,
float *rhoM,
int *idmat1M,
float *dxi1M,
float *dyi1M,
float *dzi1M,
float *dxh1M,
float *dyh1M,
float *dzh1M,
float *t1xxM,
float *t1xyM,
float *t1xzM,
float *t1yyM,
float *t1yzM,
float *t1zzM,
int nxtop, //dimension #
int nytop,
float *v1xM, //output
float *v1yM,
float *v1zM)
{
int i, j, k, k3;
float dtxz, dtyz, dtzz;
j = blockIdx.x * blockDim.x + threadIdx.x + nd1_vel[2];
i = blockIdx.y * blockDim.y + threadIdx.y + nd1_vel[8];
if (j > nd1_vel[3] || i > nd1_vel[9])
{
return;
}
for (k3 = 1; k3 <= 3; k3++)
{
k=k3;
if(k3==3) k=nztop;
if(k==1)
{
dtxz=(dzi1(3,k)-dzi1(1,k))*t1xz(2,i,j)+dzi1(4,k)*t1xz(3,i,j);
dtyz=(dzi1(3,k)-dzi1(1,k))*t1yz(2,i,j)+dzi1(4,k)*t1yz(3,i,j);
dtzz=dzh1(3,k)/ca*(35./8.*t1zz(k,i,j) -35./24.*t1zz(k+1,i,j)+
21./40.*t1zz(k+2,i,j)-5./56.*t1zz(k+3,i,j));
}
else if(k==2)
{
dtxz=dzi1(2,k)*t1xz(2,i,j)+dzi1(3,k)*t1xz(3,i,j)+dzi1(4,k)*t1xz(4,i,j);
dtyz=dzi1(2,k)*t1yz(2,i,j)+dzi1(3,k)*t1yz(3,i,j)+dzi1(4,k)*t1yz(4,i,j);
dtzz=dzh1(3,k)/ca*(-31./24.*t1zz(k-1,i,j) +29./24.*t1zz(k,i,j)-
3./40.*t1zz(k+1,i,j)+1./168.*t1zz(k+2,i,j));
}
else
{
dtxz=dzi1(2,k)/ca*(t1xz(k,i,j)-t1xz(k+1,i,j));
dtyz=dzi1(2,k)/ca*(t1yz(k,i,j)-t1yz(k+1,i,j));
dtzz=dzh1(2,k)/ca*(t1zz(k-1,i,j)-t1zz(k,i,j));
}
v1x(k,i,j)=v1x(k,i,j)+
0.5*(rho(idmat1(k,i,j))+rho(idmat1(k,i+1,j)))*
(dxi1(1,i)*t1xx(k,i-1,j)+dxi1(2,i)*t1xx(k,i, j)+
dxi1(3,i)*t1xx(k,i+1,j)+dxi1(4,i)*t1xx(k,i+2,j)+
dyh1(1,j)*t1xy(k,i,j-2)+dyh1(2,j)*t1xy(k,i,j-1)+
dyh1(3,j)*t1xy(k,i,j )+dyh1(4,j)*t1xy(k,i,j+1)+dtxz);
v1y(k,i,j)=v1y(k,i,j)+
0.5*(rho(idmat1(k,i,j))+rho(idmat1(k,i,j+1)))*
(dxh1(1,i)*t1xy(k,i-2,j)+dxh1(2,i)*t1xy(k,i-1,j)+
dxh1(3,i)*t1xy(k,i, j)+dxh1(4,i)*t1xy(k,i+1,j)+
dyi1(1,j)*t1yy(k,i,j-1)+dyi1(2,j)*t1yy(k,i,j )+
dyi1(3,j)*t1yy(k,i,j+1)+dyi1(4,j)*t1yy(k,i,j+2)+dtyz);
v1z(k,i,j)=v1z(k,i,j)+
0.5*(rho(idmat1(k,i,j))+rho(idmat1(k-1,i,j)))*
(dxh1(1,i)*t1xz(k,i-2,j)+dxh1(2,i)*t1xz(k,i-1,j)+
dxh1(3,i)*t1xz(k,i, j)+dxh1(4,i)*t1xz(k,i+1,j)+
dyh1(1,j)*t1yz(k,i,j-2)+dyh1(2,j)*t1yz(k,i,j-1)+
dyh1(3,j)*t1yz(k,i,j )+dyh1(4,j)*t1yz(k,i,j+1)+dtzz);
}
for (k = 3; k <=nztm1; k++)
{
v1x(k,i,j)=v1x(k,i,j)+
0.5*(rho(idmat1(k,i,j))+rho(idmat1(k,i+1,j)))*
(dxi1(1,i)*t1xx(k,i-1,j)+dxi1(2,i)*t1xx(k,i, j)+
dxi1(3,i)*t1xx(k,i+1,j)+dxi1(4,i)*t1xx(k,i+2,j)+
dyh1(1,j)*t1xy(k,i,j-2)+dyh1(2,j)*t1xy(k,i,j-1)+
dyh1(3,j)*t1xy(k,i,j )+dyh1(4,j)*t1xy(k,i,j+1)+
dzi1(1,k)*t1xz(k-1,i,j)+dzi1(2,k)*t1xz(k, i,j)+
dzi1(3,k)*t1xz(k+1,i,j)+dzi1(4,k)*t1xz(k+2,i,j));
v1y(k,i,j)=v1y(k,i,j)+
0.5*(rho(idmat1(k,i,j))+rho(idmat1(k,i,j+1)))*
(dxh1(1,i)*t1xy(k,i-2,j)+dxh1(2,i)*t1xy(k,i-1,j)+
dxh1(3,i)*t1xy(k,i, j)+dxh1(4,i)*t1xy(k,i+1,j)+
dyi1(1,j)*t1yy(k,i,j-1)+dyi1(2,j)*t1yy(k,i,j )+
dyi1(3,j)*t1yy(k,i,j+1)+dyi1(4,j)*t1yy(k,i,j+2)+
dzi1(1,k)*t1yz(k-1,i,j)+dzi1(2,k)*t1yz(k ,i,j)+
dzi1(3,k)*t1yz(k+1,i,j)+dzi1(4,k)*t1yz(k+2,i,j));
v1z(k,i,j)=v1z(k,i,j)+
0.5*(rho(idmat1(k,i,j))+rho(idmat1(k-1,i,j)))*
(dxh1(1,i)*t1xz(k,i-2,j)+dxh1(2,i)*t1xz(k,i-1,j)+
dxh1(3,i)*t1xz(k,i, j)+dxh1(4,i)*t1xz(k,i+1,j)+
dyh1(1,j)*t1yz(k,i,j-2)+dyh1(2,j)*t1yz(k,i,j-1)+
dyh1(3,j)*t1yz(k,i,j )+dyh1(4,j)*t1yz(k,i,j+1)+
dzh1(1,k)*t1zz(k-2,i,j)+dzh1(2,k)*t1zz(k-1,i,j)+
dzh1(3,k)*t1zz(k ,i,j)+dzh1(4,k)*t1zz(k+1,i,j));
}
return;
}
//-----------------------------------------------------------------------
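// Region-II interior velocity update; same thread-per-column layout as
// velocity_inner_IC, with k = 1 peeled off and handled separately before the
// main k loop.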
__global__ void velocity_inner_IIC(float ca,
int *nd2_vel,
float *rhoM,
float *dxi2M,
float *dyi2M,
float *dzi2M,
float *dxh2M,
float *dyh2M,
float *dzh2M,
int *idmat2M,
float *t2xxM,
float *t2xyM,
float *t2xzM,
float *t2yyM,
float *t2yzM,
float *t2zzM,
int nxbtm, //dimension #s
int nybtm,
int nzbtm,
float *v2xM, //output
float *v2yM,
float *v2zM)
{
int i, j, k;
j = blockIdx.x * blockDim.x + threadIdx.x + nd2_vel[2];
i = blockIdx.y * blockDim.y + threadIdx.y + nd2_vel[8];
if (j > nd2_vel[3] || i > nd2_vel[9])
{
return;
}
//for (j = nd2_vel(3); j <= nd2_vel(4); j++)
//for (j = nd2_vel[2]; j <= nd2_vel[3]; j++)
//{
//for (i = nd2_vel[8]; i <= nd2_vel[9]; i++)
//{
k=1;
v2x(k,i,j)=v2x(k,i,j)+
0.5*(rho(idmat2(k,i,j))+rho(idmat2(k,i+1,j)))*
(dxi2(1,i)*t2xx(k,i-1,j)+dxi2(2,i)*t2xx(k,i, j)+
dxi2(3,i)*t2xx(k,i+1,j)+dxi2(4,i)*t2xx(k,i+2,j)+
dyh2(1,j)*t2xy(k,i,j-2)+dyh2(2,j)*t2xy(k,i,j-1)+
dyh2(3,j)*t2xy(k,i,j )+dyh2(4,j)*t2xy(k,i,j+1)+
dzi2(1,k)*t2xz(k-1,i,j)+dzi2(2,k)*t2xz(k,i,j )+
dzi2(3,k)*t2xz(k+1,i,j)+dzi2(4,k)*t2xz(k+2,i,j));
v2y(k,i,j)=v2y(k,i,j)+
0.5*(rho(idmat2(k,i,j))+rho(idmat2(k,i,j+1)))*
(dxh2(1,i)*t2xy(k,i-2,j)+dxh2(2,i)*t2xy(k,i-1,j)+
dxh2(3,i)*t2xy(k,i, j)+dxh2(4,i)*t2xy(k,i+1,j)+
dyi2(1,j)*t2yy(k,i,j-1)+dyi2(2,j)*t2yy(k,i,j)+
dyi2(3,j)*t2yy(k,i,j+1)+dyi2(4,j)*t2yy(k,i,j+2)+
dzi2(1,k)*t2yz(k-1,i,j)+dzi2(2,k)*t2yz(k,i,j)+
dzi2(3,k)*t2yz(k+1,i,j)+dzi2(4,k)*t2yz(k+2,i,j));
v2z(k,i,j)=v2z(k,i,j)+
0.5*(rho(idmat2(k,i,j))+rho(idmat2(k-1,i,j)))*
(dxh2(1,i)*t2xz(k,i-2,j)+dxh2(2,i)*t2xz(k,i-1,j)+
dxh2(3,i)*t2xz(k,i, j)+dxh2(4,i)*t2xz(k,i+1,j)+
dyh2(1,j)*t2yz(k,i,j-2)+dyh2(2,j)*t2yz(k,i,j-1)+
dyh2(3,j)*t2yz(k,i,j )+dyh2(4,j)*t2yz(k,i,j+1)+
dzh2(2,k)/ca*(t2zz(k-1,i,j)-t2zz(k,i,j)));
//for (k = 2; k <= nd2_vel(16); k++)
for (k = 2; k <= nd2_vel[15]; k++)
{
v2x(k,i,j)=v2x(k,i,j)+
0.5*(rho(idmat2(k,i,j))+rho(idmat2(k,i+1,j)))*
(dxi2(1,i)*t2xx(k,i-1,j)+dxi2(2,i)*t2xx(k,i, j)+
dxi2(3,i)*t2xx(k,i+1,j)+dxi2(4,i)*t2xx(k,i+2,j)+
dyh2(1,j)*t2xy(k,i,j-2)+dyh2(2,j)*t2xy(k,i,j-1)+
dyh2(3,j)*t2xy(k,i,j )+dyh2(4,j)*t2xy(k,i,j+1)+
dzi2(1,k)*t2xz(k-1,i,j)+dzi2(2,k)*t2xz(k,i,j )+
dzi2(3,k)*t2xz(k+1,i,j)+dzi2(4,k)*t2xz(k+2,i,j));
v2y(k,i,j)=v2y(k,i,j)+
0.5*(rho(idmat2(k,i,j))+rho(idmat2(k,i,j+1)))*
(dxh2(1,i)*t2xy(k,i-2,j)+dxh2(2,i)*t2xy(k,i-1,j)+
dxh2(3,i)*t2xy(k,i, j)+dxh2(4,i)*t2xy(k,i+1,j)+
dyi2(1,j)*t2yy(k,i,j-1)+dyi2(2,j)*t2yy(k,i,j)+
dyi2(3,j)*t2yy(k,i,j+1)+dyi2(4,j)*t2yy(k,i,j+2)+
dzi2(1,k)*t2yz(k-1,i,j)+dzi2(2,k)*t2yz(k,i,j)+
dzi2(3,k)*t2yz(k+1,i,j)+dzi2(4,k)*t2yz(k+2,i,j));
v2z(k,i,j)=v2z(k,i,j)+
0.5*(rho(idmat2(k,i,j))+rho(idmat2(k-1,i,j)))*
(dxh2(1,i)*t2xz(k,i-2,j)+dxh2(2,i)*t2xz(k,i-1,j)+
dxh2(3,i)*t2xz(k,i, j)+dxh2(4,i)*t2xz(k,i+1,j)+
dyh2(1,j)*t2yz(k,i,j-2)+dyh2(2,j)*t2yz(k,i,j-1)+
dyh2(3,j)*t2yz(k,i,j )+dyh2(4,j)*t2yz(k,i,j+1)+
dzh2(1,k)*t2zz(k-2,i,j)+dzh2(2,k)*t2zz(k-1,i,j)+
dzh2(3,k)*t2zz(k, i,j)+dzh2(4,k)*t2zz(k+1,i,j));
}
//}
//}
return;
}
//-----------------------------------------------------------------------
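// Region-I PML-x velocity update. Each thread first rebuilds its ib offset
// into the split-field arrays (v1*_px) by counting the i-ranges of the
// preceding lb slabs; the original serial loops carried ib across iterations.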
__global__ void vel_PmlX_IC(float ca,
int lbx0,
int lbx1,
int *nd1_vel,
float *rhoM,
float *drvh1M,
float *drti1M,
float *damp1_xM,
int *idmat1M,
float *dxi1M,
float *dyi1M,
float *dzi1M,
float *dxh1M,
float *dyh1M,
float *dzh1M,
float *t1xxM,
float *t1xyM,
float *t1xzM,
float *t1yyM,
float *t1yzM,
float *t1zzM,
int mw1_pml1, //dimension #
int mw1_pml,
int nxtop,
int nytop,
int nztop,
float *v1xM, //output
float *v1yM,
float *v1zM,
float *v1x_pxM,
float *v1y_pxM,
float *v1z_pxM)
{
// !Compute the velocities in region of PML-x-I
// use grid_node_comm
// use wave_field_comm
// implicit NONE
int i,j,k,lb,ib,kb;
float rth,rti,damp0,dmpx2,dmpx1,dmpyz2,dmpyz1,ro1,rox,roy,roz,
vtmpx,vtmpy,vtmpz,dtxz,dtyz,dtzz,dtxy,dtyy,dtzy;
j = blockIdx.x * blockDim.x + threadIdx.x + nd1_vel[0];
lb = blockIdx.y * blockDim.y + threadIdx.y + lbx0;
//int nv2x=(lbx(2) - lbx(1) + 1) * mw1_pml;
int nv2x=(lbx1 - lbx0 + 1) * mw1_pml;
//if ( lbx(1)>lbx(2) ) return;
if (lbx0 > lbx1)
{
return;
}
if (j > nd1_vel[5] || lb > lbx1)
{
return;
}
//calculate the value of ib
ib = 0;
for (k = lbx0; k < lb; k++)
{
for (i = nd1_vel[6+4*k]; i <= nd1_vel[7+4*k]; i++)
{
ib++;
}
}
//for (j = nd1_vel(1); j <= nd1_vel(6); j++)
//for (j = nd1_vel[0]; j <= nd1_vel[5]; j++)
//{
//ib=0;
//for (lb = lbx(1); lb <= lbx(2); lb++)
//for (lb = lbx[0]; lb <= lbx[1]; lb++)
//{
kb=0;
//for (i = nd1_vel(7+4*lb); i <= nd1_vel(8+4*lb); i++)
for (i = nd1_vel[6+4*lb]; i <= nd1_vel[7+4*lb]; i++)
{
kb=kb+1;
ib=ib+1;
rth=drvh1(kb,lb);
rti=drti1(kb,lb);
for (k = 1; k <= nztop; k++)
{
damp0=damp1_x(k,j,lb);
dmpx2=1./(1.+rth*damp0);
dmpx1=dmpx2*2.-1.;
dmpyz2=1./(1.+rti*damp0);
dmpyz1=dmpyz2*2.-1.;
ro1=rho(idmat1(k,i,j));
rox=0.5*(ro1+rho(idmat1(k,i+1,j)));
roy=0.5*(ro1+rho(idmat1(k,i,j+1)));
roz=0.5*(ro1+rho(idmat1(k-1,i,j)));
vtmpx=v1x(k,i,j)-v1x_px(k,ib,j);
vtmpy=v1y(k,i,j)-v1y_px(k,ib,j);
vtmpz=v1z(k,i,j)-v1z_px(k,ib,j);
//if(j>nd1_vel(2) && j<nd1_vel(5))
if(j>nd1_vel[1] && j<nd1_vel[4])
{
dtxy=dyh1(1,j)*t1xy(k,i,j-2)+dyh1(2,j)*t1xy(k,i,j-1)+
dyh1(3,j)*t1xy(k,i,j )+dyh1(4,j)*t1xy(k,i,j+1);
dtyy=dyi1(1,j)*t1yy(k,i,j-1)+dyi1(2,j)*t1yy(k,i,j )+
dyi1(3,j)*t1yy(k,i,j+1)+dyi1(4,j)*t1yy(k,i,j+2);
dtzy=dyh1(1,j)*t1yz(k,i,j-2)+dyh1(2,j)*t1yz(k,i,j-1)+
dyh1(3,j)*t1yz(k,i,j )+dyh1(4,j)*t1yz(k,i,j+1);
if(k==1)
{
dtxz=(dzi1(3,k)-dzi1(1,k))*t1xz(2,i,j)+dzi1(4,k)*t1xz(3,i,j);
dtyz=(dzi1(3,k)-dzi1(1,k))*t1yz(2,i,j)+dzi1(4,k)*t1yz(3,i,j);
dtzz=dzh1(3,k)/ca*(35./8.*t1zz(k,i,j)-35./24.*t1zz(k+1,i,j)+
21./40.*t1zz(k+2,i,j)-5./56.*t1zz(k+3,i,j));
}
else if(k==2)
{
dtxz=dzi1(2,k)*t1xz(k,i,j)+
dzi1(3,k)*t1xz(k+1,i,j)+dzi1(4,k)*t1xz(k+2,i,j);
dtyz=dzi1(2,k)*t1yz(k,i,j)+
dzi1(3,k)*t1yz(k+1,i,j)+dzi1(4,k)*t1yz(k+2,i,j);
dtzz=dzh1(3,k)/ca*(-31./24.*t1zz(k-1,i,j)+29./24.*t1zz(k,i,j)-
3./40.*t1zz(k+1,i,j)+1./168.*t1zz(k+2,i,j));
}
else if(k==nztop)
{
dtxz=dzi1(2,k)/ca*(t1xz(k,i,j)-t1xz(k+1,i,j));
dtyz=dzi1(2,k)/ca*(t1yz(k,i,j)-t1yz(k+1,i,j));
dtzz=dzh1(2,k)/ca*(t1zz(k-1,i,j)-t1zz(k,i,j));
}
else
{
dtxz=dzi1(1,k)*t1xz(k-1,i,j)+dzi1(2,k)*t1xz(k, i,j)+
dzi1(3,k)*t1xz(k+1,i,j)+dzi1(4,k)*t1xz(k+2,i,j);
dtyz=dzi1(1,k)*t1yz(k-1,i,j)+dzi1(2,k)*t1yz(k ,i,j)+
dzi1(3,k)*t1yz(k+1,i,j)+dzi1(4,k)*t1yz(k+2,i,j);
dtzz=dzh1(1,k)*t1zz(k-2,i,j)+dzh1(2,k)*t1zz(k-1,i,j)+
dzh1(3,k)*t1zz(k ,i,j)+dzh1(4,k)*t1zz(k+1,i,j);
}
vtmpx=vtmpx+(dtxy+dtxz)*rox;
vtmpy=vtmpy+(dtyy+dtyz)*roy;
vtmpz=vtmpz+(dtzy+dtzz)*roz;
}
v1x_px(k,ib,j)=v1x_px(k,ib,j)*dmpx1+dmpx2*rox*
dxi1(2,i)/ca*(t1xx(k,i,j)-t1xx(k,i+1,j));
v1x(k,i,j)=vtmpx+v1x_px(k,ib,j);
v1y_px(k,ib,j)=v1y_px(k,ib,j)*dmpyz1+dmpyz2*roy*
dxh1(2,i)/ca*(t1xy(k,i-1,j)-t1xy(k,i,j));
v1y(k,i,j)=vtmpy+v1y_px(k,ib,j);
v1z_px(k,ib,j)=v1z_px(k,ib,j)*dmpyz1+dmpyz2*roz*
dxh1(2,i)/ca*(t1xz(k,i-1,j)-t1xz(k,i,j));
v1z(k,i,j)=vtmpz+v1z_px(k,ib,j);
}
}
//}
//}
return;
}
//-----------------------------------------------------------------------
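// Region-I PML-y velocity update; jbIni replays the jb counter of the
// original serial loops so each thread indexes the split fields v1*_py
// consistently.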
__global__ void vel_PmlY_IC(int nztop,
float ca,
int lby0,
int lby1,
int *nd1_vel,
float *rhoM,
float *drvh1M,
float *drti1M,
int *idmat1M,
float *damp1_yM,
float *dxi1M,
float *dyi1M,
float *dzi1M,
float *dxh1M,
float *dyh1M,
float *dzh1M,
float *t1xxM,
float *t1xyM,
float *t1xzM,
float *t1yyM,
float *t1yzM,
float *t1zzM,
int mw1_pml1, //dimension #s
int mw1_pml,
int nxtop,
int nytop,
float *v1xM, //output
float *v1yM,
float *v1zM,
float *v1x_pyM,
float *v1y_pyM,
float *v1z_pyM)
{
int i,j,k,lb,jb,kb, jbIni;
float rth,rti,damp0,dmpy2,dmpy1,dmpxz2,dmpxz1,ro1,rox,roy,roz,
dtxz,dtyz,dtzz,vtmpx,vtmpy,vtmpz;
//if( lby(1)>lby(2) )
if( lby0>lby1 )
return;
lb = blockDim.x * blockIdx.x + threadIdx.x + lby0;
i = blockDim.y * blockIdx.y + threadIdx.y + nd1_vel[6];
if (lb > lby1 || i > nd1_vel[11])
{
return;
}
jbIni = 0;
for (k = lby0; k < lb; k++)
{
for (j = nd1_vel[4*k]; j <= nd1_vel[1+4*k]; j++)
{
jbIni++;
}
}
jb = jbIni;
kb = 0;
//for (lb = lby(1); lb <= lby(2); lb++)
//for (lb = lby0; lb <= lby1; lb++)
//{
// kb=0;
// //for (i = nd1_vel(7); i <= nd1_vel(12); i++)
// for (i = nd1_vel[6]; i <= nd1_vel[11]; i++)
// {
//for (j = nd1_vel(1+4*lb); j <= nd1_vel(2+4*lb); j++)
for (j = nd1_vel[4*lb]; j <= nd1_vel[1+4*lb]; j++)
{
kb=kb+1;
jb=jb+1;
rth=drvh1(kb,lb);
rti=drti1(kb,lb);
for (k = 1; k <= nztop; k++)
{
damp0=damp1_y(k,i,lb);
dmpy2=1./(1.+rth*damp0);
dmpy1=dmpy2*2.-1.;
dmpxz2=1./(1.+rti*damp0);
dmpxz1=dmpxz2*2.-1.;
ro1=rho(idmat1(k,i,j));
rox=0.5*(ro1+rho(idmat1(k,i+1,j)));
roy=0.5*(ro1+rho(idmat1(k,i,j+1)));
roz=0.5*(ro1+rho(idmat1(k-1,i,j)));
if(k==1)
{
dtxz=(dzi1(3,k)-dzi1(1,k))*t1xz(2,i,j)+dzi1(4,k)*t1xz(3,i,j);
dtyz=(dzi1(3,k)-dzi1(1,k))*t1yz(2,i,j)+dzi1(4,k)*t1yz(3,i,j);
dtzz=dzh1(3,k)/ca*(35./8.*t1zz(k,i,j)-35./24.*t1zz(k+1,i,j)+
21./40.*t1zz(k+2,i,j)-5./56.*t1zz(k+3,i,j));
}
else if(k==2)
{
dtxz=dzi1(2,k)*t1xz(k,i,j)+
dzi1(3,k)*t1xz(k+1,i,j)+dzi1(4,k)*t1xz(k+2,i,j);
dtyz=dzi1(2,k)*t1yz(k,i,j)+
dzi1(3,k)*t1yz(k+1,i,j)+dzi1(4,k)*t1yz(k+2,i,j);
dtzz=dzh1(3,k)/ca*(-31./24.*t1zz(k-1,i,j)+29./24.*t1zz(k,i,j)-
3./40.*t1zz(k+1,i,j)+1./168.*t1zz(k+2,i,j));
}
else if(k==nztop)
{
dtxz=dzi1(2,k)/ca*(t1xz(k,i,j)-t1xz(k+1,i,j));
dtyz=dzi1(2,k)/ca*(t1yz(k,i,j)-t1yz(k+1,i,j));
dtzz=dzh1(2,k)/ca*(t1zz(k-1,i,j)-t1zz(k,i,j));
}
else
{
dtxz=dzi1(1,k)*t1xz(k-1,i,j)+dzi1(2,k)*t1xz(k, i,j)+
dzi1(3,k)*t1xz(k+1,i,j)+dzi1(4,k)*t1xz(k+2,i,j);
dtyz=dzi1(1,k)*t1yz(k-1,i,j)+dzi1(2,k)*t1yz(k ,i,j)+
dzi1(3,k)*t1yz(k+1,i,j)+dzi1(4,k)*t1yz(k+2,i,j);
dtzz=dzh1(1,k)*t1zz(k-2,i,j)+dzh1(2,k)*t1zz(k-1,i,j)+
dzh1(3,k)*t1zz(k ,i,j)+dzh1(4,k)*t1zz(k+1,i,j);
}
vtmpx=v1x(k,i,j)-v1x_py(k,i,jb)+dtxz*rox;
vtmpy=v1y(k,i,j)-v1y_py(k,i,jb)+dtyz*roy;
vtmpz=v1z(k,i,j)-v1z_py(k,i,jb)+dtzz*roz;
//if(i>nd1_vel(8) && i<nd1_vel(11))
if(i>nd1_vel[7] && i<nd1_vel[10])
{
vtmpx=vtmpx+
rox*(dxi1(1,i)*t1xx(k,i-1,j)+dxi1(2,i)*t1xx(k,i, j)+
dxi1(3,i)*t1xx(k,i+1,j)+dxi1(4,i)*t1xx(k,i+2,j));
vtmpy=vtmpy+
roy*(dxh1(1,i)*t1xy(k,i-2,j)+dxh1(2,i)*t1xy(k,i-1,j)+
dxh1(3,i)*t1xy(k,i, j)+dxh1(4,i)*t1xy(k,i+1,j));
vtmpz=vtmpz+
roz*(dxh1(1,i)*t1xz(k,i-2,j)+dxh1(2,i)*t1xz(k,i-1,j)+
dxh1(3,i)*t1xz(k,i, j)+dxh1(4,i)*t1xz(k,i+1,j));
}
v1x_py(k,i,jb)=v1x_py(k,i,jb)*dmpxz1+dmpxz2*
rox*dyh1(2,j)/ca*(t1xy(k,i,j-1)-t1xy(k,i,j));
v1x(k,i,j)=vtmpx+v1x_py(k,i,jb);
v1y_py(k,i,jb)=v1y_py(k,i,jb)*dmpy1+dmpy2*
roy*dyi1(2,j)/ca*(t1yy(k,i,j)-t1yy(k,i,j+1));
v1y(k,i,j)=vtmpy+v1y_py(k,i,jb);
v1z_py(k,i,jb)=v1z_py(k,i,jb)*dmpxz1+dmpxz2*
roz*dyh1(2,j)/ca*(t1yz(k,i,j-1)-t1yz(k,i,j));
v1z(k,i,j)=vtmpz+v1z_py(k,i,jb);
}
}
//}
//}
return;
}
//-----------------------------------------------------------------------
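// Region-II PML-x velocity update, mirroring vel_PmlX_IC for the bottom
// region: k = 1 uses two-point z-stencils and the z-derivative terms are
// dropped for k >= nd2_vel[16].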
__global__ void vel_PmlX_IIC(int nzbm1,
float ca,
int lbx0,
int lbx1,
int *nd2_vel,
float *drvh2M,
float *drti2M,
float *rhoM,
float *damp2_xM,
int *idmat2M,
float *dxi2M,
float *dyi2M,
float *dzi2M,
float *dxh2M,
float *dyh2M,
float *dzh2M,
float *t2xxM,
float *t2xyM,
float *t2xzM,
float *t2yyM,
float *t2yzM,
float *t2zzM,
int mw2_pml1, //dimension #s
int mw2_pml,
int nxbtm,
int nybtm,
int nzbtm,
float *v2xM, //output
float *v2yM,
float *v2zM,
float *v2x_pxM,
float *v2y_pxM,
float *v2z_pxM)
{
int i,j,k,lb,ib,kb;
float rth,rti,damp0,dmpx2,dmpx1,dmpyz2,dmpyz1,ro1,rox,roy,roz,
vtmpx,vtmpy,vtmpz,dtxy,dtyy,dtzy,dtxz,dtyz,dtzz;
//int nv2y = (lbx(2) - lbx(1) + 1) * mw2_pml;
int nv2y = (lbx1 - lbx0 + 1) * mw2_pml;
//if ( lbx(1)>lbx(2) ) return;
if ( lbx0>lbx1 ) return;
j = blockIdx.x * blockDim.x + threadIdx.x + nd2_vel[0];
lb = blockIdx.y * blockDim.y + threadIdx.y + lbx0;
if (j > nd2_vel[5] || lb > lbx1)
{
return;
}
ib = 0;
for (k = lbx0; k < lb; k++)
{
for (i = nd2_vel[6+4*k]; i <= nd2_vel[7+4*k]; i++)
{
ib++;
}
}
//for (j = nd2_vel(1); j <= nd2_vel(6); j++)
//for (j = nd2_vel[0]; j <= nd2_vel[5]; j++)
//{
//ib=0;
//for (lb = lbx(1); lb <= lbx(2); lb++)
//for (lb = lbx0; lb <= lbx1; lb++)
//{
kb=0;
//for (i = nd2_vel(7+4*lb); i <= nd2_vel(8+4*lb); i++)
for (i = nd2_vel[6+4*lb]; i <= nd2_vel[7+4*lb]; i++)
{
kb=kb+1;
ib=ib+1;
rth=drvh2(kb,lb);
rti=drti2(kb,lb);
for (k = 1; k <= nzbm1; k++)
{
damp0=damp2_x(k,j,lb);
dmpx2=1./(1.+rth*damp0);
dmpx1=dmpx2*2.-1.;
dmpyz2=1./(1.+rti*damp0);
dmpyz1=dmpyz2*2.-1.;
ro1=rho(idmat2(k,i,j));
rox=0.5*(ro1+rho(idmat2(k,i+1,j)));
roy=0.5*(ro1+rho(idmat2(k,i,j+1)));
roz=0.5*(ro1+rho(idmat2(k-1,i,j)));
vtmpx=v2x(k,i,j)-v2x_px(k,ib,j);
vtmpy=v2y(k,i,j)-v2y_px(k,ib,j);
vtmpz=v2z(k,i,j)-v2z_px(k,ib,j);
//if(j>nd2_vel(2) && j<nd2_vel(5))
if(j>nd2_vel[1] && j<nd2_vel[4])
{
dtxy=dyh2(1,j)*t2xy(k,i,j-2)+dyh2(2,j)*t2xy(k,i,j-1)+
dyh2(3,j)*t2xy(k,i,j )+dyh2(4,j)*t2xy(k,i,j+1);
dtyy=dyi2(1,j)*t2yy(k,i,j-1)+dyi2(2,j)*t2yy(k,i,j)+
dyi2(3,j)*t2yy(k,i,j+1)+dyi2(4,j)*t2yy(k,i,j+2);
dtzy=dyh2(1,j)*t2yz(k,i,j-2)+dyh2(2,j)*t2yz(k,i,j-1)+
dyh2(3,j)*t2yz(k,i,j )+dyh2(4,j)*t2yz(k,i,j+1);
if(k==1)
{
dtxz=dzi2(2,k)/ca*(t2xz(k,i,j)-t2xz(k+1,i,j));
dtyz=dzi2(2,k)/ca*(t2yz(k,i,j)-t2yz(k+1,i,j));
dtzz=dzh2(2,k)/ca*(t2zz(k-1,i,j)-t2zz(k,i,j));
}
//else if(k<nd2_vel(17))
else if(k<nd2_vel[16])
{
dtxz=dzi2(1,k)*t2xz(k-1,i,j)+dzi2(2,k)*t2xz(k,i,j)+
dzi2(3,k)*t2xz(k+1,i,j)+dzi2(4,k)*t2xz(k+2,i,j);
dtyz=dzi2(1,k)*t2yz(k-1,i,j)+dzi2(2,k)*t2yz(k,i,j)+
dzi2(3,k)*t2yz(k+1,i,j)+dzi2(4,k)*t2yz(k+2,i,j);
dtzz=dzh2(1,k)*t2zz(k-2,i,j)+dzh2(2,k)*t2zz(k-1,i,j)+
dzh2(3,k)*t2zz(k, i,j)+dzh2(4,k)*t2zz(k+1,i,j);
}
else
{
dtxz=0.0;
dtyz=0.0;
dtzz=0.0;
}
vtmpx=vtmpx+(dtxy+dtxz)*rox;
vtmpy=vtmpy+(dtyy+dtyz)*roy;
vtmpz=vtmpz+(dtzy+dtzz)*roz;
}
v2x_px(k,ib,j)=v2x_px(k,ib,j)*dmpx1+dmpx2*
rox*dxi2(2,i)/ca*(t2xx(k,i,j)-t2xx(k,i+1,j));
v2x(k,i,j)=vtmpx+v2x_px(k,ib,j);
v2y_px(k,ib,j)=v2y_px(k,ib,j)*dmpyz1+dmpyz2*
roy*dxh2(2,i)/ca*(t2xy(k,i-1,j)-t2xy(k,i,j));
v2y(k,i,j)=vtmpy+v2y_px(k,ib,j);
v2z_px(k,ib,j)=v2z_px(k,ib,j)*dmpyz1+dmpyz2*
roz*dxh2(2,i)/ca*(t2xz(k,i-1,j)-t2xz(k,i,j));
v2z(k,i,j)=vtmpz+v2z_px(k,ib,j);
}
}
//}
//}
return;
}
//-----------------------------------------------------------------------
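// Region-II PML-y velocity update, the bottom-region counterpart of
// vel_PmlY_IC.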
__global__ void vel_PmlY_IIC(int nzbm1,
float ca,
int lby0,
int lby1,
int *nd2_vel,
float *drvh2M,
float *drti2M,
float *rhoM,
float *damp2_yM,
int *idmat2M,
float *dxi2M,
float *dyi2M,
float *dzi2M,
float *dxh2M,
float *dyh2M,
float *dzh2M,
float *t2xxM,
float *t2xyM,
float *t2xzM,
float *t2yyM,
float *t2yzM,
float *t2zzM,
int mw2_pml1,
int mw2_pml,
int nxbtm,
int nybtm,
int nzbtm,
float *v2xM, //output
float *v2yM,
float *v2zM,
float *v2x_pyM,
float *v2y_pyM,
float *v2z_pyM)
{
int i,j,k,lb,jb,kb, jbIni;
float rth,rti,damp0,dmpy2,dmpy1,dmpxz2,dmpxz1,ro1,rox,roy,roz,
vtmpx,vtmpy,vtmpz,dtxz,dtyz,dtzz;
//if( lby(1)>lby(2) ) return;
if( lby0>lby1 )
{
return;
}
lb = blockIdx.x * blockDim.x + threadIdx.x + lby0;
i = blockIdx.y * blockDim.y + threadIdx.y + nd2_vel[6];
if (lb > lby1 || i > nd2_vel[11])
{
return;
}
jbIni = 0;
for (j = lby0; j < lb; j++)
{
for (k = nd2_vel[4*j]; k <= nd2_vel[1+4*j]; k++)
{
jbIni++;
}
}
jb = jbIni;
kb = 0;
//for (lb = lby(1); lb <= lby(2); lb++)
//for (lb = lby0; lb <= lby1; lb++)
//{
//kb=0;
//for (i = nd2_vel(7); i <= nd2_vel(12); i++)
//for (i = nd2_vel[6]; i <= nd2_vel[11]; i++)
//{
//for (j = nd2_vel(1+4*lb); j <= nd2_vel(2+4*lb); j++)
for (j = nd2_vel[4*lb]; j <= nd2_vel[1+4*lb]; j++)
{
kb=kb+1;
jb=jb+1;
rth=drvh2(kb,lb);
rti=drti2(kb,lb);
for (k = 1; k <= nzbm1; k++)
{
damp0=damp2_y(k,i,lb);
dmpy2=1./(1.+rth*damp0);
dmpy1=dmpy2*2.-1.0;
dmpxz2=1./(1.+rti*damp0);
dmpxz1=dmpxz2*2.-1.;
ro1=rho(idmat2(k,i,j));
rox=0.5*(ro1+rho(idmat2(k,i+1,j)));
roy=0.5*(ro1+rho(idmat2(k,i,j+1)));
roz=0.5*(ro1+rho(idmat2(k-1,i,j)));
vtmpx=v2x(k,i,j)-v2x_py(k,i,jb);
vtmpy=v2y(k,i,j)-v2y_py(k,i,jb);
vtmpz=v2z(k,i,j)-v2z_py(k,i,jb);
//if(k<nd2_vel(17))
if(k<nd2_vel[16])
{
if(k>1)
{
dtxz=dzi2(1,k)*t2xz(k-1,i,j)+dzi2(2,k)*t2xz(k,i,j)+
dzi2(3,k)*t2xz(k+1,i,j)+dzi2(4,k)*t2xz(k+2,i,j);
dtyz=dzi2(1,k)*t2yz(k-1,i,j)+dzi2(2,k)*t2yz(k,i,j)+
dzi2(3,k)*t2yz(k+1,i,j)+dzi2(4,k)*t2yz(k+2,i,j);
dtzz=dzh2(1,k)*t2zz(k-2,i,j)+dzh2(2,k)*t2zz(k-1,i,j)+
dzh2(3,k)*t2zz(k, i,j)+dzh2(4,k)*t2zz(k+1,i,j);
}
else
{
dtxz=dzi2(2,k)/ca*(t2xz(k,i,j)-t2xz(k+1,i,j));
dtyz=dzi2(2,k)/ca*(t2yz(k,i,j)-t2yz(k+1,i,j));
dtzz=dzh2(2,k)/ca*(t2zz(k-1,i,j)-t2zz(k,i,j));
}
//if(i>nd2_vel(8) && i<nd2_vel(11))
if(i>nd2_vel[7] && i<nd2_vel[10])
{
vtmpx=vtmpx+rox*(dtxz+
dxi2(1,i)*t2xx(k,i-1,j)+dxi2(2,i)*t2xx(k,i, j)+
dxi2(3,i)*t2xx(k,i+1,j)+dxi2(4,i)*t2xx(k,i+2,j));
vtmpy=vtmpy+roy*(dtyz+
dxh2(1,i)*t2xy(k,i-2,j)+dxh2(2,i)*t2xy(k,i-1,j)+
dxh2(3,i)*t2xy(k,i, j)+dxh2(4,i)*t2xy(k,i+1,j));
vtmpz=vtmpz+roz*(dtzz+
dxh2(1,i)*t2xz(k,i-2,j)+dxh2(2,i)*t2xz(k,i-1,j)+
dxh2(3,i)*t2xz(k,i, j)+dxh2(4,i)*t2xz(k,i+1,j));
}
else
{
vtmpx=vtmpx+rox*dtxz;
vtmpy=vtmpy+roy*dtyz;
vtmpz=vtmpz+roz*dtzz;
}
}
else
{
//if(i>nd2_vel(8) && i<nd2_vel(11))
if(i>nd2_vel[7] && i<nd2_vel[10])
{
vtmpx=vtmpx+rox*
(dxi2(1,i)*t2xx(k,i-1,j)+dxi2(2,i)*t2xx(k,i, j)+
dxi2(3,i)*t2xx(k,i+1,j)+dxi2(4,i)*t2xx(k,i+2,j));
vtmpy=vtmpy+ roy*
(dxh2(1,i)*t2xy(k,i-2,j)+dxh2(2,i)*t2xy(k,i-1,j)+
dxh2(3,i)*t2xy(k,i, j)+dxh2(4,i)*t2xy(k,i+1,j));
vtmpz=vtmpz+ roz*
(dxh2(1,i)*t2xz(k,i-2,j)+dxh2(2,i)*t2xz(k,i-1,j)+
dxh2(3,i)*t2xz(k,i, j)+dxh2(4,i)*t2xz(k,i+1,j));
}
}
v2x_py(k,i,jb)=v2x_py(k,i,jb)*dmpxz1+dmpxz2*rox*
dyh2(2,j)/ca*(t2xy(k,i,j-1)-t2xy(k,i,j));
v2x(k,i,j)=vtmpx+v2x_py(k,i,jb);
v2y_py(k,i,jb)=v2y_py(k,i,jb)*dmpy1+dmpy2*roy*
dyi2(2,j)/ca*(t2yy(k,i,j)-t2yy(k,i,j+1));
v2y(k,i,j)=vtmpy+v2y_py(k,i,jb);
v2z_py(k,i,jb)=v2z_py(k,i,jb)*dmpxz1+dmpxz2*roz*
dyh2(2,j)/ca*(t2yz(k,i,j-1)-t2yz(k,i,j));
v2z(k,i,j)=vtmpz+v2z_py(k,i,jb);
}
}
//}
//}
return;
}
//-----------------------------------------------------------------------
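// Region-II PML-z velocity update. There is a single z slab, so no lb loop
// is needed; kb counts the levels k = nd2_vel[16]..nzbm1 and indexes
// drti2/drvh2 with the slab index fixed at 1.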
__global__ void vel_PmlZ_IIC(int nzbm1,
float ca,
int *nd2_vel,
float *drvh2M,
float *drti2M,
float *rhoM,
float *damp2_zM,
int *idmat2M,
float *dxi2M,
float *dyi2M,
float *dzi2M,
float *dxh2M,
float *dyh2M,
float *dzh2M,
float *t2xxM,
float *t2xyM,
float *t2xzM,
float *t2yyM,
float *t2yzM,
float *t2zzM,
int mw2_pml1, //dimension #s
int mw2_pml,
int nxbtm,
int nybtm,
int nzbtm,
float *v2xM, //output
float *v2yM,
float *v2zM,
float *v2x_pzM,
float *v2y_pzM,
float *v2z_pzM)
{
int i,j,k,kb;
float damp0,dmpz2,dmpz1,dmpxy2,dmpxy1,ro1,rox,roy,roz,vtmpx,vtmpy,vtmpz;
j = blockIdx.x * blockDim.x + threadIdx.x + nd2_vel[0];
i = blockIdx.y * blockDim.y + threadIdx.y + nd2_vel[6];
if (j > nd2_vel[5] || i > nd2_vel[11])
{
return;
}
//for (j = nd2_vel(1); j <= nd2_vel(6); j++)
//for (j = nd2_vel[0]; j <= nd2_vel[5]; j++)
//{
//for (i = nd2_vel(7); i <= nd2_vel(12); i++)
//for (i = nd2_vel[6]; i <= nd2_vel[11]; i++)
//{
kb=0;
damp0=damp2_z(i,j);
//for (k = nd2_vel(17); k <= nzbm1; k++)
for (k = nd2_vel[16]; k <= nzbm1; k++)
{
kb=kb+1;
dmpz2=1./(1.+damp0*drti2(kb,1));
dmpz1=dmpz2*2.-1.;
dmpxy2=1./(1.+damp0*drvh2(kb,1));
dmpxy1=dmpxy2*2.-1.;
ro1=rho(idmat2(k,i,j));
rox=0.5*(ro1+rho(idmat2(k,i+1,j)));
roy=0.5*(ro1+rho(idmat2(k,i,j+1)));
roz=0.5*(ro1+rho(idmat2(k-1,i,j)));
vtmpx=v2x(k,i,j)-v2x_pz(kb,i,j);
vtmpy=v2y(k,i,j)-v2y_pz(kb,i,j);
vtmpz=v2z(k,i,j)-v2z_pz(kb,i,j);
//if(j>nd2_vel(2) && j<nd2_vel(5) &&
// i>nd2_vel(8) && i<nd2_vel(11))
if(j>nd2_vel[1] && j<nd2_vel[4] &&
i>nd2_vel[7] && i<nd2_vel[10])
{
vtmpx=vtmpx+rox*
(dxi2(1,i)*t2xx(k,i-1,j)+dxi2(2,i)*t2xx(k,i, j)+
dxi2(3,i)*t2xx(k,i+1,j)+dxi2(4,i)*t2xx(k,i+2,j)+
dyh2(1,j)*t2xy(k,i,j-2)+dyh2(2,j)*t2xy(k,i,j-1)+
dyh2(3,j)*t2xy(k,i,j )+dyh2(4,j)*t2xy(k,i,j+1));
vtmpy=vtmpy+roy*
(dxh2(1,i)*t2xy(k,i-2,j)+dxh2(2,i)*t2xy(k,i-1,j)+
dxh2(3,i)*t2xy(k,i, j)+dxh2(4,i)*t2xy(k,i+1,j)+
dyi2(1,j)*t2yy(k,i,j-1)+dyi2(2,j)*t2yy(k,i,j)+
dyi2(3,j)*t2yy(k,i,j+1)+dyi2(4,j)*t2yy(k,i,j+2));
vtmpz=vtmpz+roz*
(dxh2(1,i)*t2xz(k,i-2,j)+dxh2(2,i)*t2xz(k,i-1,j)+
dxh2(3,i)*t2xz(k,i, j)+dxh2(4,i)*t2xz(k,i+1,j)+
dyh2(1,j)*t2yz(k,i,j-2)+dyh2(2,j)*t2yz(k,i,j-1)+
dyh2(3,j)*t2yz(k,i,j )+dyh2(4,j)*t2yz(k,i,j+1));
}
v2x_pz(kb,i,j)=v2x_pz(kb,i,j)*dmpxy1+dmpxy2*rox*
dzi2(2,k)/ca*(t2xz(k,i,j)-t2xz(k+1,i,j));
v2x(k,i,j)=vtmpx+v2x_pz(kb,i,j);
v2y_pz(kb,i,j)=v2y_pz(kb,i,j)*dmpxy1+dmpxy2*roy*
dzi2(2,k)/ca*(t2yz(k,i,j)-t2yz(k+1,i,j));
v2y(k,i,j)=vtmpy+v2y_pz(kb,i,j);
v2z_pz(kb,i,j)=v2z_pz(kb,i,j)*dmpz1+dmpz2*roz*
dzh2(2,k)/ca*(t2zz(k-1,i,j)-t2zz(k,i,j));
v2z(k,i,j)=vtmpz+v2z_pz(kb,i,j);
}
//}
//}
return;
}
//stress computation----------------------------------------------
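// Region-I update of the normal stresses (t1xx, t1yy, t1zz) and the shear
// t1xy, including the attenuation memory variables qt1*. kodd/jkq/irw pick
// one of the eight epdt/qwt coefficient sets per cell (irw lands in 1..8).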
__global__ void stress_norm_xy_IC(int nxb1,
int nyb1,
int nxtop,
int nztop,
int *nd1_tyy,
int *idmat1M,
float ca,
float *clamdaM,
float *cmuM,
float *epdtM,
float *qwpM,
float *qwsM,
float *qwt1M,
float *qwt2M,
float *dxh1M,
float *dyh1M,
float *dxi1M,
float *dyi1M,
float *dzi1M,
float *t1xxM,
float *t1xyM,
float *t1yyM,
float *t1zzM,
float *qt1xxM,
float *qt1xyM,
float *qt1yyM,
float *qt1zzM,
float *v1xM,
float *v1yM,
float *v1zM)
{
int i,j,k,jkq,kodd,inod,irw;
float sxx,syy,szz,sxy,qxx,qyy,qzz,qxy,cusxy,sss,cl,sm2,pm,et,et1,wtp,wts;
j = blockIdx.x * blockDim.x + threadIdx.x + nd1_tyy[2];
i = blockIdx.y * blockDim.y + threadIdx.y + nd1_tyy[8];
if (j > nd1_tyy[3] || i > nd1_tyy[9])
{
return;
}
// for (j = nd1_tyy[2]; j <= nd1_tyy[3]; j++)
// {
kodd = 2 * ((j + nyb1) & 1) + 1;
// for (i = nd1_tyy[8]; i <= nd1_tyy[9]; i++)
// {
jkq=((i+nxb1) & 1) + kodd;
for (k = nd1_tyy[12]; k <= nd1_tyy[17]; k++)
{
sxx=dxh1(1,i)*v1x(k,i-2,j)+dxh1(2,i)*v1x(k,i-1,j)+
dxh1(3,i)*v1x(k,i ,j)+dxh1(4,i)*v1x(k,i+1,j);
syy=dyh1(1,j)*v1y(k,i,j-2)+dyh1(2,j)*v1y(k,i,j-1)+
dyh1(3,j)*v1y(k,i ,j)+dyh1(4,j)*v1y(k,i,j+1);
sxy=dxi1(1,i)*v1y(k,i-1,j)+dxi1(2,i)*v1y(k,i, j)+
dxi1(3,i)*v1y(k,i+1,j)+dxi1(4,i)*v1y(k,i+2,j)+
dyi1(1,j)*v1x(k,i,j-1)+dyi1(2,j)*v1x(k,i,j )+
dyi1(3,j)*v1x(k,i,j+1)+dyi1(4,j)*v1x(k,i,j+2);
if(k==1) {
szz=dzi1(2,k)/ca*(22.*v1z(k,i,j)-17.*v1z(k+1,i,j)-
9.*v1z(k+2,i,j)+5.*v1z(k+3,i,j)-v1z(k+4,i,j))/24.0;
}
else if(k==nztop) {
szz=dzi1(2,k)/ca*(v1z(k,i,j)-v1z(k+1,i,j));
}
else
{
szz=dzi1(1,k)*v1z(k-1,i,j)+dzi1(2,k)*v1z(k, i,j)+
dzi1(3,k)*v1z(k+1,i,j)+dzi1(4,k)*v1z(k+2,i,j);
}
inod=idmat1(k,i,j);
cl=clamda(inod);
sm2=2.*cmu(inod);
pm=cl+sm2;
cusxy=sxy/(1./sm2+.5/cmu(idmat1(k,i+1,j+1)));
sss=sxx+syy+szz;
irw=jkq+4*(k&1);
et=epdt(irw);
et1=1.0-et;
wtp= pm*qwp(inod)*(qwp(inod)*qwt1(irw)+qwt2(irw));
wts=sm2*qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw));
qxx=qt1xx(k,i,j);
qt1xx(k,i,j)=qxx*et+(wtp*sss-wts*(syy+szz))*et1;
t1xx(k,i,j)=t1xx(k,i,j)+sm2*sxx+cl*sss-qxx-qt1xx(k,i,j);
qyy=qt1yy(k,i,j);
qt1yy(k,i,j)=qyy*et+(wtp*sss-wts*(sxx+szz))*et1;
t1yy(k,i,j)=t1yy(k,i,j)+sm2*syy+cl*sss-qyy-qt1yy(k,i,j);
qzz=qt1zz(k,i,j);
qt1zz(k,i,j)=qzz*et+(wtp*sss-wts*(sxx+syy))*et1;
t1zz(k,i,j)=t1zz(k,i,j)+sm2*szz+cl*sss-qzz-qt1zz(k,i,j);
qxy=qt1xy(k,i,j);
qt1xy(k,i,j)=qxy*et+wts/sm2*cusxy*et1;
t1xy(k,i,j)=t1xy(k,i,j)+cusxy-qxy-qt1xy(k,i,j);
}
// }
// }
return;
}
//-----------------------------------------------------------------------------
__global__ void stress_xz_yz_IC(int nxb1,
int nyb1,
int nxtop,
int nytop,
int nztop,
int *nd1_tyz,
int *idmat1M,
float ca,
float *cmuM,
float *epdtM,
float *qwsM,
float *qwt1M,
float *qwt2M,
float *dxi1M,
float *dyi1M,
float *dzh1M,
float *v1xM,
float *v1yM,
float *v1zM,
float *t1xzM,
float *t1yzM,
float *qt1xzM,
float *qt1yzM)
// Compute stress-XZ and YZ components in Region I
// use grid_node_comm
// use wave_field_comm
// implicit NONE
// real, parameter:: tfr1=-577./528./ca,tfr2=201./176./ca, &
// tfr3=-9./176./ca, tfr4=1./528./ca
{
// float tfr1 = -577./528./ca;
// float tfr2 = 201./176./ca;
// float tfr3 = -9./176./ca;
// float tfr4=1./528./ca;
int i,j,k,kodd,inod,jkq,irw;
float dvzx,dvzy,dvxz,dvyz,sm,cusxz,cusyz,et,et1,dmws,qxz,qyz;
j = blockIdx.x * blockDim.x + threadIdx.x + nd1_tyz[2];
i = blockIdx.y * blockDim.y + threadIdx.y + nd1_tyz[8];
if (j > nd1_tyz[3] || i > nd1_tyz[9])
{
return;
}
// for (j=nd1_tyz[2]; j <=nd1_tyz[3]; j++)
// //do j=nd1_tyz(3),nd1_tyz(4)
// {
//kodd=2*mod(j+nyb1,2)+1
kodd=2*((j+nyb1)&1)+1;
// for (i=nd1_tyz[8]; i<=nd1_tyz[9]; i++)
// //do i=nd1_tyz(9),nd1_tyz(10)
// {
//jkq=mod(i+nxb1,2)+kodd
jkq=((i+nxb1)&1)+kodd;
for (k=nd1_tyz[12]; k<=nd1_tyz[17]; k++)
//do k=nd1_tyz(13),nd1_tyz(18)
{
dvzx=dxi1(1,i)*v1z(k,i-1,j)+dxi1(2,i)*v1z(k,i, j)+
dxi1(3,i)*v1z(k,i+1,j)+dxi1(4,i)*v1z(k,i+2,j);
dvzy=dyi1(1,j)*v1z(k,i,j-1)+dyi1(2,j)*v1z(k,i,j )+
dyi1(3,j)*v1z(k,i,j+1)+dyi1(4,j)*v1z(k,i,j+2);
if(k<nztop) {
dvxz=dzh1(1,k)*v1x(k-2,i,j)+dzh1(2,k)*v1x(k-1,i,j)+
dzh1(3,k)*v1x(k, i,j)+dzh1(4,k)*v1x(k+1,i,j);
dvyz=dzh1(1,k)*v1y(k-2,i,j)+dzh1(2,k)*v1y(k-1,i,j)+
dzh1(3,k)*v1y(k, i,j)+dzh1(4,k)*v1y(k+1,i,j);
}
else {
dvxz=dzh1(2,k)/ca*(v1x(k-1,i,j)-v1x(k,i,j));
dvyz=dzh1(2,k)/ca*(v1y(k-1,i,j)-v1y(k,i,j));
}
inod=idmat1(k,i,j);
sm=cmu(inod);
cusxz=(dvzx+dvxz)/(.5/sm+.5/cmu(idmat1(k-1,i+1,j)));
cusyz=(dvzy+dvyz)/(.5/sm+.5/cmu(idmat1(k-1,i,j+1)));
//irw=jkq+4*mod(k,2);
irw=jkq+4*(k&1);
et=epdt(irw);
et1=1.0-et;
dmws=qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw));
qxz=qt1xz(k,i,j);
qt1xz(k,i,j)=qxz*et+dmws*cusxz*et1;
t1xz(k,i,j)=t1xz(k,i,j)+cusxz-qxz-qt1xz(k,i,j);
qyz=qt1yz(k,i,j);
qt1yz(k,i,j)=qyz*et+dmws*cusyz*et1;
t1yz(k,i,j)=t1yz(k,i,j)+cusyz-qyz-qt1yz(k,i,j);
}
// }
// }
return;
}
__global__ void stress_resetVars(int ny1p1,
int nx1p1,
int nxtop,
int nytop,
int nztop,
float *t1xzM,
float *t1yzM)
{
int i, j;
j = blockIdx.x * blockDim.x + threadIdx.x - 1;
i = blockIdx.y * blockDim.y + threadIdx.y + 1;
if (j <= ny1p1 && i <= nxtop)
{
t1yz(1, i, j) = 0.0f;
}
// for (j=-1; j<=ny1p1; j++)
// {
// for (i = 1; i <= nxtop; i++)
// {
// t1yz(1,i,j)=0.0;
// }
// }
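// The second zeroing pass reuses the same thread by shifting indices:
// the two original loop ranges differ by a constant offset of (+2, -2)
// in (j, i).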
j = j + 2;
i = i - 2;
if (j <= nytop && i <= nx1p1)
{
t1xz(1, i, j) = 0.0f;
}
// for (j=1; j <= nytop; j++)
// {
// for (i=-1; i <=nx1p1; i++)
// {
// t1xz(1,i,j)=0.0;
// }
// }
return;
}
//------------------------------------------------------------------------------------
__global__ void stress_norm_PmlX_IC(int nxb1,
int nyb1,
int nxtop,
int nytop,
int nztop,
int mw1_pml,
int mw1_pml1,
int lbx0,
int lbx1,
int *nd1_tyy,
int *idmat1M,
float ca,
float *drti1M,
float *damp1_xM,
float *clamdaM,
float *cmuM,
float *epdtM,
float *qwpM,
float *qwsM,
float *qwt1M,
float *qwt2M,
float *dzi1M,
float *dxh1M,
float *dyh1M,
float *v1xM,
float *v1yM,
float *v1zM,
float *t1xxM,
float *t1yyM,
float *t1zzM,
float *t1xx_pxM,
float *t1yy_pxM,
float *qt1xxM,
float *qt1yyM,
float *qt1zzM,
float *qt1xx_pxM,
float *qt1yy_pxM)
// Compute the stress-norm at region of PML-x-I
// use grid_node_comm
// use wave_field_comm
// implicit NONE
// integer:: i,j,k,lb,ib,kb,kodd,jkq,inod,irw
// real:: taoxx,taoyy,taozz,sxx,syy,szz,sss,qxx,qyy,qzz, &
// rti,damp2,damp1,cl,sm2,pm,et,et1,wtp,wts
{
int i,j,k,lb,ib,kb,kodd,jkq,inod,irw;
float taoxx,taoyy,taozz,sxx,syy,szz,sss,qxx,qyy,qzz,rti,damp2,damp1,cl,sm2,pm,et,et1,wtp,wts;
int nti;
//if (lbx[0] > lbx[1]) return;
//if ( lbx(1)>lbx(2) ) return;
j = blockIdx.x * blockDim.x + threadIdx.x + nd1_tyy[0];
lb = blockIdx.y * blockDim.y + threadIdx.y + lbx0;
if (j > nd1_tyy[5] || lb > lbx1)
{
return;
}
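// nti mirrors an array-extent expression carried over from the original
// code; it is unused in this kernel (as are nti/nth in several PML
// kernels below).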
nti = (lbx1 - lbx0 + 1) * mw1_pml + lbx1;
// for (j=nd1_tyy[0]; j <= nd1_tyy[5]; j++)
// //do j=nd1_tyy(1),nd1_tyy(6)
// {
kodd=2*((j+nyb1)&1)+1;
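// Rebuild the running offset ib that the original sequential lb loop
// accumulated, so this thread writes to the correct column of the *_px
// PML arrays.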
ib=0;
for (k = lbx0; k < lb; k++)
{
for (i = nd1_tyy[6+4*k]; i <= nd1_tyy[7+4*k]; i++)
ib++;
}
// for (lb=lbx[0]; lb <=lbx[1]; lb++)
// //do lb=lbx(1),lbx(2)
// {
kb=0;
for (i = nd1_tyy[6+4*lb]; i <= nd1_tyy[7+4*lb]; i++)
//do i=nd1_tyy(7+4*lb),nd1_tyy(8+4*lb)
{
kb=kb+1;
ib=ib+1;
rti=drti1(kb,lb);
jkq=((i+nxb1)&1)+kodd;
//jkq=mod(i+nxb1,2)+kodd
for (k=nd1_tyy[12]; k <=nd1_tyy[17]; k++)
//do k=nd1_tyy(13),nd1_tyy(18)
{
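// PML damping pair: split fields are relaxed as
// x_new = damp1*x_old + damp2*increment, with damp1 = 2*damp2 - 1.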
damp2=1./(1.+damp1_x(k,j,lb)*rti);
damp1=damp2*2.0-1.;
inod=idmat1(k,i,j);
cl=clamda(inod);
sm2=2.*cmu(inod);
pm=cl+sm2;
irw=jkq+4*(k&1);
//irw=jkq+4*mod(k,2);
et=epdt(irw);
et1=1.0-et;
wtp= pm*qwp(inod)*(qwp(inod)*qwt1(irw)+qwt2(irw));
wts=sm2*qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw));
taoxx=t1xx(k,i,j)-t1xx_px(k,ib,j);
//debug
//t1xx(k,i,j)=t1xx_px(k,ib,j);
taoyy=t1yy(k,i,j)-t1yy_px(k,ib,j);
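// Note: the zz component shares the yy split field (t1yy_px); there is
// no dedicated t1zz_px array.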
taozz=t1zz(k,i,j)-t1yy_px(k,ib,j);
if(j>nd1_tyy[1] && j<nd1_tyy[4]) {
//if(j>nd1_tyy(2) .and. j<nd1_tyy(5)) {
syy=dyh1(1,j)*v1y(k,i,j-2)+dyh1(2,j)*v1y(k,i,j-1)+
dyh1(3,j)*v1y(k,i ,j)+dyh1(4,j)*v1y(k,i,j+1);
if(k==1) {
szz=dzi1(2,k)/ca*(22.*v1z(k,i,j)-17.*v1z(k+1,i,j)-
9.*v1z(k+2,i,j)+5.*v1z(k+3,i,j)-v1z(k+4,i,j))/24.;
}
else if(k==nztop) {
szz=dzi1(2,k)/ca*(v1z(k,i,j)-v1z(k+1,i,j));
}
else {
szz=dzi1(1,k)*v1z(k-1,i,j)+dzi1(2,k)*v1z(k, i,j)+
dzi1(3,k)*v1z(k+1,i,j)+dzi1(4,k)*v1z(k+2,i,j);
}
sss=syy+szz;
qxx=qt1xx(k,i,j);
qt1xx(k,i,j)=qxx*et+(wtp-wts)*sss*et1;
taoxx=taoxx+cl*sss-qxx-qt1xx(k,i,j);
qyy=qt1yy(k,i,j);
qt1yy(k,i,j)=qyy*et+(wtp*sss-wts*szz)*et1;
taoyy=taoyy+sm2*syy+cl*sss-qyy-qt1yy(k,i,j);
qzz=qt1zz(k,i,j);
qt1zz(k,i,j)=qzz*et+(wtp*sss-wts*syy)*et1;
taozz=taozz+sm2*szz+cl*sss-qzz-qt1zz(k,i,j);
}
sxx=dxh1(2,i)/ca*(v1x(k,i-1,j)-v1x(k,i,j));
qxx=qt1xx_px(k,ib,j);
qt1xx_px(k,ib,j)=qxx*et+wtp*sxx*et1;
t1xx_px(k,ib,j)=damp1*t1xx_px(k,ib,j)+
damp2*(pm*sxx-qxx-qt1xx_px(k,ib,j));
t1xx(k,i,j)=taoxx+t1xx_px(k,ib,j);
qyy=qt1yy_px(k,ib,j);
qt1yy_px(k,ib,j)=qyy*et+(wtp-wts)*sxx*et1;
t1yy_px(k,ib,j)=damp1*t1yy_px(k,ib,j)+
damp2*(cl*sxx-qyy-qt1yy_px(k,ib,j));
t1yy(k,i,j)=taoyy+t1yy_px(k,ib,j);
t1zz(k,i,j)=taozz+t1yy_px(k,ib,j);
}
}
// }
// }
return;
}
__global__ void stress_norm_PmlY_IC(int nxb1,
int nyb1,
int mw1_pml1,
int nxtop,
int nztop,
int lby0,
int lby1,
int *nd1_tyy,
int *idmat1M,
float ca,
float *drti1M,
float *damp1_yM,
float *clamdaM,
float *cmuM,
float *epdtM,
float *qwpM,
float *qwsM,
float *qwt1M,
float *qwt2M,
float *dxh1M,
float *dyh1M,
float *dzi1M,
float *t1xxM,
float *t1yyM,
float *t1zzM,
float *qt1xxM,
float *qt1yyM,
float *qt1zzM,
float *t1xx_pyM,
float *t1yy_pyM,
float *qt1xx_pyM,
float *qt1yy_pyM,
float *v1xM,
float *v1yM,
float *v1zM)
// Compute the stress-norm at region of PML-y-I
// use grid_node_comm
// use wave_field_comm
// implicit NONE
// integer:: i,j,k,lb,jb,kb,kodd,jkq,inod,irw
// real:: taoxx,taoyy,taozz,sxx,syy,szz,sss,qxx,qyy,qzz, &
// rti,damp2,damp1,cl,sm2,pm,et,et1,wtp,wts
{
int i,j,k,lb,jb,kb,kodd,jkq,inod,irw;
float taoxx,taoyy,taozz,sxx,syy,szz,sss,qxx,qyy,qzz,rti,damp2,damp1,cl,sm2,pm,et,et1,wtp,wts;
//if(lby[0]>lby[1]) return;
//if(lby(1)>lby(2) ) return
i = blockIdx.x * blockDim.x + threadIdx.x + nd1_tyy[6];
lb = blockIdx.y * blockDim.y + threadIdx.y + lby0;
if (i > nd1_tyy[11] || lb > lby1)
{
return;
}
// for (i = nd1_tyy[6]; i <= nd1_tyy[11]; i++)
// //do i=nd1_tyy(7),nd1_tyy(12)
// {
jb = 0;
for (k = lby0; k < lb; k++)
{
for (j = nd1_tyy[4*k]; j <= nd1_tyy[1+4*k]; j++)
{
jb++;
}
}
// for (lb=lby[0]; lb <= lby[1]; lb++)
// //do lb=lby(1),lby(2)
// {
kb=0;
for (j = nd1_tyy[4*lb]; j <= nd1_tyy[1+4*lb]; j++)
//do j=nd1_tyy(1+4*lb),nd1_tyy(2+4*lb)
{
kb=kb+1;
jb=jb+1;
rti=drti1(kb,lb);
kodd=2 * ((j + nyb1) & 1) + 1;
//kodd=2*mod(j+nyb1,2)+1
jkq = ((i + nxb1) & 1) + kodd;
//jkq=mod(i+nxb1,2)+kodd
for (k=nd1_tyy[12]; k <=nd1_tyy[17]; k++)
//do k=nd1_tyy(13),nd1_tyy(18)
{
damp2=1./(1.+damp1_y(k,i,lb)*rti);
damp1=damp2*2.-1.;
inod=idmat1(k,i,j);
cl=clamda(inod);
sm2=2.*cmu(inod);
pm=cl+sm2;
//irw=jkq+4*mod(k,2)
irw=jkq + 4 * (k & 1);
et=epdt(irw);
et1=1.0-et;
wtp= pm*qwp(inod)*(qwp(inod)*qwt1(irw)+qwt2(irw));
wts=sm2*qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw));
if (i>nd1_tyy[7] && i<nd1_tyy[10]) {
//if(i>nd1_tyy(8) .and. i<nd1_tyy(11)) then
sxx=dxh1(1,i)*v1x(k,i-2,j)+dxh1(2,i)*v1x(k,i-1,j)+
dxh1(3,i)*v1x(k,i ,j)+dxh1(4,i)*v1x(k,i+1,j);
}
else {
sxx=0.0;
}
if(k==1) {
szz=dzi1(2,k)/ca*(22.*v1z(k,i,j)-17.*v1z(k+1,i,j)-
9.*v1z(k+2,i,j)+5.*v1z(k+3,i,j)-v1z(k+4,i,j))/24.;
}
else if(k==nztop) {
szz=dzi1(2,k)/ca*(v1z(k,i,j)-v1z(k+1,i,j));
}
else {
szz=dzi1(1,k)*v1z(k-1,i,j)+dzi1(2,k)*v1z(k, i,j)+
dzi1(3,k)*v1z(k+1,i,j)+dzi1(4,k)*v1z(k+2,i,j);
}
sss=sxx+szz;
qxx=qt1xx(k,i,j);
qt1xx(k,i,j)=qxx*et+(wtp*sss-wts*szz)*et1;
taoxx=t1xx(k,i,j)-t1xx_py(k,i,jb)+sm2*sxx+cl*sss-qxx-qt1xx(k,i,j);
qyy=qt1yy(k,i,j);
qt1yy(k,i,j)=qyy*et+(wtp-wts)*sss*et1;
taoyy=t1yy(k,i,j)-t1yy_py(k,i,jb)+cl*sss-qyy-qt1yy(k,i,j);
qzz=qt1zz(k,i,j);
qt1zz(k,i,j)=qzz*et+(wtp*sss-wts*sxx)*et1;
taozz=t1zz(k,i,j)-t1xx_py(k,i,jb)+sm2*szz+cl*sss-qzz-qt1zz(k,i,j);
syy=dyh1(2,j)/ca*(v1y(k,i,j-1)-v1y(k,i,j));
qxx=qt1xx_py(k,i,jb);
qt1xx_py(k,i,jb)=qxx*et+(wtp-wts)*syy*et1;
t1xx_py(k,i,jb)=damp1*t1xx_py(k,i,jb)+
damp2*(cl*syy-qxx-qt1xx_py(k,i,jb));
t1xx(k,i,j)=taoxx+t1xx_py(k,i,jb);
t1zz(k,i,j)=taozz+t1xx_py(k,i,jb);
qyy=qt1yy_py(k,i,jb);
qt1yy_py(k,i,jb)=qyy*et+wtp*syy*et1;
t1yy_py(k,i,jb)=damp1*t1yy_py(k,i,jb)+
damp2*(pm*syy-qyy-qt1yy_py(k,i,jb));
t1yy(k,i,j)=taoyy+t1yy_py(k,i,jb);
}
}
// }
// }
return;
}
__global__ void stress_xy_PmlX_IC(int nxb1,
int nyb1,
int mw1_pml,
int mw1_pml1,
int nxtop,
int nytop,
int nztop,
int lbx0,
int lbx1,
int *nd1_txy,
int *idmat1M,
float ca,
float *drth1M,
float *damp1_xM,
float *cmuM,
float *epdtM,
float *qwsM,
float *qwt1M,
float *qwt2M,
float *dxi1M,
float *dyi1M,
float *t1xyM,
float *qt1xyM,
float *t1xy_pxM,
float *qt1xy_pxM,
float *v1xM,
float *v1yM)
// Compute the Stress-xy at region of PML-x-I
// use grid_node_comm
// use wave_field_comm
// implicit NONE
// integer:: i,j,k,lb,ib,kb,kodd,jkq,inod,irw
// real:: taoxy,cusxy,qxy,rth,damp2,damp1,sm,dmws,et,et1
{
int i,j,k,lb,ib,kb,kodd,jkq,inod,irw;
float taoxy,cusxy,qxy,rth,damp2,damp1,sm,dmws,et,et1;
int nth;
nth = (lbx1 - lbx0 + 1) * mw1_pml + 1 - lbx0;
j = blockIdx.x * blockDim.x + threadIdx.x + nd1_txy[0];
lb = blockIdx.y * blockDim.y + threadIdx.y + lbx0;
if (j > nd1_txy[5] || lb > lbx1)
{
return;
}
ib = 0;
for (k = lbx0; k < lb; k++)
{
for (i = nd1_txy[6+4*k]; i <= nd1_txy[7+4*k]; i++)
{
ib++;
}
}
//if (lbx[0] > lbx[1]) return;
//if ( lbx(1)>lbx(2) ) return
// for (j = nd1_txy[0]; j <= nd1_txy[5]; j++)
// //do j=nd1_txy(1),nd1_txy(6)
// {
kodd = 2 * ((j + nyb1) & 1) + 1;
//kodd=2*mod(j+nyb1,2)+1
// ib=0;
// for (lb = lbx[0]; lb <= lbx[1]; lb++)
// //do lb=lbx(1),lbx(2)
// {
kb=0;
for (i = nd1_txy[6+4*lb]; i <= nd1_txy[7+4*lb]; i++)
//do i=nd1_txy(7+4*lb),nd1_txy(8+4*lb)
{
kb=kb+1;
ib=ib+1;
rth=drth1(kb,lb);
jkq=((i + nxb1) & 1) + kodd;
//jkq=mod(i+nxb1,2)+kodd;
for (k = nd1_txy[12]; k <= nd1_txy[17]; k++)
//do k=nd1_txy(13),nd1_txy(18)
{
damp2=1./(1.+damp1_x(k,j,lb)*rth);
damp1=damp2*2.-1.;
inod=idmat1(k,i,j);
sm=2./(1./cmu(inod)+1./cmu(idmat1(k,i+1,j+1)));
irw=jkq + 4 * (k & 1);
//irw=jkq+4*mod(k,2)
et=epdt(irw);
et1=1.0-et;
dmws=qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw));
taoxy=t1xy(k,i,j)-t1xy_px(k,ib,j);
if(j > nd1_txy[1] && j<nd1_txy[4]) {
//if(j>nd1_txy(2) .and. j<nd1_txy(5)) then
cusxy=(dyi1(1,j)*v1x(k,i,j-1)+dyi1(2,j)*v1x(k,i,j)+
dyi1(3,j)*v1x(k,i,j+1)+dyi1(4,j)*v1x(k,i,j+2))*sm;
qxy=qt1xy(k,i,j);
qt1xy(k,i,j)=qxy*et+dmws*cusxy*et1;
taoxy=taoxy+cusxy-qxy-qt1xy(k,i,j);
}
cusxy=sm*dxi1(2,i)/ca*(v1y(k,i,j)-v1y(k,i+1,j));
qxy=qt1xy_px(k,ib,j);
qt1xy_px(k,ib,j)=qxy*et+dmws*cusxy*et1;
t1xy_px(k,ib,j)=damp1*t1xy_px(k,ib,j)+
damp2*(cusxy-qxy-qt1xy_px(k,ib,j));
t1xy(k,i,j)=taoxy+t1xy_px(k,ib,j);
}
}
// }
// }
return;
}
__global__ void stress_xy_PmlY_IC(int nxb1,
int nyb1,
int mw1_pml1,
int nxtop,
int nztop,
int lby0,
int lby1,
int *nd1_txy,
int *idmat1M,
float ca,
float *drth1M,
float *damp1_yM,
float *cmuM,
float *epdtM,
float *qwsM,
float *qwt1M,
float *qwt2M,
float *dxi1M,
float *dyi1M,
float *t1xyM,
float *qt1xyM,
float *t1xy_pyM,
float *qt1xy_pyM,
float *v1xM,
float *v1yM)
//Compute the Stress-xy at region of PML-y-I
//use grid_node_comm
//use wave_field_comm
//implicit NONE
//integer:: i,j,k,lb,jb,kb,kodd,jkq,inod,irw
//real:: taoxy,cusyx,qxy,rth,damp2,damp1,sm,dmws,et,et1
{
int i,j,k,lb,jb,kb,kodd,jkq,inod,irw;
float taoxy,cusyx,qxy,rth,damp2,damp1,sm,dmws,et,et1;
//if(lby[0] > lby[1]) return;
//if( lby(1)>lby(2) ) return
i = blockIdx.x * blockDim.x + threadIdx.x + nd1_txy[6];
lb = blockIdx.y * blockDim.y + threadIdx.y + lby0;
if (i > nd1_txy[11] || lb > lby1)
{
return;
}
// for (i = nd1_txy[6]; i <= nd1_txy[11]; i++)
// //do i=nd1_txy(7),nd1_txy(12)
// {
jb=0;
for (k = lby0; k < lb; k++)
{
for (j = nd1_txy[4*k]; j <= nd1_txy[1 + 4 * k]; j++)
{
jb++;
}
}
// for (lb = lby[0]; lb <= lby[1]; lb++)
// //do lb=lby(1), lby(2)
// {
kb=0;
for (j = nd1_txy[4*lb]; j <= nd1_txy[1 + 4 * lb]; j++)
//do j=nd1_txy(1+4*lb),nd1_txy(2+4*lb)
{
kb=kb+1;
jb=jb+1;
rth=drth1(kb,lb);
kodd=2 * ((j + nyb1) & 1) + 1;
//kodd=2*mod(j+nyb1,2)+1;
jkq=((i + nxb1) & 1) + kodd;
//jkq=mod(i+nxb1,2)+kodd
for (k = nd1_txy[12]; k <= nd1_txy[17]; k++)
//do k=nd1_txy(13),nd1_txy(18)
{
damp2=1./(1.+damp1_y(k,i,lb)*rth);
damp1=damp2*2.-1.;
inod=idmat1(k,i,j);
sm=2./(1./cmu(inod)+1./cmu(idmat1(k,i+1,j+1)));
irw=jkq+4*(k&1);
//irw=jkq+4*mod(k,2)
et=epdt(irw);
et1=1.0-et;
dmws=qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw));
taoxy=t1xy(k,i,j)-t1xy_py(k,i,jb);
if(i > nd1_txy[7] && i<nd1_txy[10]) {
//if(i>nd1_txy(8) .and. i<nd1_txy(11)) then
cusyx=(dxi1(1,i)*v1y(k,i-1,j)+dxi1(2,i)*v1y(k,i,j)+
dxi1(3,i)*v1y(k,i+1,j)+dxi1(4,i)*v1y(k,i+2,j))*sm;
qxy=qt1xy(k,i,j);
qt1xy(k,i,j)=qxy*et+dmws*cusyx*et1;
taoxy=taoxy+cusyx-qxy-qt1xy(k,i,j);
}
cusyx=sm*dyi1(2,j)/ca*(v1x(k,i,j)-v1x(k,i,j+1));
qxy=qt1xy_py(k,i,jb);
qt1xy_py(k,i,jb)=qxy*et+dmws*cusyx*et1;
t1xy_py(k,i,jb)=damp1*t1xy_py(k,i,jb)+
damp2*(cusyx-qxy-qt1xy_py(k,i,jb));
t1xy(k,i,j)=taoxy+t1xy_py(k,i,jb);
}
}
// }
// }
return;
}
__global__ void stress_xz_PmlX_IC(int nxb1,
int nyb1,
int nxtop,
int nytop,
int nztop,
int mw1_pml,
int mw1_pml1,
int lbx0,
int lbx1,
int *nd1_txz,
int *idmat1M,
float ca,
float *drth1M,
float *damp1_xM,
float *cmuM,
float *epdtM,
float *qwsM,
float *qwt1M,
float *qwt2M,
float *dxi1M,
float *dzh1M,
float *t1xzM,
float *qt1xzM,
float *t1xz_pxM,
float *qt1xz_pxM,
float *v1xM,
float *v1zM)
//Compute the stress-xz at PML-x-I region
//use grid_node_comm
//use wave_field_comm
//implicit NONE
//integer:: i,j,k,lb,ib,kb,kodd,jkq,inod,irw
//real:: taoxz,cusxz,dvxz,qxz,rth,damp2,damp1,sm,dmws,et,et1
{
int i,j,k,lb,ib,kb,kodd,jkq,inod,irw;
float taoxz,cusxz,dvxz,qxz,rth,damp2,damp1,sm,dmws,et,et1;
int nth;
//if (lbx[0] > lbx[1]) return;
//if ( lbx(1)>lbx(2) ) return
nth = (lbx1 - lbx0 + 1) * mw1_pml + 1 - lbx0;
j = blockIdx.x * blockDim.x + threadIdx.x + nd1_txz[0];
lb = blockIdx.y * blockDim.y + threadIdx.y + lbx0;
if (j > nd1_txz[5] || lb > lbx1)
{
return;
}
// for (j = nd1_txz[0]; j <= nd1_txz[5]; j++)
// //do j=nd1_txz(1),nd1_txz(6)
// {
kodd=2 * ((j+nyb1)&1)+1;
//kodd=2*mod(j+nyb1,2)+1
ib=0;
for (k = lbx0; k < lb; k++)
{
for (i = nd1_txz[6+4*k]; i <= nd1_txz[7+4*k]; i++)
{
ib++;
}
}
// for (lb = lbx[0]; lb <= lbx[1]; lb++)
// //do lb=lbx(1),lbx(2)
// {
kb=0;
for (i = nd1_txz[6+4*lb]; i <= nd1_txz[7+4*lb]; i++)
//do i=nd1_txz(7+4*lb),nd1_txz(8+4*lb)
{
kb=kb+1;
ib=ib+1;
rth=drth1(kb,lb);
jkq=((i+nxb1)&1)+kodd;
//jkq=mod(i+nxb1,2)+kodd
for (k = nd1_txz[12]; k <= nd1_txz[17]; k++)
//do k=nd1_txz(13),nd1_txz(18)
{
damp2=1./(1.+damp1_x(k,j,lb)*rth);
damp1=damp2*2.-1.;
inod=idmat1(k,i,j);
sm=2./(1./cmu(inod)+1./cmu(idmat1(k-1,i+1,j)));
irw=jkq+4*(k&1);
//irw=jkq+4*mod(k,2)
et=epdt(irw);
et1=1.0-et;
dmws=qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw));
if(k<nztop) {
dvxz=dzh1(1,k)*v1x(k-2,i,j)+dzh1(2,k)*v1x(k-1,i,j)+
dzh1(3,k)*v1x(k, i,j)+dzh1(4,k)*v1x(k+1,i,j);
}
else {
dvxz=dzh1(2,k)/ca*(v1x(k-1,i,j)-v1x(k,i,j));
}
cusxz=dvxz*sm;
qxz=qt1xz(k,i,j);
qt1xz(k,i,j)=qxz*et+dmws*cusxz*et1;
taoxz=t1xz(k,i,j)-t1xz_px(k,ib,j)+cusxz-qxz-qt1xz(k,i,j);
cusxz=sm*dxi1(2,i)/ca*(v1z(k,i,j)-v1z(k,i+1,j));
qxz=qt1xz_px(k,ib,j);
qt1xz_px(k,ib,j)=qxz*et+dmws*cusxz*et1;
t1xz_px(k,ib,j)=damp1*t1xz_px(k,ib,j)+
damp2*(cusxz-qxz-qt1xz_px(k,ib,j));
t1xz(k,i,j)=taoxz+t1xz_px(k,ib,j);
}
}
// }
// }
return;
}
__global__ void stress_xz_PmlY_IC(int nxb1,
int nyb1,
int nxtop,
int nztop,
int lby0,
int lby1,
int *nd1_txz,
int *idmat1M,
float ca,
float *cmuM,
float *epdtM,
float *qwsM,
float *qwt1M,
float *qwt2M,
float *dxi1M,
float *dzh1M,
float *t1xzM,
float *qt1xzM,
float *v1xM,
float *v1zM)
//Compute the stress-xz at PML-y-I region
//use grid_node_comm
//use wave_field_comm
//implicit NONE
{
int i,j,k,lb,kodd,jkq,inod,irw;
float cusxz,dvxz,dvzx,qxz,sm,dmws,et,et1;
//if (lby[0] > lby[1]) return;
//if( lby(1)>lby(2) ) return
i = blockIdx.x * blockDim.x + threadIdx.x + nd1_txz[8];
lb = blockIdx.y * blockDim.y + threadIdx.y + lby0;
if (i > nd1_txz[9] || lb > lby1)
{
return;
}
// for (i = nd1_txz[8]; i <= nd1_txz[9]; i++)
// //do i=nd1_txz(9),nd1_txz(10)
// {
// for (lb=lby[0]; lb <= lby[1]; lb++)
// //do lb=lby(1),lby(2)
// {
for (j = nd1_txz[4*lb]; j <= nd1_txz[1+4*lb]; j++)
//do j=nd1_txz(1+4*lb),nd1_txz(2+4*lb)
{
kodd=2 * ((j+nyb1)&1)+1;
//kodd=2*mod(j+nyb1,2)+1
jkq=((i+nxb1)&1)+kodd;
//jkq=mod(i+nxb1,2)+kodd
for (k = nd1_txz[12]; k <= nd1_txz[17]; k++)
//do k=nd1_txz(13),nd1_txz(18)
{
inod=idmat1(k,i,j);
sm=2./(1./cmu(inod)+1./cmu(idmat1(k-1,i+1,j)));
irw=jkq+4*(k&1);
//irw=jkq+4*mod(k,2)
et=epdt(irw);
et1=1.0-et;
dmws=qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw));
dvzx=dxi1(1,i)*v1z(k,i-1,j)+dxi1(2,i)*v1z(k,i, j)+
dxi1(3,i)*v1z(k,i+1,j)+dxi1(4,i)*v1z(k,i+2,j);
if(k<nztop) {
dvxz=dzh1(1,k)*v1x(k-2,i,j)+dzh1(2,k)*v1x(k-1,i,j)+
dzh1(3,k)*v1x(k, i,j)+dzh1(4,k)*v1x(k+1,i,j);
}
else {
dvxz=dzh1(2,k)/ca*(v1x(k-1,i,j)-v1x(k,i,j));
}
cusxz=(dvzx+dvxz)*sm;
qxz=qt1xz(k,i,j);
qt1xz(k,i,j)=qxz*et+dmws*cusxz*et1;
t1xz(k,i,j)=t1xz(k,i,j)+cusxz-qxz-qt1xz(k,i,j);
}
}
// }
// }
return;
}
__global__ void stress_yz_PmlX_IC(int nxb1,
int nyb1,
int nztop,
int nxtop,
int lbx0,
int lbx1,
int *nd1_tyz,
int *idmat1M,
float ca,
float *cmuM,
float *epdtM,
float *qwsM,
float *qwt1M,
float *qwt2M,
float *dyi1M,
float *dzh1M,
float *t1yzM,
float *qt1yzM,
float *v1yM,
float *v1zM)
//Compute the stress-yz at PML-x-I region
//use grid_node_comm
//use wave_field_comm
//implicit NONE
//integer:: i,j,k,lb,kodd,jkq,inod,irw
//real:: cusyz,dvyz,dvzy,qyz,sm,dmws,et,et1
{
int i,j,k,lb,kodd,jkq,inod,irw;
float cusyz,dvyz,dvzy,qyz,sm,dmws,et,et1;
//if(lbx[0] > lbx[1]) return;
//if(lbx(1)>lbx(2) ) return
j = blockIdx.x * blockDim.x + threadIdx.x + nd1_tyz[2];
lb = blockIdx.y * blockDim.y + threadIdx.y + lbx0;
if (j > nd1_tyz[3] || lb > lbx1)
{
return;
}
// for (j = nd1_tyz[2]; j <= nd1_tyz[3]; j++)
// //do j=nd1_tyz(3),nd1_tyz(4)
// {
kodd=2 * ((j+nyb1)&1)+1;
//kodd=2*mod(j+nyb1,2)+1
// for (lb = lbx[0]; lb <= lbx[1]; lb++)
// //do lb=lbx(1),lbx(2)
// {
for (i = nd1_tyz[6+4*lb]; i <= nd1_tyz[7+4*lb]; i++)
//do i=nd1_tyz(7+4*lb),nd1_tyz(8+4*lb)
{
jkq = ((i+nxb1)&1)+kodd;
//jkq=mod(i+nxb1,2)+kodd
for (k = nd1_tyz[12]; k <= nd1_tyz[17]; k++)
//do k=nd1_tyz(13),nd1_tyz(18)
{
inod=idmat1(k,i,j);
sm=2./(1./cmu(inod)+1./cmu(idmat1(k-1,i,j+1)));
irw=jkq+4*(k&1);
//irw=jkq+4*mod(k,2)
et=epdt(irw);
et1=1.0-et;
dmws=qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw));
dvzy=dyi1(1,j)*v1z(k,i,j-1)+dyi1(2,j)*v1z(k,i,j )+
dyi1(3,j)*v1z(k,i,j+1)+dyi1(4,j)*v1z(k,i,j+2);
if(k<nztop) {
dvyz=dzh1(1,k)*v1y(k-2,i,j)+dzh1(2,k)*v1y(k-1,i,j)+
dzh1(3,k)*v1y(k, i,j)+dzh1(4,k)*v1y(k+1,i,j);
}
else {
dvyz=dzh1(2,k)/ca*(v1y(k-1,i,j)-v1y(k,i,j));
}
cusyz=(dvzy+dvyz)*sm;
qyz=qt1yz(k,i,j);
qt1yz(k,i,j)=qyz*et+dmws*cusyz*et1;
t1yz(k,i,j)=t1yz(k,i,j)+cusyz-qyz-qt1yz(k,i,j);
}
}
// }
// }
return;
}
__global__ void stress_yz_PmlY_IC(int nxb1,
int nyb1,
int mw1_pml1,
int nxtop,
int nztop,
int lby0,
int lby1,
int *nd1_tyz,
int *idmat1M,
float ca,
float *drth1M,
float *damp1_yM,
float *cmuM,
float *epdtM,
float *qwsM,
float *qwt1M,
float *qwt2M,
float *dyi1M,
float *dzh1M,
float *t1yzM,
float *qt1yzM,
float *t1yz_pyM,
float *qt1yz_pyM,
float *v1yM,
float *v1zM)
//Compute the stress-yz at PML-y-I region
//use grid_node_comm
//use wave_field_comm
//implicit NONE
//integer:: i,j,k,lb,jb,kb,kodd,jkq,inod,irw
//real:: taoyz,cusyz,dvyz,qyz,rth,damp2,damp1,sm,dmws,et,et1
{
int i,j,k,lb,jb,kb,kodd,jkq,inod,irw;
float taoyz,cusyz,dvyz,qyz,rth,damp2,damp1,sm,dmws,et,et1;
//if(lby[0] > lby[1]) return;
//if( lby(1)>lby(2) ) return
i = blockIdx.x * blockDim.x + threadIdx.x + nd1_tyz[6];
lb = blockIdx.y * blockDim.y + threadIdx.y + lby0;
if (i > nd1_tyz[11] || lb > lby1)
{
return;
}
// for (i = nd1_tyz[6]; i <= nd1_tyz[11]; i++)
// //do i=nd1_tyz(7),nd1_tyz(12)
// {
jb=0;
for (k = lby0; k < lb; k++)
{
for (j = nd1_tyz[4*k]; j <= nd1_tyz[1+4*k]; j++)
{
jb++;
}
}
// for (lb=lby[0]; lb <= lby[1]; lb++)
// //do lb=lby(1),lby(2)
// {
kb=0;
for (j = nd1_tyz[4*lb]; j <= nd1_tyz[1+4*lb]; j++)
//do j=nd1_tyz(1+4*lb),nd1_tyz(2+4*lb)
{
kb=kb+1;
jb=jb+1;
rth=drth1(kb,lb);
kodd=2*((j+nyb1)&1)+1;
//kodd=2*mod(j+nyb1,2)+1;
jkq=((i+nxb1)&1)+kodd;
//jkq=mod(i+nxb1,2)+kodd
for (k=nd1_tyz[12]; k <= nd1_tyz[17]; k++)
//do k=nd1_tyz(13),nd1_tyz(18)
{
damp2=1./(1.+damp1_y(k,i,lb)*rth);
damp1=damp2*2.-1.;
inod=idmat1(k,i,j);
sm=2./(1./cmu(inod)+1./cmu(idmat1(k-1,i,j+1)));
irw=jkq+4*(k&1);
//irw=jkq+4*mod(k,2)
et=epdt(irw);
et1=1.0-et;
dmws=qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw));
if(k<nztop) {
dvyz=dzh1(1,k)*v1y(k-2,i,j)+dzh1(2,k)*v1y(k-1,i,j)+
dzh1(3,k)*v1y(k, i,j)+dzh1(4,k)*v1y(k+1,i,j);
}
else {
dvyz=dzh1(2,k)/ca*(v1y(k-1,i,j)-v1y(k,i,j));
}
cusyz=dvyz*sm;
qyz=qt1yz(k,i,j);
qt1yz(k,i,j)=qyz*et+dmws*cusyz*et1;
taoyz=t1yz(k,i,j)-t1yz_py(k,i,jb)+cusyz-qyz-qt1yz(k,i,j);
cusyz=sm*dyi1(2,j)/ca*(v1z(k,i,j)-v1z(k,i,j+1));
qyz=qt1yz_py(k,i,jb);
qt1yz_py(k,i,jb)=qyz*et+dmws*cusyz*et1;
t1yz_py(k,i,jb)=damp1*t1yz_py(k,i,jb)+
damp2*(cusyz-qyz-qt1yz_py(k,i,jb));
t1yz(k,i,j)=taoyz+t1yz_py(k,i,jb);
}
}
// }
// }
return;
}
__global__ void stress_norm_xy_II(int nxb2,
int nyb2,
int nxbtm,
int nzbtm,
int nztop,
int *nd2_tyy,
int *idmat2M,
float *clamdaM,
float *cmuM,
float *epdtM,
float *qwpM,
float *qwsM,
float *qwt1M,
float *qwt2M,
float *t2xxM,
float *t2xyM,
float *t2yyM,
float *t2zzM,
float *qt2xxM,
float *qt2xyM,
float *qt2yyM,
float *qt2zzM,
float *dxh2M,
float *dyh2M,
float *dxi2M,
float *dyi2M,
float *dzi2M,
float *v2xM,
float *v2yM,
float *v2zM)
// Compute stress-norm and XY components in Region II
// use grid_node_comm
// use wave_field_comm
// implicit NONE
// integer:: i,j,k,kodd,inod,jkq,irw
// real:: sxx,syy,szz,sxy,sss,qxx,qyy,qzz,qxy,cusxy, &
// cl,sm2,et,et1,dmws,pm,wtp,wts
{
int i,j,k,kodd,inod,jkq,irw;
float sxx,syy,szz,sxy,sss,qxx,qyy,qzz,qxy,cusxy,cl,sm2,et,et1,dmws,pm,wtp,wts;
j = blockIdx.x * blockDim.x + threadIdx.x + nd2_tyy[2];
i = blockIdx.y * blockDim.y + threadIdx.y + nd2_tyy[8];
if (j > nd2_tyy[3] || i > nd2_tyy[9])
{
return;
}
// for (j=nd2_tyy[2]; j <= nd2_tyy[3]; j++)
// //do j=nd2_tyy(3),nd2_tyy(4)
// {
kodd=2*((j+nyb2)&1)+1;
//kodd=2*mod(j+nyb2,2)+1
// for (i = nd2_tyy[8]; i <= nd2_tyy[9]; i++)
// //do i=nd2_tyy(9),nd2_tyy(10)
// {
jkq=((i+nxb2)&1)+kodd;
//jkq=mod(i+nxb2,2)+kodd
for (k = nd2_tyy[12]; k <= nd2_tyy[15]; k++)
//do k=nd2_tyy(13),nd2_tyy(16)
{
sxx=dxh2(1,i)*v2x(k,i-2,j)+dxh2(2,i)*v2x(k,i-1,j)+
dxh2(3,i)*v2x(k,i ,j)+dxh2(4,i)*v2x(k,i+1,j);
syy=dyh2(1,j)*v2y(k,i,j-2)+dyh2(2,j)*v2y(k,i,j-1)+
dyh2(3,j)*v2y(k,i ,j)+dyh2(4,j)*v2y(k,i,j+1);
sxy=dxi2(1,i)*v2y(k,i-1,j)+dxi2(2,i)*v2y(k,i, j)+
dxi2(3,i)*v2y(k,i+1,j)+dxi2(4,i)*v2y(k,i+2,j)+
dyi2(1,j)*v2x(k,i,j-1)+dyi2(2,j)*v2x(k,i,j )+
dyi2(3,j)*v2x(k,i,j+1)+dyi2(4,j)*v2x(k,i,j+2);
szz=dzi2(1,k)*v2z(k-1,i,j)+dzi2(2,k)*v2z(k, i,j)+
dzi2(3,k)*v2z(k+1,i,j)+dzi2(4,k)*v2z(k+2,i,j);
sss=sxx+syy+szz;
inod=idmat2(k,i,j);
cl=clamda(inod);
sm2=2.*cmu(inod);
pm=cl+sm2;
cusxy=sxy/(1./sm2+.5/cmu(idmat2(k,i+1,j+1)));
irw=jkq+4*((k+nztop)&1);
//irw=jkq+4*mod(k+nztop,2);
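// In Region II the depth parity uses k+nztop, so irw continues the
// Region I pattern across the interface between the two grids.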
et=epdt(irw);
et1=1.0-et;
wtp= pm*qwp(inod)*(qwp(inod)*qwt1(irw)+qwt2(irw));
wts=sm2*qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw));
qxx=qt2xx(k,i,j);
qt2xx(k,i,j)=qxx*et+(wtp*sss-wts*(syy+szz))*et1;
t2xx(k,i,j)=t2xx(k,i,j)+sm2*sxx+cl*sss-qxx-qt2xx(k,i,j);
qyy=qt2yy(k,i,j);
qt2yy(k,i,j)=qyy*et+(wtp*sss-wts*(sxx+szz))*et1;
t2yy(k,i,j)=t2yy(k,i,j)+sm2*syy+cl*sss-qyy-qt2yy(k,i,j);
qzz=qt2zz(k,i,j);
qt2zz(k,i,j)=qzz*et+(wtp*sss-wts*(sxx+syy))*et1;
t2zz(k,i,j)=t2zz(k,i,j)+sm2*szz+cl*sss-qzz-qt2zz(k,i,j);
qxy=qt2xy(k,i,j);
qt2xy(k,i,j)=qxy*et+wts/sm2*cusxy*et1;
t2xy(k,i,j)=t2xy(k,i,j)+cusxy-qxy-qt2xy(k,i,j);
}
// }
// }
return;
}
//call stress_xz_yz_II
__global__ void stress_xz_yz_IIC(int nxb2,
int nyb2,
int nztop,
int nxbtm,
int nzbtm,
int *nd2_tyz,
int *idmat2M,
float *cmuM,
float *epdtM,
float *qwsM,
float *qwt1M,
float *qwt2M,
float *dxi2M,
float *dyi2M,
float *dzh2M,
float *t2xzM,
float *t2yzM,
float *qt2xzM,
float *qt2yzM,
float *v2xM,
float *v2yM,
float *v2zM)
//Compute stress-XZ and YZ components in Region II
//use grid_node_comm
//use wave_field_comm
//implicit NONE
//integer:: i,j,k,kodd,inod,jkq,irw
//real:: qxz,qyz,cusxz,cusyz,sm,et,et1,dmws
{
int i,j,k,kodd,inod,jkq,irw;
float qxz,qyz,cusxz,cusyz,sm,et,et1,dmws;
j = blockIdx.x * blockDim.x + threadIdx.x + nd2_tyz[2];
i = blockIdx.y * blockDim.y + threadIdx.y + nd2_tyz[8];
if (j > nd2_tyz[3] || i > nd2_tyz[9])
{
return;
}
// for (j = nd2_tyz[2]; j <= nd2_tyz[3]; j++)
// //do j=nd2_tyz(3),nd2_tyz(4)
// {
kodd=2*((j+nyb2)&1)+1;
//kodd=2*mod(j+nyb2,2)+1
// for (i = nd2_tyz[8]; i <= nd2_tyz[9]; i++)
// //do i=nd2_tyz(9),nd2_tyz(10)
// {
jkq=((i+nxb2)&1)+kodd;
//jkq=mod(i+nxb2,2)+kodd
for (k=nd2_tyz[12]; k <= nd2_tyz[15]; k++)
//do k=nd2_tyz(13),nd2_tyz(16)
{
inod=idmat2(k,i,j);
sm=cmu(inod);
cusxz=(dxi2(1,i)*v2z(k,i-1,j)+dxi2(2,i)*v2z(k,i, j)+
dxi2(3,i)*v2z(k,i+1,j)+dxi2(4,i)*v2z(k,i+2,j)+
dzh2(1,k)*v2x(k-2,i,j)+dzh2(2,k)*v2x(k-1,i,j)+
dzh2(3,k)*v2x(k, i,j)+dzh2(4,k)*v2x(k+1,i,j))/
(.5/sm+.5/cmu(idmat2(k-1,i+1,j)));
cusyz=(dyi2(1,j)*v2z(k,i,j-1)+dyi2(2,j)*v2z(k,i,j )+
dyi2(3,j)*v2z(k,i,j+1)+dyi2(4,j)*v2z(k,i,j+2)+
dzh2(1,k)*v2y(k-2,i,j)+dzh2(2,k)*v2y(k-1,i,j)+
dzh2(3,k)*v2y(k, i,j)+dzh2(4,k)*v2y(k+1,i,j))/
(.5/sm+.5/cmu(idmat2(k-1,i,j+1)));
irw=jkq+4*((k+nztop)&1);
//irw=jkq+4*mod(k+nztop,2)
et=epdt(irw);
et1=1.0-et;
dmws=qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw));
qxz=qt2xz(k,i,j);
qt2xz(k,i,j)=qxz*et+dmws*cusxz*et1;
t2xz(k,i,j)=t2xz(k,i,j)+cusxz-qxz-qt2xz(k,i,j);
qyz=qt2yz(k,i,j);
qt2yz(k,i,j)=qyz*et+dmws*cusyz*et1;
t2yz(k,i,j)=t2yz(k,i,j)+cusyz-qyz-qt2yz(k,i,j);
}
// }
// }
return;
}
//call stress_norm_PmlX_II
__global__ void stress_norm_PmlX_IIC(int nxb2,
int nyb2,
int mw2_pml,
int mw2_pml1,
int nztop,
int nxbtm,
int nybtm,
int nzbtm,
int lbx0,
int lbx1,
int *nd2_tyy,
int *idmat2M,
float ca,
float *drti2M,
float *damp2_xM,
float *clamdaM,
float *cmuM,
float *epdtM,
float *qwpM,
float *qwsM,
float *qwt1M,
float *qwt2M,
float *dxh2M,
float *dyh2M,
float *dzi2M,
float *t2xxM,
float *t2yyM,
float *t2zzM,
float *qt2xxM,
float *qt2yyM,
float *qt2zzM,
float *t2xx_pxM,
float *t2yy_pxM,
float *qt2xx_pxM,
float *qt2yy_pxM,
float *v2xM,
float *v2yM,
float *v2zM)
//Compute the Stress-norm at region of PML-x-II
//use grid_node_comm
//use wave_field_comm
//implicit NONE
//integer:: i,j,k,lb,ib,kb,kodd,jkq,inod,irw
//real:: taoxx,taoyy,taozz,sxx,syy,szz,sss,qxx,qyy,qzz, &
// rti,damp2,damp1,cl,sm2,pm,et,et1,wtp,wts
{
int i,j,k,lb,ib,kb,kodd,jkq,inod,irw;
float taoxx,taoyy,taozz,sxx,syy,szz,sss,qxx,qyy,qzz,rti,damp2,damp1,cl,sm2,pm,et,et1,wtp,wts;
int nti;
//if(lbx[0] > lbx[1]) return;
//if( lbx(1)>lbx(2) ) return
nti = (lbx1 - lbx0 + 1) * mw2_pml + lbx1;
j = blockIdx.x * blockDim.x + threadIdx.x + nd2_tyy[0];
lb = blockIdx.y * blockDim.y + threadIdx.y + lbx0;
if (j > nd2_tyy[5] || lb > lbx1)
{
return;
}
ib = 0;
for (k = lbx0; k < lb; k++)
{
for (i=nd2_tyy[6+4*k]; i <= nd2_tyy[7+4*k]; i++)
{
ib++;
}
}
// for (j=nd2_tyy[0]; j <= nd2_tyy[5]; j++)
// //do j=nd2_tyy(1),nd2_tyy(6)
// {
kodd=2*((j+nyb2)&1)+1;
//kodd=2*mod(j+nyb2,2)+1
// ib=0;
// for (lb=lbx[0]; lb <= lbx[1]; lb++)
// //do lb=lbx(1),lbx(2)
// {
kb=0;
for (i=nd2_tyy[6+4*lb]; i <= nd2_tyy[7+4*lb]; i++)
//do i=nd2_tyy(7+4*lb),nd2_tyy(8+4*lb)
{
kb=kb+1;
ib=ib+1;
rti=drti2(kb,lb);
jkq=((i+nxb2)&1)+kodd;
//jkq=mod(i+nxb2,2)+kodd;
for (k=nd2_tyy[12]; k <= nd2_tyy[17]; k++)
//do k=nd2_tyy(13),nd2_tyy(18)
{
damp2=1./(1.+damp2_x(k,j,lb)*rti);
damp1=damp2*2.0-1.0;
inod=idmat2(k,i,j);
cl=clamda(inod);
sm2=2.*cmu(inod);
pm=cl+sm2;
irw=jkq+4*((k+nztop)&1);
//irw=jkq+4*mod(k+nztop,2)
et=epdt(irw);
et1=1.0-et;
wtp= pm*qwp(inod)*(qwp(inod)*qwt1(irw)+qwt2(irw));
wts=sm2*qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw));
taoxx=t2xx(k,i,j)-t2xx_px(k,ib,j);
taoyy=t2yy(k,i,j)-t2yy_px(k,ib,j);
taozz=t2zz(k,i,j)-t2yy_px(k,ib,j);
if(j>nd2_tyy[1] && j<nd2_tyy[4]) {
//if(j>nd2_tyy(2) .and. j<nd2_tyy(5)) {
syy=dyh2(1,j)*v2y(k,i,j-2)+dyh2(2,j)*v2y(k,i,j-1)+
dyh2(3,j)*v2y(k,i ,j)+dyh2(4,j)*v2y(k,i,j+1);
if(k<nd2_tyy[16]) {
//if(k<nd2_tyy(17)) {
szz=dzi2(1,k)*v2z(k-1,i,j)+dzi2(2,k)*v2z(k, i,j)+
dzi2(3,k)*v2z(k+1,i,j)+dzi2(4,k)*v2z(k+2,i,j);
}
else {
szz=0.0;
}
sss=syy+szz;
qxx=qt2xx(k,i,j);
qt2xx(k,i,j)=qxx*et+(wtp-wts)*sss*et1;
taoxx=taoxx+cl*sss-qxx-qt2xx(k,i,j);
qyy=qt2yy(k,i,j);
qt2yy(k,i,j)=qyy*et+(wtp*sss-wts*szz)*et1;
taoyy=taoyy+sm2*syy+cl*sss-qyy-qt2yy(k,i,j);
qzz=qt2zz(k,i,j);
qt2zz(k,i,j)=qzz*et+(wtp*sss-wts*syy)*et1;
taozz=taozz+sm2*szz+cl*sss-qzz-qt2zz(k,i,j);
}
sxx=dxh2(2,i)/ca*(v2x(k,i-1,j)-v2x(k,i,j));
qxx=qt2xx_px(k,ib,j);
qt2xx_px(k,ib,j)=qxx*et+wtp*sxx*et1;
t2xx_px(k,ib,j)=damp1*t2xx_px(k,ib,j)+
damp2*(pm*sxx-qxx-qt2xx_px(k,ib,j));
t2xx(k,i,j)=taoxx+t2xx_px(k,ib,j);
qyy=qt2yy_px(k,ib,j);
qt2yy_px(k,ib,j)=qyy*et+(wtp-wts)*sxx*et1;
t2yy_px(k,ib,j)=damp1*t2yy_px(k,ib,j)+
damp2*(cl*sxx-qyy-qt2yy_px(k,ib,j));
t2yy(k,i,j)=taoyy+t2yy_px(k,ib,j);
t2zz(k,i,j)=taozz+t2yy_px(k,ib,j);
}
}
// }
// }
return;
}
__global__ void stress_norm_PmlY_II(int nxb2,
int nyb2,
int nztop,
int nxbtm,
int nzbtm,
int mw2_pml1,
int lby0,
int lby1,
int *nd2_tyy,
int *idmat2M,
float ca,
float *drti2M,
float *damp2_yM,
float *clamdaM,
float *cmuM,
float *epdtM,
float *qwpM,
float *qwsM,
float *qwt1M,
float *qwt2M,
float *dxh2M,
float *dyh2M,
float *dzi2M,
float *t2xxM,
float *t2yyM,
float *t2zzM,
float *qt2xxM,
float *qt2yyM,
float *qt2zzM,
float *t2xx_pyM,
float *t2yy_pyM,
float *qt2xx_pyM,
float *qt2yy_pyM,
float *v2xM,
float *v2yM,
float *v2zM)
//Compute the stress-norm at region of PML-y-II
//use grid_node_comm
//use wave_field_comm
//implicit NONE
//integer:: i,j,k,lb,jb,kb,kodd,jkq,inod,irw
//real:: taoxx,taoyy,taozz,sxx,syy,szz,sss,qxx,qyy,qzz, &
// rti,damp2,damp1,cl,sm2,pm,et,et1,wtp,wts
{
int i,j,k,lb,jb,kb,kodd,jkq,inod,irw;
float taoxx,taoyy,taozz,sxx,syy,szz,sss,qxx,qyy,qzz,rti,damp2,damp1,cl,sm2,pm,et,et1,wtp,wts;
//if( lby[0] > lby[1] ) return;
//if( lby(1)>lby(2) ) return;
i = blockIdx.x * blockDim.x + threadIdx.x + nd2_tyy[6];
lb = blockIdx.y * blockDim.y + threadIdx.y + lby0;
if (i > nd2_tyy[11] || lb > lby1)
{
return;
}
jb = 0;
for (k = lby0; k < lb; k++)
{
for (j=nd2_tyy[4*k]; j <= nd2_tyy[1+4*k]; j++)
{
jb++;
}
}
// for (i = nd2_tyy[6]; i <= nd2_tyy[11]; i++)
// //do i=nd2_tyy(7),nd2_tyy(12)
// {
// jb=0;
// for (lb = lby[0]; lb <= lby[1]; lb++)
// //do lb=lby(1),lby(2)
// {
kb=0;
for (j=nd2_tyy[4*lb]; j <= nd2_tyy[1+4*lb]; j++)
//do j=nd2_tyy(1+4*lb),nd2_tyy(2+4*lb)
{
kb=kb+1;
jb=jb+1;
rti=drti2(kb,lb);
kodd=2*((j+nyb2)&1)+1;
//kodd=2*mod(j+nyb2,2)+1;
jkq=((i+nxb2)&1)+kodd;
//jkq=mod(i+nxb2,2)+kodd
for (k=nd2_tyy[12]; k <= nd2_tyy[17]; k++)
//do k=nd2_tyy(13),nd2_tyy(18)
{
damp2=1./(1.+damp2_y(k,i,lb)*rti);
damp1=damp2*2.0-1.;
inod=idmat2(k,i,j);
cl=clamda(inod);
sm2=2.*cmu(inod);
pm=cl+sm2;
irw=jkq+4*((k+nztop)&1);
//irw=jkq+4*mod(k+nztop,2)
et=epdt(irw);
et1=1.0-et;
wtp= pm*qwp(inod)*(qwp(inod)*qwt1(irw)+qwt2(irw));
wts=sm2*qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw));
taoxx=t2xx(k,i,j)-t2xx_py(k,i,jb);
taoyy=t2yy(k,i,j)-t2yy_py(k,i,jb);
taozz=t2zz(k,i,j)-t2xx_py(k,i,jb);
if(k<nd2_tyy[16]) {
//if(k<nd2_tyy(17)) then
szz=dzi2(1,k)*v2z(k-1,i,j)+dzi2(2,k)*v2z(k, i,j)+
dzi2(3,k)*v2z(k+1,i,j)+dzi2(4,k)*v2z(k+2,i,j);
if(i>nd2_tyy[7] && i<nd2_tyy[10]) {
//if(i>nd2_tyy(8) .and. i<nd2_tyy(11)) {
sxx=dxh2(1,i)*v2x(k,i-2,j)+dxh2(2,i)*v2x(k,i-1,j)+
dxh2(3,i)*v2x(k,i ,j)+dxh2(4,i)*v2x(k,i+1,j);
}
else {
sxx=0.0;
}
sss=sxx+szz;
qxx=qt2xx(k,i,j);
qt2xx(k,i,j)=qxx*et+(wtp*sss-wts*szz)*et1;
taoxx=taoxx+sm2*sxx+cl*sss-qxx-qt2xx(k,i,j);
qyy=qt2yy(k,i,j);
qt2yy(k,i,j)=qyy*et+(wtp-wts)*sss*et1;
taoyy=taoyy+cl*sss-qyy-qt2yy(k,i,j);
qzz=qt2zz(k,i,j);
qt2zz(k,i,j)=qzz*et+(wtp*sss-wts*sxx)*et1;
taozz=taozz+sm2*szz+cl*sss-qzz-qt2zz(k,i,j);
}
else {
if(i>nd2_tyy[7] && i<nd2_tyy[10]) {
//if(i>nd2_tyy(8) .and. i<nd2_tyy(11)) then
sxx=dxh2(1,i)*v2x(k,i-2,j)+dxh2(2,i)*v2x(k,i-1,j)+
dxh2(3,i)*v2x(k,i ,j)+dxh2(4,i)*v2x(k,i+1,j);
qxx=qt2xx(k,i,j);
qt2xx(k,i,j)=qxx*et+wtp*sxx*et1;
taoxx=taoxx+pm*sxx-qxx-qt2xx(k,i,j);
qyy=qt2yy(k,i,j);
qt2yy(k,i,j)=qyy*et+(wtp-wts)*sxx*et1;
taoyy=taoyy+cl*sxx-qyy-qt2yy(k,i,j);
qzz=qt2zz(k,i,j);
qt2zz(k,i,j)=qzz*et+(wtp-wts)*sxx*et1;
taozz=taozz+cl*sxx-qzz-qt2zz(k,i,j);
}
}
syy=dyh2(2,j)/ca*(v2y(k,i,j-1)-v2y(k,i,j));
qxx=qt2xx_py(k,i,jb);
qt2xx_py(k,i,jb)=qxx*et+(wtp-wts)*syy*et1;
t2xx_py(k,i,jb)=damp1*t2xx_py(k,i,jb)+damp2*(cl*syy-qxx-qt2xx_py(k,i,jb));
t2xx(k,i,j)=taoxx+t2xx_py(k,i,jb);
t2zz(k,i,j)=taozz+t2xx_py(k,i,jb);
qyy=qt2yy_py(k,i,jb);
qt2yy_py(k,i,jb)=qyy*et+wtp*syy*et1;
t2yy_py(k,i,jb)=damp1*t2yy_py(k,i,jb)+damp2*(pm*syy-qyy-qt2yy_py(k,i,jb));
t2yy(k,i,j)=taoyy+t2yy_py(k,i,jb);
}
}
// }
// }
return;
}
__global__ void stress_norm_PmlZ_IIC(int nxb2,
int nyb2,
int mw2_pml,
int mw2_pml1,
int nztop,
int nxbtm,
int nzbtm,
int *nd2_tyy,
int *idmat2M,
float ca,
float *damp2_zM,
float *drth2M,
float *clamdaM,
float *cmuM,
float *epdtM,
float *qwpM,
float *qwsM,
float *qwt1M,
float *qwt2M,
float *dxh2M,
float *dyh2M,
float *dzi2M,
float *t2xxM,
float *t2yyM,
float *t2zzM,
float *qt2xxM,
float *qt2yyM,
float *qt2zzM,
float *t2xx_pzM,
float *t2zz_pzM,
float *qt2xx_pzM,
float *qt2zz_pzM,
float *v2xM,
float *v2yM,
float *v2zM)
//Compute the stress-norm at region of PML-z-II
//use grid_node_comm
//use wave_field_comm
//implicit NONE
//integer:: i,j,k,lb,kb,kodd,jkq,inod,irw
//real:: taoxx,taoyy,taozz,sxx,syy,szz,sss,qxx,qyy,qzz, &
// damp2,damp1,cl,sm2,pm,et,et1,wtp,wts
{
int i,j,k,lb,kb,kodd,jkq,inod,irw;
float taoxx,taoyy,taozz,sxx,syy,szz,sss,qxx,qyy,qzz,damp2,damp1,cl,sm2,pm,et,et1,wtp,wts;
j = blockIdx.x * blockDim.x + threadIdx.x + nd2_tyy[0];
i = blockIdx.y * blockDim.y + threadIdx.y + nd2_tyy[6];
if (j > nd2_tyy[5] || i > nd2_tyy[11])
{
return;
}
// for (j = nd2_tyy[0]; j <= nd2_tyy[5]; j++)
// //do j=nd2_tyy(1),nd2_tyy(6)
// {
kodd=2*((j+nyb2)&1)+1;
//kodd=2*mod(j+nyb2,2)+1
// for (i=nd2_tyy[6]; i <= nd2_tyy[11]; i++)
// //do i=nd2_tyy(7),nd2_tyy(12)
// {
jkq=((i+nxb2)&1)+kodd;
//jkq=mod(i+nxb2,2)+kodd
kb=0;
for (k = nd2_tyy[16]; k <= nd2_tyy[17]; k++)
//do k=nd2_tyy(17),nd2_tyy(18)
{
kb=kb+1;
damp2=1./(1.+damp2_z(i,j)*drth2(kb,1));
damp1=damp2*2.-1.;
inod=idmat2(k,i,j);
cl=clamda(inod);
sm2=2.*cmu(inod);
pm=cl+sm2;
irw=jkq+4*((k+nztop)&1);
//irw=jkq+4*mod(k+nztop,2)
et=epdt(irw);
et1=1.0-et;
wtp= pm*qwp(inod)*(qwp(inod)*qwt1(irw)+qwt2(irw));
wts=sm2*qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw));
taoxx=t2xx(k,i,j)-t2xx_pz(kb,i,j);
taoyy=t2yy(k,i,j)-t2xx_pz(kb,i,j);
taozz=t2zz(k,i,j)-t2zz_pz(kb,i,j);
if(i>nd2_tyy[7] && i<nd2_tyy[10] && j>nd2_tyy[1] && j<nd2_tyy[4]) {
//if(i>nd2_tyy(8) .and. i<nd2_tyy(11) .and. &
// j>nd2_tyy(2) .and. j<nd2_tyy(5)) then
sxx=dxh2(1,i)*v2x(k,i-2,j)+dxh2(2,i)*v2x(k,i-1,j)+
dxh2(3,i)*v2x(k,i ,j)+dxh2(4,i)*v2x(k,i+1,j);
syy=dyh2(1,j)*v2y(k,i,j-2)+dyh2(2,j)*v2y(k,i,j-1)+
dyh2(3,j)*v2y(k,i ,j)+dyh2(4,j)*v2y(k,i,j+1);
sss=sxx+syy;
qxx=qt2xx(k,i,j);
qt2xx(k,i,j)=qxx*et+(wtp*sss-wts*syy)*et1;
taoxx=taoxx+sm2*sxx+cl*sss-qxx-qt2xx(k,i,j);
qyy=qt2yy(k,i,j);
qt2yy(k,i,j)=qyy*et+(wtp*sss-wts*sxx)*et1;
taoyy=taoyy+sm2*syy+cl*sss-qyy-qt2yy(k,i,j);
qzz=qt2zz(k,i,j);
qt2zz(k,i,j)=qzz*et+(wtp-wts)*sss*et1;
taozz=taozz+cl*sss-qzz-qt2zz(k,i,j);
}
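// Along the damped z direction only the two-point /ca derivative feeds
// the split field; the transverse directions used the full four-point
// stencils above.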
szz=dzi2(2,k)/ca*(v2z(k,i,j)-v2z(k+1,i,j));
qxx=qt2xx_pz(kb,i,j);
qt2xx_pz(kb,i,j)=qxx*et+(wtp-wts)*szz*et1;
t2xx_pz(kb,i,j)=damp1*t2xx_pz(kb,i,j)+
damp2*(cl*szz-qxx-qt2xx_pz(kb,i,j));
t2xx(k,i,j)=taoxx+t2xx_pz(kb,i,j);
t2yy(k,i,j)=taoyy+t2xx_pz(kb,i,j);
qzz=qt2zz_pz(kb,i,j);
qt2zz_pz(kb,i,j)=qzz*et+wtp*szz*et1;
t2zz_pz(kb,i,j)=damp1*t2zz_pz(kb,i,j)+
damp2*(pm*szz-qzz-qt2zz_pz(kb,i,j));
t2zz(k,i,j)=taozz+t2zz_pz(kb,i,j);
}
// }
// }
return;
}
__global__ void stress_xy_PmlX_IIC(int nxb2,
int nyb2,
int mw2_pml,
int mw2_pml1,
int nxbtm,
int nybtm,
int nzbtm,
int nztop,
int lbx0,
int lbx1,
int *nd2_txy,
int *idmat2M,
float ca,
float *drth2M,
float *damp2_xM,
float *cmuM,
float *epdtM,
float *qwsM,
float *qwt1M,
float *qwt2M,
float *dxi2M,
float *dyi2M,
float *t2xyM,
float *qt2xyM,
float *t2xy_pxM,
float *qt2xy_pxM,
float *v2xM,
float *v2yM)
//Compute the Stress-xy at region of PML-x-II
//use grid_node_comm
//use wave_field_comm
//implicit NONE
//integer:: i,j,k,lb,ib,kb,kodd,jkq,inod,irw
//real:: taoxy,cusxy,qxy,rth,damp2,damp1,sm,dmws,et,et1
{
int i,j,k,lb,ib,kb,kodd,jkq,inod,irw;
float taoxy,cusxy,qxy,rth,damp2,damp1,sm,dmws,et,et1;
int nth;
//if(lbx[0] > lbx[1]) return;
nth = (lbx1 - lbx0 + 1) * mw2_pml + 1 - lbx0;
//nth = (lbx(2) - lbx(1) + 1) * mw2_pml + 1 - lbx(1)
j = blockIdx.x * blockDim.x + threadIdx.x + nd2_txy[0];
lb = blockIdx.y * blockDim.y + threadIdx.y + lbx0;
if (j > nd2_txy[5] || lb > lbx1)
{
return;
}
ib = 0;
for (k = lbx0; k < lb; k++)
{
for (i=nd2_txy[6+4*k]; i <= nd2_txy[7+4*k]; i++)
{
ib++;
}
}
// for (j = nd2_txy[0]; j <= nd2_txy[5]; j++)
// //do j=nd2_txy(1),nd2_txy(6)
// {
kodd=2*((j+nyb2)&1)+1;
//kodd=2*mod(j+nyb2,2)+1
// ib=0;
// for (lb = lbx[0]; lb <= lbx[1]; lb++)
// //do lb=lbx(1),lbx(2)
// {
kb=0;
for (i=nd2_txy[6+4*lb]; i <= nd2_txy[7+4*lb]; i++)
//do i=nd2_txy(7+4*lb),nd2_txy(8+4*lb)
{
kb=kb+1;
ib=ib+1;
rth=drth2(kb,lb);
jkq=((i+nxb2)&1)+kodd;
//jkq=mod(i+nxb2,2)+kodd
for (k = nd2_txy[12]; k <= nd2_txy[17]; k++)
//do k=nd2_txy(13),nd2_txy(18)
{
damp2=1./(1.+damp2_x(k,j,lb)*rth);
damp1=damp2*2.0-1.;
inod=idmat2(k,i,j);
sm=2./(1./cmu(inod)+1./cmu(idmat2(k,i+1,j+1)));
irw=jkq+4*((k+nztop)&1);
//irw=jkq+4*mod(k+nztop,2)
et=epdt(irw);
et1=1.0-et;
dmws=qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw));
taoxy=t2xy(k,i,j)-t2xy_px(k,ib,j);
if(j > nd2_txy[1] && j<nd2_txy[4]) {
//if(j>nd2_txy(2) .and. j<nd2_txy(5)) then
cusxy=(dyi2(1,j)*v2x(k,i,j-1)+dyi2(2,j)*v2x(k,i,j)+
dyi2(3,j)*v2x(k,i,j+1)+dyi2(4,j)*v2x(k,i,j+2))*sm;
qxy=qt2xy(k,i,j);
qt2xy(k,i,j)=qxy*et+dmws*cusxy*et1;
taoxy=taoxy+cusxy-qxy-qt2xy(k,i,j);
}
cusxy=sm*dxi2(2,i)/ca*(v2y(k,i,j)-v2y(k,i+1,j));
qxy=qt2xy_px(k,ib,j);
qt2xy_px(k,ib,j)=qxy*et+dmws*cusxy*et1;
t2xy_px(k,ib,j)=damp1*t2xy_px(k,ib,j)+
damp2*(cusxy-qxy-qt2xy_px(k,ib,j));
t2xy(k,i,j)=taoxy+t2xy_px(k,ib,j);
}
}
// }
// }
return;
}
__global__ void stress_xy_PmlY_IIC(int nxb2,
int nyb2,
int mw2_pml1,
int nztop,
int nxbtm,
int nzbtm,
int lby0,
int lby1,
int *nd2_txy,
int *idmat2M,
float ca,
float *drth2M,
float *damp2_yM,
float *cmuM,
float *epdtM,
float *qwsM,
float *qwt1M,
float *qwt2M,
float *dxi2M,
float *dyi2M,
float *t2xyM,
float *qt2xyM,
float *t2xy_pyM,
float *qt2xy_pyM,
float *v2xM,
float *v2yM)
//Compute the Stress-xy at region of PML-y-II
//use grid_node_comm
//use wave_field_comm
//implicit NONE
//integer:: i,j,k,lb,jb,kb,kodd,jkq,inod,irw
//real:: taoxy,cusxy,qxy,rth,damp2,damp1,sm,dmws,et,et1
{
int i,j,k,lb,jb,kb,kodd,jkq,inod,irw;
float taoxy,cusxy,qxy,rth,damp2,damp1,sm,dmws,et,et1;
//if(lby[0] > lby[1]) return;
//if( lby(1)>lby(2) ) return
i = blockIdx.x * blockDim.x + threadIdx.x + nd2_txy[6];
lb = blockIdx.y * blockDim.y + threadIdx.y + lby0;
if (i > nd2_txy[11] || lb > lby1)
{
return;
}
jb = 0;
for (k = lby0; k < lb; k++)
{
for (j=nd2_txy[4*k]; j <= nd2_txy[1+4*k]; j++)
{
jb++;
}
}
// for (i = nd2_txy[6]; i <= nd2_txy[11]; i++)
// //do i=nd2_txy(7),nd2_txy(12)
// {
// jb=0;
// for (lb=lby[0]; lb <= lby[1]; lb++)
// //do lb=lby(1),lby(2)
// {
kb=0;
for (j=nd2_txy[4*lb]; j <= nd2_txy[1+4*lb]; j++)
//do j=nd2_txy(1+4*lb),nd2_txy(2+4*lb)
{
kb=kb+1;
jb=jb+1;
rth=drth2(kb,lb);
kodd=2*((j+nyb2)&1)+1;
//kodd=2*mod(j+nyb2,2)+1
jkq=((i+nxb2)&1)+kodd;
//jkq=mod(i+nxb2,2)+kodd
for (k = nd2_txy[12]; k <= nd2_txy[17]; k++)
//do k=nd2_txy(13),nd2_txy(18)
{
damp2=1./(1.+damp2_y(k,i,lb)*rth);
damp1=damp2*2.-1.;
inod=idmat2(k,i,j);
sm=2./(1./cmu(inod)+1./cmu(idmat2(k,i+1,j+1)));
irw=jkq+4*((k+nztop)&1);
//irw=jkq+4*mod(k+nztop,2)
et=epdt(irw);
et1=1.0-et;
dmws=qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw));
taoxy=t2xy(k,i,j)-t2xy_py(k,i,jb);
if(i>nd2_txy[7] && i<nd2_txy[10]) {
//if(i>nd2_txy(8) .and. i<nd2_txy(11)) then
cusxy=(dxi2(1,i)*v2y(k,i-1,j)+dxi2(2,i)*v2y(k,i,j)+
dxi2(3,i)*v2y(k,i+1,j)+dxi2(4,i)*v2y(k,i+2,j))*sm;
qxy=qt2xy(k,i,j);
qt2xy(k,i,j)=qxy*et+dmws*cusxy*et1;
taoxy=taoxy+cusxy-qxy-qt2xy(k,i,j);
}
cusxy=sm*dyi2(2,j)/ca*(v2x(k,i,j)-v2x(k,i,j+1));
qxy=qt2xy_py(k,i,jb);
qt2xy_py(k,i,jb)=qxy*et+dmws*cusxy*et1;
t2xy_py(k,i,jb)=damp1*t2xy_py(k,i,jb)+
damp2*(cusxy-qxy-qt2xy_py(k,i,jb));
t2xy(k,i,j)=taoxy+t2xy_py(k,i,jb);
}
}
// }
// }
return;
}
__global__ void stress_xy_PmlZ_II(int nxb2,
int nyb2,
int nxbtm,
int nzbtm,
int nztop,
int *nd2_txy,
int *idmat2M,
float *cmuM,
float *epdtM,
float *qwsM,
float *qwt1M,
float *qwt2M,
float *dxi2M,
float *dyi2M,
float *t2xyM,
float *qt2xyM,
float *v2xM,
float *v2yM)
//Compute the Stress-xy at region of PML-z-II
//use grid_node_comm
//use wave_field_comm
//implicit NONE
//integer:: i,j,k,lb,kodd,jkq,inod,irw
//real:: cusxy,qxy,sm,dmws,et,et1
{
int i,j,k,lb,kodd,jkq,inod,irw;
float cusxy,qxy,sm,dmws,et,et1;
j = blockIdx.x * blockDim.x + threadIdx.x + nd2_txy[2];
i = blockIdx.y * blockDim.y + threadIdx.y + nd2_txy[8];
if (j > nd2_txy[3] || i > nd2_txy[9])
{
return;
}
// for (j = nd2_txy[2]; j <= nd2_txy[3]; j++)
// //do j=nd2_txy(3),nd2_txy(4)
// {
kodd=2*((j+nyb2)&1)+1;
//kodd=2*mod(j+nyb2,2)+1
// for (i = nd2_txy[8]; i <= nd2_txy[9]; i++)
// //do i=nd2_txy(9),nd2_txy(10)
// {
jkq=((i+nxb2)&1)+kodd;
//jkq=mod(i+nxb2,2)+kodd
for (k=nd2_txy[16]; k <= nd2_txy[17]; k++)
//do k=nd2_txy(17),nd2_txy(18)
{
inod=idmat2(k,i,j);
sm=2./(1./cmu(inod)+1./cmu(idmat2(k,i+1,j+1)));
irw=jkq+4*((k+nztop)&1);
//irw=jkq+4*mod(k+nztop,2);
et=epdt(irw);
et1=1.0-et;
dmws=qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw));
cusxy=(dxi2(1,i)*v2y(k,i-1,j)+dxi2(2,i)*v2y(k,i, j)+
dxi2(3,i)*v2y(k,i+1,j)+dxi2(4,i)*v2y(k,i+2,j)+
dyi2(1,j)*v2x(k,i,j-1)+dyi2(2,j)*v2x(k,i,j )+
dyi2(3,j)*v2x(k,i,j+1)+dyi2(4,j)*v2x(k,i,j+2))*sm;
qxy=qt2xy(k,i,j);
qt2xy(k,i,j)=qxy*et+dmws*cusxy*et1;
t2xy(k,i,j)=t2xy(k,i,j)+cusxy-qxy-qt2xy(k,i,j);
}
// }
// }
return;
}
__global__ void stress_xz_PmlX_IIC(int nxb2,
int nyb2,
int mw2_pml,
int mw2_pml1,
int nxbtm,
int nybtm,
int nzbtm,
int nztop,
int lbx0,
int lbx1,
int *nd2_txz,
int *idmat2M,
float ca,
float *drth2M,
float *damp2_xM,
float *cmuM,
float *epdtM,
float *qwsM,
float *qwt1M,
float *qwt2M,
float *dxi2M,
float *dzh2M,
float *t2xzM,
float *qt2xzM,
float *t2xz_pxM,
float *qt2xz_pxM,
float *v2xM,
float *v2zM)
//Compute the stress-xz at region of PML-x-II
//use grid_node_comm
//use wave_field_comm
//implicit NONE
//integer:: i,j,k,lb,ib,kb,kodd,jkq,inod,irw
//real:: taoxz,cusxz,qxz,rth,damp2,damp1,sm,dmws,et,et1
{
int i,j,k,lb,ib,kb,kodd,jkq,inod,irw;
float taoxz,cusxz,qxz,rth,damp2,damp1,sm,dmws,et,et1;
int nth;
//if(lbx[0] > lbx[1]) return;
nth = (lbx1 - lbx0 + 1) * mw2_pml + 1 - lbx0;
j = blockIdx.x * blockDim.x + threadIdx.x + nd2_txz[0];
lb = blockIdx.y * blockDim.y + threadIdx.y + lbx0;
if (j > nd2_txz[5] || lb > lbx1)
{
return;
}
ib=0;
for (k = lbx0; k < lb; k++)
{
for (i=nd2_txz[6+4*k]; i <= nd2_txz[7+4*k]; i++)
{
ib++;
}
}
// for (j = nd2_txz[0]; j <= nd2_txz[5]; j++)
// //do j=nd2_txz(1),nd2_txz(6)
// {
kodd=2*((j+nyb2)&1)+1;
//kodd=2*mod(j+nyb2,2)+1
// ib=0;
// for (lb=lbx[0]; lb <= lbx[1]; lb++)
// //do lb=lbx(1),lbx(2)
// {
kb=0;
for (i=nd2_txz[6+4*lb]; i <= nd2_txz[7+4*lb]; i++)
//do i=nd2_txz(7+4*lb),nd2_txz(8+4*lb)
{
kb=kb+1;
ib=ib+1;
rth=drth2(kb,lb);
jkq=((i+nxb2)&1)+kodd;
//jkq=mod(i+nxb2,2)+kodd
for (k = nd2_txz[12]; k <= nd2_txz[17]; k++)
//do k=nd2_txz(13),nd2_txz(18)
{
damp2=1./(1.+damp2_x(k,j,lb)*rth);
damp1=damp2*2.-1.;
inod=idmat2(k,i,j);
sm=2./(1./cmu(inod)+1./cmu(idmat2(k-1,i+1,j)));
irw=jkq+4*((k+nztop)&1);
//irw=jkq+4*mod(k+nztop,2)
et=epdt(irw);
et1=1.0-et;
dmws=qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw));
taoxz=t2xz(k,i,j)-t2xz_px(k,ib,j);
if(k < nd2_txz[16]) {
//if(k<nd2_txz(17)) then
cusxz=(dzh2(1,k)*v2x(k-2,i,j)+dzh2(2,k)*v2x(k-1,i,j)+
dzh2(3,k)*v2x(k,i,j)+dzh2(4,k)*v2x(k+1,i,j))*sm;
qxz=qt2xz(k,i,j);
qt2xz(k,i,j)=qxz*et+dmws*cusxz*et1;
taoxz=taoxz+cusxz-qxz-qt2xz(k,i,j);
}
cusxz=sm*dxi2(2,i)/ca*(v2z(k,i,j)-v2z(k,i+1,j));
qxz=qt2xz_px(k,ib,j);
qt2xz_px(k,ib,j)=qxz*et+dmws*cusxz*et1;
t2xz_px(k,ib,j)=damp1*t2xz_px(k,ib,j)+
damp2*(cusxz-qxz-qt2xz_px(k,ib,j));
t2xz(k,i,j)=taoxz+t2xz_px(k,ib,j);
}
}
// }
// }
return;
}
__global__ void stress_xz_PmlY_IIC(int nxb2,
int nyb2,
int nxbtm,
int nzbtm,
int nztop,
int lby0,
int lby1,
int *nd2_txz,
int *idmat2M,
float *cmuM,
float *epdtM,
float *qwsM,
float *qwt1M,
float *qwt2M,
float *dxi2M,
float *dzh2M,
float *v2xM,
float *v2zM,
float *t2xzM,
float *qt2xzM)
//Compute the stress-xz at region of PML-y-II
//use grid_node_comm
//use wave_field_comm
//implicit NONE
//integer:: i,j,k,lb,kodd,jkq,inod,irw
//real:: dvxz,dvzx,cusxz,qxz,sm,dmws,et,et1
{
int i,j,k,lb,kodd,jkq,inod,irw;
float dvxz,dvzx,cusxz,qxz,sm,dmws,et,et1;
//if(lby[0] > lby[1]) return;
//if( lby(1)>lby(2) ) return
i = blockIdx.x * blockDim.x + threadIdx.x + nd2_txz[8];
lb = blockIdx.y * blockDim.y + threadIdx.y + lby0;
if (i > nd2_txz[9] || lb > lby1)
{
return;
}
// for (i = nd2_txz[8]; i <= nd2_txz[9]; i++)
// //do i=nd2_txz(9),nd2_txz(10)
// {
// for (lb = lby[0]; lb <= lby[1]; lb++)
// //do lb=lby(1),lby(2)
// {
for (j=nd2_txz[4*lb]; j <= nd2_txz[1+4*lb]; j++)
//do j=nd2_txz(1+4*lb),nd2_txz(2+4*lb)
{
kodd=2*((j+nyb2)&1)+1;
//kodd=2*mod(j+nyb2,2)+1
jkq=((i+nxb2)&1)+kodd;
//jkq=mod(i+nxb2,2)+kodd
for (k = nd2_txz[12]; k <= nd2_txz[15]; k++)
//do k=nd2_txz(13),nd2_txz(16)
{
inod=idmat2(k,i,j);
sm=2./(1./cmu(inod)+1./cmu(idmat2(k-1,i+1,j)));
irw=jkq+4*((k+nztop)&1);
//irw=jkq+4*mod(k+nztop,2)
et=epdt(irw);
et1=1.0-et;
dmws=qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw));
dvzx=dxi2(1,i)*v2z(k,i-1,j)+dxi2(2,i)*v2z(k,i, j)+
dxi2(3,i)*v2z(k,i+1,j)+dxi2(4,i)*v2z(k,i+2,j);
dvxz=dzh2(1,k)*v2x(k-2,i,j)+dzh2(2,k)*v2x(k-1,i,j)+
dzh2(3,k)*v2x(k, i,j)+dzh2(4,k)*v2x(k+1,i,j);
cusxz=(dvzx+dvxz)*sm;
qxz=qt2xz(k,i,j);
qt2xz(k,i,j)=qxz*et+dmws*cusxz*et1;
t2xz(k,i,j)=t2xz(k,i,j)+cusxz-qxz-qt2xz(k,i,j);
}
}
// }
// }
return;
}
__global__ void stress_xz_PmlZ_IIC(int nxb2,
int nyb2,
int mw2_pml1,
int nxbtm,
int nzbtm,
int nztop,
int *nd2_txz,
int *idmat2M,
float ca,
float *drti2M,
float *damp2_zM,
float *cmuM,
float *epdtM,
float *qwsM,
float *qwt1M,
float *qwt2M,
float *dxi2M,
float *dzh2M,
float *t2xzM,
float *qt2xzM,
float *t2xz_pzM,
float *qt2xz_pzM,
float *v2xM,
float *v2zM)
//Compute the stress-xz at region of PML-z-II
//use grid_node_comm
//use wave_field_comm
//implicit NONE
//integer:: i,j,k,lb,kb,kodd,jkq,inod,irw
//real:: taoxz,cusxz,qxz,damp2,damp1,sm,dmws,et,et1
{
int i,j,k,lb,kb,kodd,jkq,inod,irw;
float taoxz,cusxz,qxz,damp2,damp1,sm,dmws,et,et1;
j = blockIdx.x * blockDim.x + threadIdx.x + nd2_txz[0];
i = blockIdx.y * blockDim.y + threadIdx.y + nd2_txz[6];
if (j > nd2_txz[5] || i > nd2_txz[11])
{
return;
}
// for (j = nd2_txz[0]; j <= nd2_txz[5]; j++)
// //do j=nd2_txz(1),nd2_txz(6)
// {
kodd = 2*((j+nyb2)&1)+1;
//kodd=2*mod(j+nyb2,2)+1
// for (i = nd2_txz[6]; i <= nd2_txz[11]; i++)
// //do i=nd2_txz(7),nd2_txz(12)
// {
jkq=((i+nxb2)&1)+kodd;
//jkq=mod(i+nxb2,2)+kodd
kb=0;
for (k = nd2_txz[16]; k <= nd2_txz[17]; k++)
//do k=nd2_txz(17),nd2_txz(18)
{
kb=kb+1;
damp2=1./(1.+damp2_z(i,j)*drti2(kb,1));
damp1=damp2*2.-1.;
inod=idmat2(k,i,j);
sm=2./(1./cmu(inod)+1./cmu(idmat2(k-1,i+1,j)));
irw=jkq+4*((k+nztop)&1);
//irw=jkq+4*mod(k+nztop,2)
et=epdt(irw);
et1=1.0-et;
dmws=qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw));
taoxz=t2xz(k,i,j)-t2xz_pz(kb,i,j);
if(i > nd2_txz[7] && i<nd2_txz[10]) {
//if(i>nd2_txz(8) .and. i<nd2_txz(11)) then
cusxz=(dxi2(1,i)*v2z(k,i-1,j)+dxi2(2,i)*v2z(k,i, j)+
dxi2(3,i)*v2z(k,i+1,j)+dxi2(4,i)*v2z(k,i+2,j))*sm;
qxz=qt2xz(k,i,j);
qt2xz(k,i,j)=qxz*et+dmws*cusxz*et1;
taoxz=taoxz+cusxz-qxz-qt2xz(k,i,j);
}
cusxz=sm*dzh2(2,k)/ca*(v2x(k-1,i,j)-v2x(k,i,j));
qxz=qt2xz_pz(kb,i,j);
qt2xz_pz(kb,i,j)=qxz*et+dmws*cusxz*et1;
t2xz_pz(kb,i,j)=damp1*t2xz_pz(kb,i,j)+
damp2*(cusxz-qxz-qt2xz_pz(kb,i,j));
t2xz(k,i,j)=taoxz+t2xz_pz(kb,i,j);
}
// }
// }
return;
}
//call stress_yz_PmlX_II
__global__ void stress_yz_PmlX_IIC(int nxb2,
int nyb2,
int nxbtm,
int nzbtm,
int nztop,
int lbx0,
int lbx1,
int *nd2_tyz,
int *idmat2M,
float *cmuM,
float *epdtM,
float *qwsM,
float *qwt1M,
float *qwt2M,
float *dyi2M,
float *dzh2M,
float *t2yzM,
float *qt2yzM,
float *v2yM,
float *v2zM)
//Compute the stress-yz at region of PML-x-II
//use grid_node_comm
//use wave_field_comm
//implicit NONE
//integer:: i,j,k,lb,kodd,jkq,inod,irw
//real:: cusyz,qyz,sm,dmws,et,et1
{
int i,j,k,lb,kodd,jkq,inod,irw;
float cusyz,qyz,sm,dmws,et,et1;
//if(lbx[0] > lbx[1]) return;
//if( lbx(1)>lbx(2) ) return
j = blockIdx.x * blockDim.x + threadIdx.x + nd2_tyz[2];
lb = blockIdx.y * blockDim.y + threadIdx.y + lbx0;
if (j > nd2_tyz[3] || lb > lbx1)
{
return;
}
// for (j=nd2_tyz[2]; j <= nd2_tyz[3]; j++)
// //do j=nd2_tyz(3),nd2_tyz(4)
// {
kodd=2*((j+nyb2)&1)+1;
//kodd=2*mod(j+nyb2,2)+1
// for (lb = lbx[0]; lb <= lbx[1]; lb++)
// //do lb=lbx(1),lbx(2)
// {
for (i = nd2_tyz[6+4*lb]; i <= nd2_tyz[7+4*lb]; i++)
//do i=nd2_tyz(7+4*lb),nd2_tyz(8+4*lb)
{
jkq=((i+nxb2)&1)+kodd;
//jkq=mod(i+nxb2,2)+kodd
for (k = nd2_tyz[12]; k <= nd2_tyz[15]; k++)
//do k=nd2_tyz(13),nd2_tyz(16)
{
inod=idmat2(k,i,j);
irw=jkq+4*((k+nztop)&1);
//irw=jkq+4*mod(k+nztop,2)
et=epdt(irw);
et1=1.0-et;
dmws=qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw));
cusyz=(dyi2(1,j)*v2z(k,i,j-1)+dyi2(2,j)*v2z(k,i,j )+
dyi2(3,j)*v2z(k,i,j+1)+dyi2(4,j)*v2z(k,i,j+2)+
dzh2(1,k)*v2y(k-2,i,j)+dzh2(2,k)*v2y(k-1,i,j)+
dzh2(3,k)*v2y(k, i,j)+dzh2(4,k)*v2y(k+1,i,j))/
(.5/cmu(inod)+.5/cmu(idmat2(k-1,i,j+1)));
qyz=qt2yz(k,i,j);
qt2yz(k,i,j)=qyz*et+dmws*cusyz*et1;
t2yz(k,i,j)=t2yz(k,i,j)+cusyz-qyz-qt2yz(k,i,j);
}
}
// }
// }
return;
}
//call stress_yz_PmlY_II
__global__ void stress_yz_PmlY_IIC(int nxb2,
int nyb2,
int mw2_pml1,
int nxbtm,
int nzbtm,
int nztop,
int lby0,
int lby1,
int *nd2_tyz,
int *idmat2M,
float ca,
float *drth2M,
float *damp2_yM,
float *cmuM,
float *epdtM,
float *qwsM,
float *qwt1M,
float *qwt2M,
float *dyi2M,
float *dzh2M,
float *t2yzM,
float *qt2yzM,
float *t2yz_pyM,
float *qt2yz_pyM,
float *v2yM,
float *v2zM)
//Compute the stress-yz at region of PML-y-II
//use grid_node_comm
//use wave_field_comm
//implicit NONE
//integer:: i,j,k,lb,jb,kb,kodd,jkq,inod,irw
//real:: taoyz,cusyz,qyz,rth,damp2,damp1,sm,dmws,et,et1
{
int i,j,k,lb,jb,kb,kodd,jkq,inod,irw;
float taoyz,cusyz,qyz,rth,damp2,damp1,sm,dmws,et,et1;
//if(lby[0] > lby[1]) return;
//if( lby(1)>lby(2) ) return
i = blockIdx.x * blockDim.x + threadIdx.x + nd2_tyz[6];
lb = blockIdx.y * blockDim.y + threadIdx.y + lby0;
if (i > nd2_tyz[11] || lb > lby1)
{
return;
}
jb = 0;
for (k = lby0; k < lb; k++)
{
for (j = nd2_tyz[4*k]; j <= nd2_tyz[1+4*k]; j++)
{
jb++;
}
}
// for (i = nd2_tyz[6]; i <= nd2_tyz[11]; i++)
// //do i=nd2_tyz(7),nd2_tyz(12)
// {
// jb=0;
// for (lb = lby[0]; lb <= lby[1]; lb++)
// //do lb=lby(1),lby(2)
// {
kb=0;
for (j = nd2_tyz[4*lb]; j <= nd2_tyz[1+4*lb]; j++)
//do j=nd2_tyz(1+4*lb),nd2_tyz(2+4*lb)
{
kb=kb+1;
jb=jb+1;
rth=drth2(kb,lb);
kodd=2*((j+nyb2)&1)+1;
//kodd=2*mod(j+nyb2,2)+1
jkq = ((i+nxb2)&1)+kodd;
//jkq=mod(i+nxb2,2)+kodd
for (k = nd2_tyz[12]; k <= nd2_tyz[17]; k++)
//do k=nd2_tyz(13),nd2_tyz(18)
{
damp2=1./(1.+damp2_y(k,i,lb)*rth);
damp1=damp2*2.-1.;
inod=idmat2(k,i,j);
sm=2./(1./cmu(inod)+1./cmu(idmat2(k-1,i,j+1)));
irw=jkq+4*((k+nztop)&1);
//irw=jkq+4*mod(k+nztop,2)
et=epdt(irw);
et1=1.0-et;
dmws=qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw));
taoyz=t2yz(k,i,j)-t2yz_py(k,i,jb);
if(k<nd2_tyz[16]) {
//if(k<nd2_tyz(17)) {
cusyz=(dzh2(1,k)*v2y(k-2,i,j)+dzh2(2,k)*v2y(k-1,i,j)+
dzh2(3,k)*v2y(k, i,j)+dzh2(4,k)*v2y(k+1,i,j))*sm;
qyz=qt2yz(k,i,j);
qt2yz(k,i,j)=qyz*et+dmws*cusyz*et1;
taoyz=taoyz+cusyz-qyz-qt2yz(k,i,j);
}
cusyz=sm*dyi2(2,j)/ca*(v2z(k,i,j)-v2z(k,i,j+1));
qyz=qt2yz_py(k,i,jb);
qt2yz_py(k,i,jb)=qyz*et+dmws*cusyz*et1;
t2yz_py(k,i,jb)=damp1*t2yz_py(k,i,jb)+
damp2*(cusyz-qyz-qt2yz_py(k,i,jb));
t2yz(k,i,j)=taoyz+t2yz_py(k,i,jb);
}
}
// }
// }
return;
}
//call stress_yz_PmlZ_II
__global__ void stress_yz_PmlZ_IIC(int nxb2,
int nyb2,
int mw2_pml1,
int nxbtm,
int nzbtm,
int nztop,
int *nd2_tyz,
int *idmat2M,
float ca,
float *drti2M,
float *damp2_zM,
float *cmuM,
float *epdtM,
float *qwsM,
float *qwt1M,
float *qwt2M,
float *dyi2M,
float *dzh2M,
float *t2yzM,
float *qt2yzM,
float *t2yz_pzM,
float *qt2yz_pzM,
float *v2yM,
float *v2zM)
//Compute the stress-yz at region of PML-z-II
//use grid_node_comm
//use wave_field_comm
//implicit NONE
//integer:: i,j,k,lb,kb,kodd,jkq,inod,irw
//real:: taoyz,cusyz,qyz,damp2,damp1,sm,dmws,et,et1
{
int i,j,k,lb,kb,kodd,jkq,inod,irw;
float taoyz,cusyz,qyz,damp2,damp1,sm,dmws,et,et1;
j = blockIdx.x * blockDim.x + threadIdx.x + nd2_tyz[0];
i = blockIdx.y * blockDim.y + threadIdx.y + nd2_tyz[6];
if (j > nd2_tyz[5] || i > nd2_tyz[11])
{
return;
}
// for (j = nd2_tyz[0]; j <= nd2_tyz[5]; j++)
// //do j=nd2_tyz(1),nd2_tyz(6)
// {
kodd=2*((j+nyb2)&1)+1;
//kodd=2*mod(j+nyb2,2)+1
// for (i = nd2_tyz[6]; i <= nd2_tyz[11]; i++)
// //do i=nd2_tyz(7),nd2_tyz(12)
// {
jkq = ((i+nxb2)&1)+kodd;
//jkq=mod(i+nxb2,2)+kodd
kb=0;
for (k = nd2_tyz[16]; k <= nd2_tyz[17]; k++)
//do k=nd2_tyz(17),nd2_tyz(18)
{
kb=kb+1;
damp2=1./(1.+damp2_z(i,j)*drti2(kb,1));
damp1=damp2*2.-1.;
inod=idmat2(k,i,j);
sm=2./(1./cmu(inod)+1./cmu(idmat2(k-1,i,j+1)));
irw=jkq+4*((k+nztop)&1);
//irw=jkq+4*mod(k+nztop,2);
et=epdt(irw);
et1=1.0-et;
dmws=qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw));
taoyz=t2yz(k,i,j)-t2yz_pz(kb,i,j);
if (j > nd2_tyz[1] && j<nd2_tyz[4]) {
//if(j>nd2_tyz(2) .and. j<nd2_tyz(5)) then
cusyz=(dyi2(1,j)*v2z(k,i,j-1)+dyi2(2,j)*v2z(k,i,j)+
dyi2(3,j)*v2z(k,i,j+1)+dyi2(4,j)*v2z(k,i,j+2))*sm;
qyz=qt2yz(k,i,j);
qt2yz(k,i,j)=qyz*et+dmws*cusyz*et1;
taoyz=taoyz+cusyz-qyz-qt2yz(k,i,j);
}
cusyz=sm*dzh2(2,k)/ca*(v2y(k-1,i,j)-v2y(k,i,j));
qyz=qt2yz_pz(kb,i,j);
qt2yz_pz(kb,i,j)=qyz*et+dmws*cusyz*et1;
t2yz_pz(kb,i,j)=damp1*t2yz_pz(kb,i,j)+
damp2*(cusyz-qyz-qt2yz_pz(kb,i,j));
t2yz(k,i,j)=taoyz+t2yz_pz(kb,i,j);
}
// }
// }
return;
}
#ifdef __cplusplus
extern "C" {
#endif
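// Host-side driver: copies the velocity inputs and current stress fields
// to the device, then launches the Region I and Region II stress kernels,
// guarding each PML launch on a non-empty slab range.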
void compute_stressC(int *nxb1, int *nyb1, int *nx1p1, int *ny1p1, int *nxtop, int *nytop, int *nztop, int *mw1_pml,
int *mw1_pml1, int *nmat, int *nll, int *lbx, int *lby, int *nd1_txy, int *nd1_txz,
int *nd1_tyy, int *nd1_tyz, int *idmat1M, float *ca, float *drti1M, float *drth1M, float *damp1_xM, float *damp1_yM,
float *clamdaM, float *cmuM, float *epdtM, float *qwpM, float *qwsM, float *qwt1M, float *qwt2M, float *dxh1M,
float *dyh1M, float *dzh1M, float *dxi1M, float *dyi1M, float *dzi1M, float *t1xxM, float *t1xyM, float *t1xzM,
float *t1yyM, float *t1yzM, float *t1zzM, float *qt1xxM, float *qt1xyM, float *qt1xzM, float *qt1yyM, float *qt1yzM,
float *qt1zzM, float *t1xx_pxM, float *t1xy_pxM, float *t1xz_pxM, float *t1yy_pxM, float *qt1xx_pxM, float *qt1xy_pxM,
float *qt1xz_pxM, float *qt1yy_pxM, float *t1xx_pyM, float *t1xy_pyM, float *t1yy_pyM, float *t1yz_pyM, float *qt1xx_pyM,
float *qt1xy_pyM, float *qt1yy_pyM, float *qt1yz_pyM, void **v1xMp, void **v1yMp, void **v1zMp,
int *nxb2, int *nyb2, int *nxbtm, int *nybtm, int *nzbtm, int *mw2_pml, int *mw2_pml1, int *nd2_txy, int *nd2_txz,
int *nd2_tyy, int *nd2_tyz, int *idmat2M,
float *drti2M, float *drth2M, float *damp2_xM, float *damp2_yM, float *damp2_zM,
float *t2xxM, float *t2xyM, float *t2xzM, float *t2yyM, float *t2yzM, float *t2zzM,
float *qt2xxM, float *qt2xyM, float *qt2xzM, float *qt2yyM, float *qt2yzM, float *qt2zzM,
float *dxh2M, float *dyh2M, float *dzh2M, float *dxi2M, float *dyi2M, float *dzi2M,
float *t2xx_pxM, float *t2xy_pxM, float *t2xz_pxM, float *t2yy_pxM, float *t2xx_pyM, float *t2xy_pyM,
float *t2yy_pyM, float *t2yz_pyM, float *t2xx_pzM, float *t2xz_pzM, float *t2yz_pzM, float *t2zz_pzM,
float *qt2xx_pxM, float *qt2xy_pxM, float *qt2xz_pxM, float *qt2yy_pxM, float *qt2xx_pyM, float *qt2xy_pyM,
float *qt2yy_pyM, float *qt2yz_pyM, float *qt2xx_pzM, float *qt2xz_pzM, float *qt2yz_pzM, float *qt2zz_pzM,
void **v2xMp, void **v2yMp, void **v2zMp, int *myid)
{
//printf("[CUDA] stress computation:\n");
float *v1xM, *v1yM, *v1zM, *v2xM, *v2yM, *v2zM;
int blockSizeX = 8;
int blockSizeY = 8;
dim3 dimBlock(blockSizeX, blockSizeY);
v1xM = (float *) *v1xMp;
v1yM = (float *) *v1yMp;
v1zM = (float *) *v1zMp;
v2xM = (float *) *v2xMp;
v2yM = (float *) *v2yMp;
v2zM = (float *) *v2zMp;
gettimeofday(&t1, NULL);
cpy_h2d_stressInputsC(v1xM, v1yM, v1zM, v2xM, v2yM, v2zM, nxtop, nytop, nztop, nxbtm, nybtm, nzbtm);
cpy_h2d_stressOutputsC(t1xxM, t1xyM, t1xzM, t1yyM, t1yzM, t1zzM, t2xxM, t2xyM, t2xzM,
t2yyM, t2yzM, t2zzM, nxtop, nytop, nztop, nxbtm, nybtm, nzbtm);
gettimeofday(&t2, NULL);
tmpTime = 1000.0 * (t2.tv_sec - t1.tv_sec) + (t2.tv_usec - t1.tv_usec) / 1000.0;
totalTimeH2DS += tmpTime;
gettimeofday(&t1, NULL);
int gridSizeX1 = (nd1_tyy[3] - nd1_tyy[2])/blockSizeX + 1;
int gridSizeY1 = (nd1_tyy[9] - nd1_tyy[8])/blockSizeY + 1;
dim3 dimGrid1(gridSizeX1, gridSizeY1);
//int size = (*nztop) * (*nxtop + 3) * (*nytop);
//cpy_d2h_stressOutputsC(t1xxM, t1xyM, t1xzM, t1yyM, t1yzM, t1zzM, t2xxM, t2xyM, t2xzM, t2yyM, t2yzM, t2zzM, nxtop, nytop, nztop, nxbtm, nybtm, nzbtm);
//write_output(t1xxM, size, "OUTPUT_ARRAYS/t1xxM0.txt");
stress_norm_xy_IC<<<dimGrid1, dimBlock>>>(*nxb1,
*nyb1,
*nxtop,
*nztop,
nd1_tyyD,
idmat1D,
*ca,
clamdaD,
cmuD,
epdtD,
qwpD,
qwsD,
qwt1D,
qwt2D,
dxh1D,
dyh1D,
dxi1D,
dyi1D,
dzi1D,
t1xxD,
t1xyD,
t1yyD,
t1zzD,
qt1xxD,
qt1xyD,
qt1yyD,
qt1zzD,
v1xD,
v1yD,
v1zD);
int gridSizeX2 = (nd1_tyz[3] - nd1_tyz[2])/blockSizeX + 1;
int gridSizeY2 = (nd1_tyz[9] - nd1_tyz[8])/blockSizeY + 1;
dim3 dimGrid2(gridSizeX2, gridSizeY2);
// for debug
//cpy_d2h_stressOutputsC(t1xxM, t1xyM, t1xzM, t1yyM, t1yzM, t1zzM, t2xxM, t2xyM, t2xzM, t2yyM, t2yzM, t2zzM, nxtop, nytop, nztop, nxbtm, nybtm, nzbtm);
//write_output(t1xxM, size, "OUTPUT_ARRAYS/t1xxM1.txt");
stress_xz_yz_IC<<<dimGrid2, dimBlock>>>(*nxb1,
*nyb1,
*nxtop,
*nytop,
*nztop,
nd1_tyzD,
idmat1D,
*ca,
cmuD,
epdtD,
qwsD,
qwt1D,
qwt2D,
dxi1D,
dyi1D,
dzh1D,
v1xD,
v1yD,
v1zD,
t1xzD,
t1yzD,
qt1xzD,
qt1yzD);
// for debug
//cpy_d2h_stressOutputsC(t1xxM, t1xyM, t1xzM, t1yyM, t1yzM, t1zzM, t2xxM, t2xyM, t2xzM, t2yyM, t2yzM, t2zzM, nxtop, nytop, nztop, nxbtm, nybtm, nzbtm);
//write_output(t1xxM, size, "OUTPUT_ARRAYS/t1xxM2.txt");
int gridSizeX3Temp1 = ((*ny1p1) + 1)/blockSizeX + 1;
int gridSizeX3Temp2 = ((*nytop) - 1)/blockSizeX + 1;
int gridSizeY3Temp1 = ((*nxtop) - 1)/blockSizeY + 1;
int gridSizeY3Temp2 = ((*nx1p1) + 1)/blockSizeY + 1;
int gridSizeX3 = (gridSizeX3Temp1 > gridSizeX3Temp2) ? gridSizeX3Temp1 : gridSizeX3Temp2;
int gridSizeY3 = (gridSizeY3Temp1 > gridSizeY3Temp2) ? gridSizeY3Temp1 : gridSizeY3Temp2;
dim3 dimGrid3(gridSizeX3, gridSizeY3);
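// stress_resetVars is assumed to clear t1xz/t1yz along the grid edges; the grid
// is sized as the max of two configurations so a single launch covers both the
// x and y boundary strips.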
stress_resetVars<<<dimGrid3, dimBlock>>>(*ny1p1,
*nx1p1,
*nxtop,
*nytop,
*nztop,
t1xzD,
t1yzD);
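// The PML side layers are conditional: lbx/lby hold the first and last PML
// block index in x/y, so an empty range (lb[1] < lb[0]) skips the launches.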
if (lbx[1] >= lbx[0])
{
int gridSizeX4 = (nd1_tyy[5] - nd1_tyy[0])/blockSizeX + 1;
int gridSizeY4 = (lbx[1] - lbx[0])/blockSizeY + 1;
dim3 dimGrid4(gridSizeX4, gridSizeY4);
//debug
/*float *t1xx_px=(float*)malloc(sizeof(float) * (*nztop) * ((lbx[1] - lbx[0] + 1) * (*mw1_pml) + lbx[1]) * (*nytop));
cudaMemcpy(t1xx_px, t1xx_pxD, sizeof(float) * (*nztop) * ((lbx[1] - lbx[0] + 1) * (*mw1_pml) + lbx[1]) * (*nytop), cudaMemcpyDeviceToHost);
write_output(t1xx_px, (*nztop) * ((lbx[1] - lbx[0] + 1) * (*mw1_pml) + lbx[1]) * (*nytop), "OUTPUT_ARRAYS/t1xx_px_cuda.txt");*/
stress_norm_PmlX_IC<<<dimGrid4, dimBlock>>>(*nxb1,
*nyb1,
*nxtop,
*nytop,
*nztop,
*mw1_pml,
*mw1_pml1,
lbx[0],
lbx[1],
nd1_tyyD,
idmat1D,
*ca,
drti1D,
damp1_xD,
clamdaD,
cmuD,
epdtD,
qwpD,
qwsD,
qwt1D,
qwt2D,
dzi1D,
dxh1D,
dyh1D,
v1xD,
v1yD,
v1zD,
t1xxD,
t1yyD,
t1zzD,
t1xx_pxD,
t1yy_pxD,
qt1xxD,
qt1yyD,
qt1zzD,
qt1xx_pxD,
qt1yy_pxD);
// for debug
//cpy_d2h_stressOutputsC(t1xxM, t1xyM, t1xzM, t1yyM, t1yzM, t1zzM, t2xxM, t2xyM, t2xzM, t2yyM, t2yzM, t2zzM, nxtop, nytop, nztop, nxbtm, nybtm, nzbtm);
//write_output(t1xxM, size, "OUTPUT_ARRAYS/t1xxM3.txt");
}
if (lby[1] >= lby[0])
{
int gridSizeX5 = (nd1_tyy[11] - nd1_tyy[6])/blockSizeX + 1;
int gridSizeY5 = (lby[1] - lby[0])/blockSizeY + 1;
dim3 dimGrid5(gridSizeX5, gridSizeY5);
stress_norm_PmlY_IC<<<dimGrid5, dimBlock>>>(*nxb1,
*nyb1,
*mw1_pml1,
*nxtop,
*nztop,
lby[0],
lby[1],
nd1_tyyD,
idmat1D,
*ca,
drti1D,
damp1_yD,
clamdaD,
cmuD,
epdtD,
qwpD,
qwsD,
qwt1D,
qwt2D,
dxh1D,
dyh1D,
dzi1D,
t1xxD,
t1yyD,
t1zzD,
qt1xxD,
qt1yyD,
qt1zzD,
t1xx_pyD,
t1yy_pyD,
qt1xx_pyD,
qt1yy_pyD,
v1xD,
v1yD,
v1zD);
// for debug
//cpy_d2h_stressOutputsC(t1xxM, t1xyM, t1xzM, t1yyM, t1yzM, t1zzM, t2xxM, t2xyM, t2xzM, t2yyM, t2yzM, t2zzM, nxtop, nytop, nztop, nxbtm, nybtm, nzbtm);
//write_output(t1xxM, size, "OUTPUT_ARRAYS/t1xxM4.txt");
}
if (lbx[1] >= lbx[0])
{
int gridSizeX6 = (nd1_txy[5] - nd1_txy[0])/blockSizeX + 1;
int gridSizeY6 = (lbx[1] - lbx[0])/blockSizeY + 1;
dim3 dimGrid6(gridSizeX6, gridSizeY6);
stress_xy_PmlX_IC<<<dimGrid6, dimBlock>>>(*nxb1,
*nyb1,
*mw1_pml,
*mw1_pml1,
*nxtop,
*nytop,
*nztop,
lbx[0],
lbx[1],
nd1_txyD,
idmat1D,
*ca,
drth1D,
damp1_xD,
cmuD,
epdtD,
qwsD,
qwt1D,
qwt2D,
dxi1D,
dyi1D,
t1xyD,
qt1xyD,
t1xy_pxD,
qt1xy_pxD,
v1xD,
v1yD);
// for debug
//cpy_d2h_stressOutputsC(t1xxM, t1xyM, t1xzM, t1yyM, t1yzM, t1zzM, t2xxM, t2xyM, t2xzM, t2yyM, t2yzM, t2zzM, nxtop, nytop, nztop, nxbtm, nybtm, nzbtm);
//write_output(t1xxM, size, "OUTPUT_ARRAYS/t1xxM5.txt");
}
if (lby[1] >= lby[0])
{
int gridSizeX7 = (nd1_txy[11] - nd1_txy[6])/blockSizeX + 1;
int gridSizeY7 = (lby[1] - lby[0])/blockSizeY + 1;
dim3 dimGrid7(gridSizeX7, gridSizeY7);
stress_xy_PmlY_IC<<<dimGrid7, dimBlock>>>(*nxb1,
*nyb1,
*mw1_pml1,
*nxtop,
*nztop,
lby[0],
lby[1],
nd1_txyD,
idmat1D,
*ca,
drth1D,
damp1_yD,
cmuD,
epdtD,
qwsD,
qwt1D,
qwt2D,
dxi1D,
dyi1D,
t1xyD,
qt1xyD,
t1xy_pyD,
qt1xy_pyD,
v1xD,
v1yD);
// for debug
//cpy_d2h_stressOutputsC(t1xxM, t1xyM, t1xzM, t1yyM, t1yzM, t1zzM, t2xxM, t2xyM, t2xzM, t2yyM, t2yzM, t2zzM, nxtop, nytop, nztop, nxbtm, nybtm, nzbtm);
//write_output(t1xxM, size, "OUTPUT_ARRAYS/t1xxM6.txt");
}
if (lbx[1] >= lbx[0])
{
int gridSizeX8 = (nd1_txz[5] - nd1_txz[0])/blockSizeX + 1;
int gridSizeY8 = (lbx[1] - lbx[0])/blockSizeY + 1;
dim3 dimGrid8(gridSizeX8, gridSizeY8);
stress_xz_PmlX_IC<<<dimGrid8, dimBlock>>>(*nxb1,
*nyb1,
*nxtop,
*nytop,
*nztop,
*mw1_pml,
*mw1_pml1,
lbx[0],
lbx[1],
nd1_txzD,
idmat1D,
*ca,
drth1D,
damp1_xD,
cmuD,
epdtD,
qwsD,
qwt1D,
qwt2D,
dxi1D,
dzh1D,
t1xzD,
qt1xzD,
t1xz_pxD,
qt1xz_pxD,
v1xD,
v1zD);
// for debug
//cpy_d2h_stressOutputsC(t1xxM, t1xyM, t1xzM, t1yyM, t1yzM, t1zzM, t2xxM, t2xyM, t2xzM, t2yyM, t2yzM, t2zzM, nxtop, nytop, nztop, nxbtm, nybtm, nzbtm);
//write_output(t1xxM, size, "OUTPUT_ARRAYS/t1xxM7.txt");
}
if (lby[1] >= lby[0])
{
int gridSizeX9 = (nd1_txz[9] - nd1_txz[8])/blockSizeX + 1;
int gridSizeY9 = (lby[1] - lby[0])/blockSizeY + 1;
dim3 dimGrid9(gridSizeX9, gridSizeY9);
stress_xz_PmlY_IC<<<dimGrid9, dimBlock>>>(*nxb1,
*nyb1,
*nxtop,
*nztop,
lby[0],
lby[1],
nd1_txzD,
idmat1D,
*ca,
cmuD,
epdtD,
qwsD,
qwt1D,
qwt2D,
dxi1D,
dzh1D,
t1xzD,
qt1xzD,
v1xD,
v1zD);
// for debug
//cpy_d2h_stressOutputsC(t1xxM, t1xyM, t1xzM, t1yyM, t1yzM, t1zzM, t2xxM, t2xyM, t2xzM, t2yyM, t2yzM, t2zzM, nxtop, nytop, nztop, nxbtm, nybtm, nzbtm);
//write_output(t1xxM, size, "OUTPUT_ARRAYS/t1xxM8.txt");
}
if (lbx[1] >= lbx[0])
{
int gridSizeX10 = (nd1_tyz[3] - nd1_tyz[2])/blockSizeX + 1;
int gridSizeY10 = (lbx[1] - lbx[0])/blockSizeY + 1;
dim3 dimGrid10(gridSizeX10, gridSizeY10);
stress_yz_PmlX_IC<<<dimGrid10, dimBlock>>>(*nxb1,
*nyb1,
*nztop,
*nxtop,
lbx[0],
lbx[1],
nd1_tyzD,
idmat1D,
*ca,
cmuD,
epdtD,
qwsD,
qwt1D,
qwt2D,
dyi1D,
dzh1D,
t1yzD,
qt1yzD,
v1yD,
v1zD);
// for debug
//cpy_d2h_stressOutputsC(t1xxM, t1xyM, t1xzM, t1yyM, t1yzM, t1zzM, t2xxM, t2xyM, t2xzM, t2yyM, t2yzM, t2zzM, nxtop, nytop, nztop, nxbtm, nybtm, nzbtm);
//write_output(t1xxM, size, "OUTPUT_ARRAYS/t1xxM9.txt");
}
if (lby[1] >= lby[0])
{
int gridSizeX11 = (nd1_tyz[11] - nd1_tyz[6])/blockSizeX + 1;
int gridSizeY11 = (lby[1] - lby[0])/blockSizeY + 1;
dim3 dimGrid11(gridSizeX11, gridSizeY11);
stress_yz_PmlY_IC<<<dimGrid11,dimBlock>>>(*nxb1,
*nyb1,
*mw1_pml1,
*nxtop,
*nztop,
lby[0],
lby[1],
nd1_tyzD,
idmat1D,
*ca,
drth1D,
damp1_yD,
cmuD,
epdtD,
qwsD,
qwt1D,
qwt2D,
dyi1D,
dzh1D,
t1yzD,
qt1yzD,
t1yz_pyD,
qt1yz_pyD,
v1yD,
v1zD);
// for debug
//cpy_d2h_stressOutputsC(t1xxM, t1xyM, t1xzM, t1yyM, t1yzM, t1zzM, t2xxM, t2xyM, t2xzM, t2yyM, t2yzM, t2zzM, nxtop, nytop, nztop, nxbtm, nybtm, nzbtm);
//write_output(t1xxM, size, "OUTPUT_ARRAYS/t1xxM10.txt");
}
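// Region II (bottom grid): interior normal/xy and xz/yz stress updates, then
// the PML-X/Y/Z layers; the PML-Z kernels are launched unconditionally.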
int gridSizeX12 = (nd2_tyy[3] - nd2_tyy[2])/blockSizeX + 1;
int gridSizeY12 = (nd2_tyy[9] - nd2_tyy[8])/blockSizeY + 1;
dim3 dimGrid12(gridSizeX12, gridSizeY12);
stress_norm_xy_II<<<dimGrid12, dimBlock>>>(*nxb2,
*nyb2,
*nxbtm,
*nzbtm,
*nztop,
nd2_tyyD,
idmat2D,
clamdaD,
cmuD,
epdtD,
qwpD,
qwsD,
qwt1D,
qwt2D,
t2xxD,
t2xyD,
t2yyD,
t2zzD,
qt2xxD,
qt2xyD,
qt2yyD,
qt2zzD,
dxh2D,
dyh2D,
dxi2D,
dyi2D,
dzi2D,
v2xD,
v2yD,
v2zD);
// for debug
//cpy_d2h_stressOutputsC(t1xxM, t1xyM, t1xzM, t1yyM, t1yzM, t1zzM, t2xxM, t2xyM, t2xzM, t2yyM, t2yzM, t2zzM, nxtop, nytop, nztop, nxbtm, nybtm, nzbtm);
//write_output(t1xxM, size, "OUTPUT_ARRAYS/t1xxM11.txt");
int gridSizeX13 = (nd2_tyz[3] - nd2_tyz[2])/blockSizeX + 1;
int gridSizeY13 = (nd2_tyz[9] - nd2_tyz[8])/blockSizeY + 1;
dim3 dimGrid13(gridSizeX13, gridSizeY13);
stress_xz_yz_IIC<<<dimGrid13, dimBlock>>>(*nxb2,
*nyb2,
*nztop,
*nxbtm,
*nzbtm,
nd2_tyzD,
idmat2D,
cmuD,
epdtD,
qwsD,
qwt1D,
qwt2D,
dxi2D,
dyi2D,
dzh2D,
t2xzD,
t2yzD,
qt2xzD,
qt2yzD,
v2xD,
v2yD,
v2zD);
// for debug
//cpy_d2h_stressOutputsC(t1xxM, t1xyM, t1xzM, t1yyM, t1yzM, t1zzM, t2xxM, t2xyM, t2xzM, t2yyM, t2yzM, t2zzM, nxtop, nytop, nztop, nxbtm, nybtm, nzbtm);
//write_output(t1xxM, size, "OUTPUT_ARRAYS/t1xxM12.txt");
if (lbx[1] >= lbx[0])
{
int gridSizeX14 = (nd2_tyy[5] - nd2_tyy[0])/blockSizeX + 1;
int gridSizeY14 = (lbx[1] - lbx[0])/blockSizeY + 1;
dim3 dimGrid14(gridSizeX14, gridSizeY14);
stress_norm_PmlX_IIC<<<dimGrid14, dimBlock>>>(*nxb2,
*nyb2,
*mw2_pml,
*mw2_pml1,
*nztop,
*nxbtm,
*nybtm,
*nzbtm,
lbx[0],
lbx[1],
nd2_tyyD,
idmat2D,
*ca,
drti2D,
damp2_xD,
clamdaD,
cmuD,
epdtD,
qwpD,
qwsD,
qwt1D,
qwt2D,
dxh2D,
dyh2D,
dzi2D,
t2xxD,
t2yyD,
t2zzD,
qt2xxD,
qt2yyD,
qt2zzD,
t2xx_pxD,
t2yy_pxD,
qt2xx_pxD,
qt2yy_pxD,
v2xD,
v2yD,
v2zD);
// for debug
//cpy_d2h_stressOutputsC(t1xxM, t1xyM, t1xzM, t1yyM, t1yzM, t1zzM, t2xxM, t2xyM, t2xzM, t2yyM, t2yzM, t2zzM, nxtop, nytop, nztop, nxbtm, nybtm, nzbtm);
//write_output(t1xxM, size, "OUTPUT_ARRAYS/t1xxM13.txt");
}
if (lby[1] >= lby[0])
{
int gridSizeX15 = (nd2_tyy[11] - nd2_tyy[6])/blockSizeX + 1;
int gridSizeY15 = (lby[1] - lby[0])/blockSizeY + 1;
dim3 dimGrid15(gridSizeX15, gridSizeY15);
stress_norm_PmlY_II<<<dimGrid15, dimBlock>>>(*nxb2,
*nyb2,
*nztop,
*nxbtm,
*nzbtm,
*mw2_pml1,
lby[0],
lby[1],
nd2_tyyD,
idmat2D,
*ca,
drti2D,
damp2_yD,
clamdaD,
cmuD,
epdtD,
qwpD,
qwsD,
qwt1D,
qwt2D,
dxh2D,
dyh2D,
dzi2D,
t2xxD,
t2yyD,
t2zzD,
qt2xxD,
qt2yyD,
qt2zzD,
t2xx_pyD,
t2yy_pyD,
qt2xx_pyD,
qt2yy_pyD,
v2xD,
v2yD,
v2zD);
// for debug
//cpy_d2h_stressOutputsC(t1xxM, t1xyM, t1xzM, t1yyM, t1yzM, t1zzM, t2xxM, t2xyM, t2xzM, t2yyM, t2yzM, t2zzM, nxtop, nytop, nztop, nxbtm, nybtm, nzbtm);
//write_output(t1xxM, size, "OUTPUT_ARRAYS/t1xxM14.txt");
}
int gridSizeX16 = (nd2_tyy[5] - nd2_tyy[0])/blockSizeX + 1;
int gridSizeY16 = (nd2_tyy[11] - nd2_tyy[6])/blockSizeY + 1;
dim3 dimGrid16(gridSizeX16, gridSizeY16);
stress_norm_PmlZ_IIC<<<dimGrid16, dimBlock>>>(*nxb2,
*nyb2,
*mw2_pml,
*mw2_pml1,
*nztop,
*nxbtm,
*nzbtm,
nd2_tyyD,
idmat2D,
*ca,
damp2_zD,
drth2D,
clamdaD,
cmuD,
epdtD,
qwpD,
qwsD,
qwt1D,
qwt2D,
dxh2D,
dyh2D,
dzi2D,
t2xxD,
t2yyD,
t2zzD,
qt2xxD,
qt2yyD,
qt2zzD,
t2xx_pzD,
t2zz_pzD,
qt2xx_pzD,
qt2zz_pzD,
v2xD,
v2yD,
v2zD);
// for debug
//cpy_d2h_stressOutputsC(t1xxM, t1xyM, t1xzM, t1yyM, t1yzM, t1zzM, t2xxM, t2xyM, t2xzM, t2yyM, t2yzM, t2zzM, nxtop, nytop, nztop, nxbtm, nybtm, nzbtm);
//write_output(t1xxM, size, "OUTPUT_ARRAYS/t1xxM15.txt");
if (lbx[1] >= lbx[0])
{
int gridSizeX17 = (nd2_txy[5] - nd2_txy[0])/blockSizeX + 1;
int gridSizeY17 = (lbx[1] - lbx[0])/blockSizeY + 1;
dim3 dimGrid17(gridSizeX17, gridSizeY17);
stress_xy_PmlX_IIC<<<dimGrid17, dimBlock>>>(*nxb2,
*nyb2,
*mw2_pml,
*mw2_pml1,
*nxbtm,
*nybtm,
*nzbtm,
*nztop,
lbx[0],
lbx[1],
nd2_txyD,
idmat2D,
*ca,
drth2D,
damp2_xD,
cmuD,
epdtD,
qwsD,
qwt1D,
qwt2D,
dxi2D,
dyi2D,
t2xyD,
qt2xyD,
t2xy_pxD,
qt2xy_pxD,
v2xD,
v2yD);
}
if (lby[1] >= lby[0])
{
int gridSizeX18 = (nd2_txy[11] - nd2_txy[6])/blockSizeX + 1;
int gridSizeY18 = (lby[1] - lby[0])/blockSizeY + 1;
dim3 dimGrid18(gridSizeX18, gridSizeY18);
stress_xy_PmlY_IIC<<<dimGrid18, dimBlock>>>(*nxb2,
*nyb2,
*mw2_pml1,
*nztop,
*nxbtm,
*nzbtm,
lby[0],
lby[1],
nd2_txyD,
idmat2D,
*ca,
drth2D,
damp2_yD,
cmuD,
epdtD,
qwsD,
qwt1D,
qwt2D,
dxi2D,
dyi2D,
t2xyD,
qt2xyD,
t2xy_pyD,
qt2xy_pyD,
v2xD,
v2yD);
}
int gridSizeX19 = (nd2_txy[3] - nd2_txy[2])/blockSizeX + 1;
int gridSizeY19 = (nd2_txy[9] - nd2_txy[8])/blockSizeY + 1;
dim3 dimGrid19(gridSizeX19, gridSizeY19);
stress_xy_PmlZ_II<<<dimGrid19, dimBlock>>>(*nxb2,
*nyb2,
*nxbtm,
*nzbtm,
*nztop,
nd2_txyD,
idmat2D,
cmuD,
epdtD,
qwsD,
qwt1D,
qwt2D,
dxi2D,
dyi2D,
t2xyD,
qt2xyD,
v2xD,
v2yD);
if (lbx[1] >= lbx[0])
{
int gridSizeX20 = (nd2_txz[5] - nd2_txz[0])/blockSizeX + 1;
int gridSizeY20 = (lbx[1] - lbx[0])/blockSizeY + 1;
dim3 dimGrid20(gridSizeX20, gridSizeY20);
stress_xz_PmlX_IIC<<<dimGrid20, dimBlock>>>(*nxb2,
*nyb2,
*mw2_pml,
*mw2_pml1,
*nxbtm,
*nybtm,
*nzbtm,
*nztop,
lbx[0],
lbx[1],
nd2_txzD,
idmat2D,
*ca,
drth2D,
damp2_xD,
cmuD,
epdtD,
qwsD,
qwt1D,
qwt2D,
dxi2D,
dzh2D,
t2xzD,
qt2xzD,
t2xz_pxD,
qt2xz_pxD,
v2xD,
v2zD);
}
if (lby[1] >= lby[0])
{
int gridSizeX21 = (nd2_txz[9] - nd2_txz[8])/blockSizeX + 1;
int gridSizeY21 = (lby[1] - lby[0])/blockSizeY + 1;
dim3 dimGrid21(gridSizeX21, gridSizeY21);
stress_xz_PmlY_IIC<<<dimGrid21, dimBlock>>>(*nxb2,
*nyb2,
*nxbtm,
*nzbtm,
*nztop,
lby[0],
lby[1],
nd2_txzD,
idmat2D,
cmuD,
epdtD,
qwsD,
qwt1D,
qwt2D,
dxi2D,
dzh2D,
v2xD,
v2zD,
t2xzD,
qt2xzD);
}
int gridSizeX22 = (nd2_txz[5] - nd2_txz[0])/blockSizeX + 1;
int gridSizeY22 = (nd2_txz[11] - nd2_txz[6])/blockSizeY + 1;
dim3 dimGrid22(gridSizeX22, gridSizeY22);
stress_xz_PmlZ_IIC<<<dimGrid22, dimBlock>>>(*nxb2,
*nyb2,
*mw2_pml1,
*nxbtm,
*nzbtm,
*nztop,
nd2_txzD,
idmat2D,
*ca,
drti2D,
damp2_zD,
cmuD,
epdtD,
qwsD,
qwt1D,
qwt2D,
dxi2D,
dzh2D,
t2xzD,
qt2xzD,
t2xz_pzD,
qt2xz_pzD,
v2xD,
v2zD);
if (lbx[1] >= lbx[0])
{
int gridSizeX23 = (nd2_tyz[3] - nd2_tyz[2])/blockSizeX + 1;
int gridSizeY23 = (lbx[1] - lbx[0])/blockSizeY + 1;
dim3 dimGrid23(gridSizeX23, gridSizeY23);
stress_yz_PmlX_IIC<<<dimGrid23, dimBlock>>>(*nxb2,
*nyb2,
*nxbtm,
*nzbtm,
*nztop,
lbx[0],
lbx[1],
nd2_tyzD,
idmat2D,
cmuD,
epdtD,
qwsD,
qwt1D,
qwt2D,
dyi2D,
dzh2D,
t2yzD,
qt2yzD,
v2yD,
v2zD);
}
if (lby[1] >= lby[0])
{
int gridSizeX24 = (nd2_tyz[11] - nd2_tyz[6])/blockSizeX + 1;
int gridSizeY24 = (lby[1] - lby[0])/blockSizeY + 1;
dim3 dimGrid24(gridSizeX24, gridSizeY24);
stress_yz_PmlY_IIC<<<dimGrid24, dimBlock>>>(*nxb2,
*nyb2,
*mw2_pml1,
*nxbtm,
*nzbtm,
*nztop,
lby[0],
lby[1],
nd2_tyzD,
idmat2D,
*ca,
drth2D,
damp2_yD,
cmuD,
epdtD,
qwsD,
qwt1D,
qwt2D,
dyi2D,
dzh2D,
t2yzD,
qt2yzD,
t2yz_pyD,
qt2yz_pyD,
v2yD,
v2zD);
}
int gridSizeX25 = (nd2_tyz[5] - nd2_tyz[0])/blockSizeX + 1;
int gridSizeY25 = (nd2_tyz[11] - nd2_tyz[6])/blockSizeY + 1;
dim3 dimGrid25(gridSizeX25, gridSizeY25);
stress_yz_PmlZ_IIC<<<dimGrid25, dimBlock>>>(*nxb2,
*nyb2,
*mw2_pml1,
*nxbtm,
*nzbtm,
*nztop,
nd2_tyzD,
idmat2D,
*ca,
drti2D,
damp2_zD,
cmuD,
epdtD,
qwsD,
qwt1D,
qwt2D,
dyi2D,
dzh2D,
t2yzD,
qt2yzD,
t2yz_pzD,
qt2yz_pzD,
v2yD,
v2zD);
cudaDeviceSynchronize(); // replaces deprecated cudaThreadSynchronize()
gettimeofday(&t2, NULL);
tmpTime = 1000.0 * (t2.tv_sec - t1.tv_sec) + (t2.tv_usec - t1.tv_usec) / 1000.0;
totalTimeCompS += tmpTime;
gettimeofday(&t1, NULL);
cpy_d2h_stressOutputsC(t1xxM, t1xyM, t1xzM, t1yyM, t1yzM, t1zzM, t2xxM, t2xyM, t2xzM, t2yyM,
t2yzM, t2zzM, nxtop, nytop, nztop, nxbtm, nybtm, nzbtm);
gettimeofday(&t2, NULL);
tmpTime = 1000.0 * (t2.tv_sec - t1.tv_sec) + (t2.tv_usec - t1.tv_usec) / 1000.0;
totalTimeD2HS += tmpTime;
// for debug
// int size = (*nztop) * (*nxtop + 3) * (*nytop);
// write_output(t1xxM, size, "OUTPUT_ARRAYS/t1xxM.txt");
// size = (*nztop) * (*nxtop + 3) * (*nytop + 3);
// write_output(t1xyM, size, "OUTPUT_ARRAYS/t1xyM.txt");
// size = (*nztop + 1) * (*nxtop + 3) * (*nytop);
// write_output(t1xzM, size, "OUTPUT_ARRAYS/t1xzM.txt");
// size = (*nztop) * (*nxtop) * (*nytop + 3);
// write_output(t1yyM, size, "OUTPUT_ARRAYS/t1yyM.txt");
// size = (*nztop + 1) * (*nxtop) * (*nytop + 3);
// write_output(t1yzM, size, "OUTPUT_ARRAYS/t1yzM.txt");
// size = (*nztop) * (*nxtop) * (*nytop);
// write_output(t1zzM, size, "OUTPUT_ARRAYS/t1zzM.txt");
// size = (*nzbtm) * (*nxbtm + 3) * (*nybtm);
// write_output(t2xxM, size, "OUTPUT_ARRAYS/t2xxM.txt");
// size = (*nzbtm) * (*nxbtm + 3) * (*nybtm + 3);
// write_output(t2xyM, size, "OUTPUT_ARRAYS/t2xyM.txt");
// size = (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm);
// write_output(t2xzM, size, "OUTPUT_ARRAYS/t2xzM.txt");
// size = (*nzbtm) * (*nxbtm) * (*nybtm + 3);
// write_output(t2yyM, size, "OUTPUT_ARRAYS/t2yyM.txt");
// size = (*nzbtm + 1) * (*nxbtm) * (*nybtm + 3);
// write_output(t2yzM, size, "OUTPUT_ARRAYS/t2yzM.txt");
// size = (*nzbtm + 1) * (*nxbtm) * (*nybtm);
// write_output(t2zzM, size, "OUTPUT_ARRAYS/t2zzM.txt");
/*************** correctness *******************/
/*
FILE *fp;
// cudaRes = cudaMalloc((void **)&v1xD, sizeof(float) * (*nztop + 2) * (*nxtop + 3) * (*nytop + 3));
// CHECK_ERROR(cudaRes, "Allocate Device Memory1, v1x");
// cudaRes = cudaMalloc((void **)&v1yD, sizeof(float) * (*nztop + 2) * (*nxtop + 3) * (*nytop + 3));
// CHECK_ERROR(cudaRes, "Allocate Device Memory1, v1y");
// cudaRes = cudaMalloc((void **)&v1zD, sizeof(float) * (*nztop + 2) * (*nxtop + 3) * (*nytop + 3));
// CHECK_ERROR(cudaRes, "Allocate Device Memory1, v1z");
const char* filename = "v1x.txt";
const char* filename1 = "v1y.txt";
const char* filename2 = "v1z.txt";
int i;
if((fp = fopen(filename, "w+")) == NULL)
fprintf(stderr, "File write error!\n");
for(i = 0; i< (*nztop + 2) * (*nxtop + 3) * (*nytop + 3); i++ )
{
fprintf(fp, "%f ", v1xM[i]);
}
fprintf(fp, "\n");
fclose(fp);
if((fp = fopen(filename1, "w+")) == NULL)
fprintf(stderr, "File write error!\n");
for(i = 0; i< (*nztop + 2) * (*nxtop + 3) * (*nytop + 3); i++ )
{
fprintf(fp, "%f ", v1yM[i]);
}
fprintf(fp, "\n");
fclose(fp);
if((fp = fopen(filename2, "w+")) == NULL)
fprintf(stderr, "File write error!\n");
for(i = 0; i< (*nztop + 2) * (*nxtop + 3) * (*nytop + 3); i++ )
{
fprintf(fp, "%f ", v1zM[i]);
}
fprintf(fp, "\n");
fclose(fp);
// cudaRes = cudaMalloc((void **)&t1xxD, sizeof(float) * (*nztop) * (*nxtop + 3) * (*nytop));
// CHECK_ERROR(cudaRes, "Allocate Device Memory1, t1xx");
// cudaRes = cudaMalloc((void **)&t1xyD, sizeof(float) * (*nztop) * (*nxtop + 3) * (*nytop + 3));
// CHECK_ERROR(cudaRes, "Allocate Device Memory1, t1xy");
// cudaRes = cudaMalloc((void **)&t1xzD, sizeof(float) * (*nztop + 1) * (*nxtop + 3) * (*nytop));
// CHECK_ERROR(cudaRes, "Allocate Device Memory1, t1xz");
// cudaRes = cudaMalloc((void **)&t1yyD, sizeof(float) * (*nztop) * (*nxtop) * (*nytop + 3));
// CHECK_ERROR(cudaRes, "Allocate Device Memory1, t1yy");
// cudaRes = cudaMalloc((void **)&t1yzD, sizeof(float) * (*nztop + 1) * (*nxtop) * (*nytop + 3));
// CHECK_ERROR(cudaRes, "Allocate Device Memory1, t1yz");
// cudaRes = cudaMalloc((void **)&t1zzD, sizeof(float) * (*nztop) * (*nxtop) * (*nytop));
// CHECK_ERROR(cudaRes, "Allocate Device Memory1, t1zz");
const char* filename3 = "x_t1xx.txt";
const char* filename4 = "x_t1xy.txt";
const char* filename5 = "x_t1xz.txt";
if((fp = fopen(filename3, "w+")) == NULL)
fprintf(stderr, "File write error!\n");
for(i = 0; i< (*nztop) * (*nxtop + 3) * (*nytop); i++ )
{
fprintf(fp, "%f ", t1xxM[i]);
}
fprintf(fp, "\n");
fclose(fp);
if((fp = fopen(filename4, "w+")) == NULL)
fprintf(stderr, "File write error!\n");
for(i = 0; i< (*nztop) * (*nxtop + 3) * (*nytop+3); i++ )
{
fprintf(fp, "%f ", t1xyM[i]);
}
fprintf(fp, "\n");
fclose(fp);
if((fp = fopen(filename5, "w+")) == NULL)
fprintf(stderr, "File write error!\n");
for(i = 0; i< (*nztop+1) * (*nxtop + 3) * (*nytop); i++ )
{
fprintf(fp, "%f ", t1xzM[i]);
}
fprintf(fp, "\n");
fclose(fp);
*/
return;
}
#ifdef __cplusplus
}
#endif
|
0141c4dd412a0eed6c006a76706ee38cac1f2f47.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
int main() {
int nDevices;
hipGetDeviceCount(&nDevices);
for (int i = 0; i < nDevices; i++) {
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, i);
printf("Device Number: %d\n", i+1);
printf(" Device name: %s\n", prop.name);
printf(" Memory Clock Rate (KHz): %d\n",
prop.memoryClockRate);
printf(" Memory Bus Width (bits): %d\n",
prop.memoryBusWidth);
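    // DDR memory transfers twice per clock; memoryClockRate is in kHz and
    // memoryBusWidth in bits, so /8 gives bytes and the 1.0e6 divisor yields GB/s.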
printf(" Peak Memory Bandwidth (GB/s): %f\n\n",
2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
}
} | 0141c4dd412a0eed6c006a76706ee38cac1f2f47.cu | #include <stdio.h>
int main() {
int nDevices;
cudaGetDeviceCount(&nDevices);
for (int i = 0; i < nDevices; i++) {
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, i);
printf("Device Number: %d\n", i+1);
printf(" Device name: %s\n", prop.name);
printf(" Memory Clock Rate (KHz): %d\n",
prop.memoryClockRate);
printf(" Memory Bus Width (bits): %d\n",
prop.memoryBusWidth);
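    // DDR memory transfers twice per clock; memoryClockRate is in kHz and
    // memoryBusWidth in bits, so /8 gives bytes and the 1.0e6 divisor yields GB/s.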
printf(" Peak Memory Bandwidth (GB/s): %f\n\n",
2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
}
} |
d547603684b91de6c6d192f6c7b4f04c28bb906d.hip | // !!! This is a file automatically generated by hipify!!!
#include "THHUNN.h"
#include "im2col.h"
void THNN_CudaSpatialFullConvolution_updateOutput(
THCState *state,
THCudaTensor *input,
THCudaTensor *output,
THCudaTensor *weight,
THCudaTensor *bias,
THCudaTensor *columns,
THCudaTensor *ones,
int kW, int kH,
int dW, int dH,
int padW, int padH,
int adjW, int adjH)
{
int nInputPlane = THCudaTensor_size(state, weight, 0);
int nOutputPlane = THCudaTensor_size(state, weight, 1);
THCUNN_assertSameGPU(state, 6, input, output, weight,
bias, columns, ones);
THArgCheck(input->nDimension == 3 || input->nDimension == 4, 2, "3D or 4D (batch mode) tensor is expected");
int batch = 1;
if (input->nDimension == 3) {
THArgCheck(input->size[0] == nInputPlane, 2, "input channels and nInputPlane dont match");
// Force batch
batch = 0;
THCudaTensor_resize4d(state, input, 1, input->size[0], input->size[1], input->size[2]);
} else {
THArgCheck(input->size[1] == nInputPlane, 2, "input channels and nInputPlane dont match");
}
long inputWidth = input->size[3];
long inputHeight = input->size[2];
long outputWidth = (inputWidth - 1) * dW - 2*padW + kW + adjW;
long outputHeight = (inputHeight - 1) * dH - 2*padH + kH + adjH;
// Batch size + input planes
long batchSize = input->size[0];
// Resize output
THCudaTensor_resize4d(state, output, batchSize, nOutputPlane, outputHeight, outputWidth);
// Resize temporary columns
THCudaTensor_resize2d(state, columns, nOutputPlane*kW*kH, inputHeight*inputWidth);
// Define a buffer of ones, for bias accumulation
// Note: this buffer can be shared with other modules; it only ever grows
// and always contains ones.
if (ones->nDimension != 2 || ones->size[0]*ones->size[1] < outputHeight*outputWidth) {
// Resize plane and fill with ones...
THCudaTensor_resize2d(state, ones, outputHeight, outputWidth);
THCudaTensor_fill(state, ones, 1);
}
// Helpers
THCudaTensor *input_n = THCudaTensor_new(state);
THCudaTensor *output_n = THCudaTensor_new(state);
// For each elt in batch, do:
for (int elt = 0; elt < batchSize; elt ++) {
// Matrix multiply per output:
THCudaTensor_select(state, input_n, input, 0, elt);
THCudaTensor_select(state, output_n, output, 0, elt);
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
long m = weight->size[1] * weight->size[2] * weight->size[3];
long n = columns->size[1];
long k = weight->size[0];
// Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices)
THCudaBlas_Sgemm(
state,
'n', 't',
n, m, k,
1,
THCudaTensor_data(state, input_n), n,
THCudaTensor_data(state, weight), m,
0,
THCudaTensor_data(state, columns), n
);
// Unpack columns back into input:
col2im(
THCState_getCurrentStream(state),
THCudaTensor_data(state, columns),
nOutputPlane, outputHeight, outputWidth, kH, kW, padH, padW, dH, dW,
1, 1, THCudaTensor_data(state, output_n)
);
// Do Bias after:
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
long m_ = nOutputPlane;
long n_ = outputHeight * outputWidth;
long k_ = 1;
// Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices)
if (bias) {
THCudaBlas_Sgemm(
state,
't', 'n',
n_, m_, k_,
1,
THCudaTensor_data(state, ones), k_,
THCudaTensor_data(state, bias), k_,
1,
THCudaTensor_data(state, output_n), n_
);
}
}
// Free
THCudaTensor_free(state, input_n);
THCudaTensor_free(state, output_n);
// Resize output
if (batch == 0) {
THCudaTensor_resize3d(state, output, nOutputPlane, outputHeight, outputWidth);
THCudaTensor_resize3d(state, input, nInputPlane, inputHeight, inputWidth);
}
}
void THNN_CudaSpatialFullConvolution_updateGradInput(
THCState *state,
THCudaTensor *input,
THCudaTensor *gradOutput,
THCudaTensor *gradInput,
THCudaTensor *weight,
THCudaTensor *gradColumns,
int kW, int kH,
int dW, int dH,
int padW, int padH,
int adjW, int adjH)
{
int nInputPlane = THCudaTensor_size(state, weight, 0);
int nOutputPlane = THCudaTensor_size(state, weight, 1);
THCUNN_assertSameGPU(state, 5, input, gradOutput, weight,
gradColumns, gradInput);
THArgCheck(input->nDimension == 3 || input->nDimension == 4, 2, "3D or 4D (batch mode) tensor is expected");
int batch = 1;
if (input->nDimension == 3) {
// Force batch
batch = 0;
THCudaTensor_resize4d(state, input, 1, input->size[0], input->size[1], input->size[2]);
THCudaTensor_resize4d(state, gradOutput, 1, gradOutput->size[0], gradOutput->size[1], gradOutput->size[2]);
}
long inputWidth = input->size[3];
long inputHeight = input->size[2];
long outputWidth = (inputWidth - 1) * dW - 2*padW + kW + adjW;
long outputHeight = (inputHeight - 1) * dH - 2*padH + kH + adjH;
// Batch size + input planes
long batchSize = input->size[0];
// Resize output
THCudaTensor_resize4d(state, gradInput, batchSize, nInputPlane, inputHeight, inputWidth);
// Resize temporary columns
THCudaTensor_resize2d(state, gradColumns, nOutputPlane*kW*kH, inputHeight*inputWidth);
// Helpers
THCudaTensor *gradInput_n = THCudaTensor_new(state);
THCudaTensor *gradOutput_n = THCudaTensor_new(state);
// For each elt in batch, do:
for (int elt = 0; elt < batchSize; elt ++) {
// Matrix multiply per sample:
THCudaTensor_select(state, gradInput_n, gradInput, 0, elt);
THCudaTensor_select(state, gradOutput_n, gradOutput, 0, elt);
// Extract columns:
im2col(
THCState_getCurrentStream(state),
THCudaTensor_data(state, gradOutput_n),
nOutputPlane, outputHeight, outputWidth, kH, kW, padH, padW, dH, dW,
1, 1, THCudaTensor_data(state, gradColumns)
);
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
long m = weight->size[0];
long n = gradColumns->size[1];
long k = weight->size[1] * weight->size[2] * weight->size[3];
// Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices)
THCudaBlas_Sgemm(
state,
'n', 'n',
n, m, k,
1,
THCudaTensor_data(state, gradColumns), n,
THCudaTensor_data(state, weight), k,
0,
THCudaTensor_data(state, gradInput_n), n
);
}
// Free
THCudaTensor_free(state, gradInput_n);
THCudaTensor_free(state, gradOutput_n);
// Resize output
if (batch == 0) {
THCudaTensor_resize3d(state, gradOutput, nOutputPlane, outputHeight, outputWidth);
THCudaTensor_resize3d(state, input, nInputPlane, inputHeight, inputWidth);
THCudaTensor_resize3d(state, gradInput, nInputPlane, inputHeight, inputWidth);
}
}
void THNN_CudaSpatialFullConvolution_accGradParameters(
THCState *state,
THCudaTensor *input,
THCudaTensor *gradOutput,
THCudaTensor *gradWeight,
THCudaTensor *gradBias,
THCudaTensor *columns,
THCudaTensor *ones,
int kW, int kH,
int dW, int dH,
int padW, int padH,
int adjW, int adjH,
float scale)
{
int nInputPlane = THCudaTensor_size(state, gradWeight, 0);
int nOutputPlane = THCudaTensor_size(state, gradWeight, 1);
THCUNN_assertSameGPU(state, 6, input, gradOutput, gradWeight,
gradBias, columns, ones);
THArgCheck(input->nDimension == 3 || input->nDimension == 4, 2, "3D or 4D (batch mode) tensor is expected");
int batch = 1;
if (input->nDimension == 3) {
// Force batch
batch = 0;
THCudaTensor_resize4d(state, input, 1, input->size[0], input->size[1], input->size[2]);
THCudaTensor_resize4d(state, gradOutput, 1, gradOutput->size[0], gradOutput->size[1], gradOutput->size[2]);
}
long inputWidth = input->size[3];
long inputHeight = input->size[2];
long outputWidth = (inputWidth - 1) * dW - 2*padW + kW + adjW;
long outputHeight = (inputHeight - 1) * dH - 2*padH + kH + adjH;
// Batch size + input planes
long batchSize = input->size[0];
// Define a buffer of ones, for bias accumulation
if (ones->nDimension != 2 || ones->size[0]*ones->size[1] < outputHeight*outputWidth) {
// Resize plane and fill with ones...
THCudaTensor_resize2d(state, ones, outputHeight, outputWidth);
THCudaTensor_fill(state, ones, 1);
}
// Resize temporary columns
THCudaTensor_resize2d(state, columns, nOutputPlane*kW*kH, inputHeight*inputWidth);
// Helpers
THCudaTensor *input_n = THCudaTensor_new(state);
THCudaTensor *gradOutput_n = THCudaTensor_new(state);
// For each elt in batch, do:
for (int elt = 0; elt < batchSize; elt ++) {
// Matrix multiply per output:
THCudaTensor_select(state, input_n, input, 0, elt);
THCudaTensor_select(state, gradOutput_n, gradOutput, 0, elt);
// Extract columns:
im2col(
THCState_getCurrentStream(state),
THCudaTensor_data(state, gradOutput_n),
nOutputPlane, outputHeight, outputWidth, kH, kW, padH, padW, dH, dW,
1, 1, THCudaTensor_data(state, columns)
);
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
long n = columns->size[0]; // nOutputPlane * kh * kw
long m = input_n->size[0]; // nInputPlane
long k = columns->size[1]; // inputHeight * inputWidth
// Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices)
THCudaBlas_Sgemm(
state,
't', 'n',
n, m, k,
scale,
THCudaTensor_data(state, columns), k,
THCudaTensor_data(state, input_n), k,
1,
THCudaTensor_data(state, gradWeight), n
);
// Do Bias:
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
long m_ = nOutputPlane;
long k_ = outputHeight * outputWidth;
// Do GEMV (note: this is a bit confusing because gemv assumes column-major matrices)
if (gradBias) {
THCudaBlas_Sgemv(
state,
't',
k_, m_,
scale,
THCudaTensor_data(state, gradOutput_n), k_,
THCudaTensor_data(state, ones), 1,
1,
THCudaTensor_data(state, gradBias), 1
);
}
}
// Free
THCudaTensor_free(state, input_n);
THCudaTensor_free(state, gradOutput_n);
// Resize
if (batch == 0) {
THCudaTensor_resize3d(state, gradOutput, nOutputPlane, outputHeight, outputWidth);
THCudaTensor_resize3d(state, input, nInputPlane, inputHeight, inputWidth);
}
}
| d547603684b91de6c6d192f6c7b4f04c28bb906d.cu | #include "THCUNN.h"
#include "im2col.h"
void THNN_CudaSpatialFullConvolution_updateOutput(
THCState *state,
THCudaTensor *input,
THCudaTensor *output,
THCudaTensor *weight,
THCudaTensor *bias,
THCudaTensor *columns,
THCudaTensor *ones,
int kW, int kH,
int dW, int dH,
int padW, int padH,
int adjW, int adjH)
{
int nInputPlane = THCudaTensor_size(state, weight, 0);
int nOutputPlane = THCudaTensor_size(state, weight, 1);
THCUNN_assertSameGPU(state, 6, input, output, weight,
bias, columns, ones);
THArgCheck(input->nDimension == 3 || input->nDimension == 4, 2, "3D or 4D (batch mode) tensor is expected");
int batch = 1;
if (input->nDimension == 3) {
THArgCheck(input->size[0] == nInputPlane, 2, "input channels and nInputPlane dont match");
// Force batch
batch = 0;
THCudaTensor_resize4d(state, input, 1, input->size[0], input->size[1], input->size[2]);
} else {
THArgCheck(input->size[1] == nInputPlane, 2, "input channels and nInputPlane dont match");
}
long inputWidth = input->size[3];
long inputHeight = input->size[2];
long outputWidth = (inputWidth - 1) * dW - 2*padW + kW + adjW;
long outputHeight = (inputHeight - 1) * dH - 2*padH + kH + adjH;
// Batch size + input planes
long batchSize = input->size[0];
// Resize output
THCudaTensor_resize4d(state, output, batchSize, nOutputPlane, outputHeight, outputWidth);
// Resize temporary columns
THCudaTensor_resize2d(state, columns, nOutputPlane*kW*kH, inputHeight*inputWidth);
// Define a buffer of ones, for bias accumulation
// Note: this buffer can be shared with other modules; it only ever grows
// and always contains ones.
if (ones->nDimension != 2 || ones->size[0]*ones->size[1] < outputHeight*outputWidth) {
// Resize plane and fill with ones...
THCudaTensor_resize2d(state, ones, outputHeight, outputWidth);
THCudaTensor_fill(state, ones, 1);
}
// Helpers
THCudaTensor *input_n = THCudaTensor_new(state);
THCudaTensor *output_n = THCudaTensor_new(state);
// For each elt in batch, do:
for (int elt = 0; elt < batchSize; elt ++) {
// Matrix multiply per output:
THCudaTensor_select(state, input_n, input, 0, elt);
THCudaTensor_select(state, output_n, output, 0, elt);
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
long m = weight->size[1] * weight->size[2] * weight->size[3];
long n = columns->size[1];
long k = weight->size[0];
// Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices)
THCudaBlas_Sgemm(
state,
'n', 't',
n, m, k,
1,
THCudaTensor_data(state, input_n), n,
THCudaTensor_data(state, weight), m,
0,
THCudaTensor_data(state, columns), n
);
// Unpack columns back into input:
col2im(
THCState_getCurrentStream(state),
THCudaTensor_data(state, columns),
nOutputPlane, outputHeight, outputWidth, kH, kW, padH, padW, dH, dW,
1, 1, THCudaTensor_data(state, output_n)
);
// Do Bias after:
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
long m_ = nOutputPlane;
long n_ = outputHeight * outputWidth;
long k_ = 1;
// Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices)
if (bias) {
THCudaBlas_Sgemm(
state,
't', 'n',
n_, m_, k_,
1,
THCudaTensor_data(state, ones), k_,
THCudaTensor_data(state, bias), k_,
1,
THCudaTensor_data(state, output_n), n_
);
}
}
// Free
THCudaTensor_free(state, input_n);
THCudaTensor_free(state, output_n);
// Resize output
if (batch == 0) {
THCudaTensor_resize3d(state, output, nOutputPlane, outputHeight, outputWidth);
THCudaTensor_resize3d(state, input, nInputPlane, inputHeight, inputWidth);
}
}
void THNN_CudaSpatialFullConvolution_updateGradInput(
THCState *state,
THCudaTensor *input,
THCudaTensor *gradOutput,
THCudaTensor *gradInput,
THCudaTensor *weight,
THCudaTensor *gradColumns,
int kW, int kH,
int dW, int dH,
int padW, int padH,
int adjW, int adjH)
{
int nInputPlane = THCudaTensor_size(state, weight, 0);
int nOutputPlane = THCudaTensor_size(state, weight, 1);
THCUNN_assertSameGPU(state, 5, input, gradOutput, weight,
gradColumns, gradInput);
THArgCheck(input->nDimension == 3 || input->nDimension == 4, 2, "3D or 4D (batch mode) tensor is expected");
int batch = 1;
if (input->nDimension == 3) {
// Force batch
batch = 0;
THCudaTensor_resize4d(state, input, 1, input->size[0], input->size[1], input->size[2]);
THCudaTensor_resize4d(state, gradOutput, 1, gradOutput->size[0], gradOutput->size[1], gradOutput->size[2]);
}
long inputWidth = input->size[3];
long inputHeight = input->size[2];
long outputWidth = (inputWidth - 1) * dW - 2*padW + kW + adjW;
long outputHeight = (inputHeight - 1) * dH - 2*padH + kH + adjH;
// Batch size + input planes
long batchSize = input->size[0];
// Resize output
THCudaTensor_resize4d(state, gradInput, batchSize, nInputPlane, inputHeight, inputWidth);
// Resize temporary columns
THCudaTensor_resize2d(state, gradColumns, nOutputPlane*kW*kH, inputHeight*inputWidth);
// Helpers
THCudaTensor *gradInput_n = THCudaTensor_new(state);
THCudaTensor *gradOutput_n = THCudaTensor_new(state);
// For each elt in batch, do:
for (int elt = 0; elt < batchSize; elt ++) {
// Matrix multiply per sample:
THCudaTensor_select(state, gradInput_n, gradInput, 0, elt);
THCudaTensor_select(state, gradOutput_n, gradOutput, 0, elt);
// Extract columns:
im2col(
THCState_getCurrentStream(state),
THCudaTensor_data(state, gradOutput_n),
nOutputPlane, outputHeight, outputWidth, kH, kW, padH, padW, dH, dW,
1, 1, THCudaTensor_data(state, gradColumns)
);
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
long m = weight->size[0];
long n = gradColumns->size[1];
long k = weight->size[1] * weight->size[2] * weight->size[3];
// Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices)
THCudaBlas_Sgemm(
state,
'n', 'n',
n, m, k,
1,
THCudaTensor_data(state, gradColumns), n,
THCudaTensor_data(state, weight), k,
0,
THCudaTensor_data(state, gradInput_n), n
);
}
// Free
THCudaTensor_free(state, gradInput_n);
THCudaTensor_free(state, gradOutput_n);
// Resize output
if (batch == 0) {
THCudaTensor_resize3d(state, gradOutput, nOutputPlane, outputHeight, outputWidth);
THCudaTensor_resize3d(state, input, nInputPlane, inputHeight, inputWidth);
THCudaTensor_resize3d(state, gradInput, nInputPlane, inputHeight, inputWidth);
}
}
void THNN_CudaSpatialFullConvolution_accGradParameters(
THCState *state,
THCudaTensor *input,
THCudaTensor *gradOutput,
THCudaTensor *gradWeight,
THCudaTensor *gradBias,
THCudaTensor *columns,
THCudaTensor *ones,
int kW, int kH,
int dW, int dH,
int padW, int padH,
int adjW, int adjH,
float scale)
{
int nInputPlane = THCudaTensor_size(state, gradWeight, 0);
int nOutputPlane = THCudaTensor_size(state, gradWeight, 1);
THCUNN_assertSameGPU(state, 6, input, gradOutput, gradWeight,
gradBias, columns, ones);
THArgCheck(input->nDimension == 3 || input->nDimension == 4, 2, "3D or 4D (batch mode) tensor is expected");
int batch = 1;
if (input->nDimension == 3) {
// Force batch
batch = 0;
THCudaTensor_resize4d(state, input, 1, input->size[0], input->size[1], input->size[2]);
THCudaTensor_resize4d(state, gradOutput, 1, gradOutput->size[0], gradOutput->size[1], gradOutput->size[2]);
}
long inputWidth = input->size[3];
long inputHeight = input->size[2];
long outputWidth = (inputWidth - 1) * dW - 2*padW + kW + adjW;
long outputHeight = (inputHeight - 1) * dH - 2*padH + kH + adjH;
// Batch size + input planes
long batchSize = input->size[0];
// Define a buffer of ones, for bias accumulation
if (ones->nDimension != 2 || ones->size[0]*ones->size[1] < outputHeight*outputWidth) {
// Resize plane and fill with ones...
THCudaTensor_resize2d(state, ones, outputHeight, outputWidth);
THCudaTensor_fill(state, ones, 1);
}
// Resize temporary columns
THCudaTensor_resize2d(state, columns, nOutputPlane*kW*kH, inputHeight*inputWidth);
// Helpers
THCudaTensor *input_n = THCudaTensor_new(state);
THCudaTensor *gradOutput_n = THCudaTensor_new(state);
// For each elt in batch, do:
for (int elt = 0; elt < batchSize; elt ++) {
// Matrix multiply per output:
THCudaTensor_select(state, input_n, input, 0, elt);
THCudaTensor_select(state, gradOutput_n, gradOutput, 0, elt);
// Extract columns:
im2col(
THCState_getCurrentStream(state),
THCudaTensor_data(state, gradOutput_n),
nOutputPlane, outputHeight, outputWidth, kH, kW, padH, padW, dH, dW,
1, 1, THCudaTensor_data(state, columns)
);
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
long n = columns->size[0]; // nOutputPlane * kh * kw
long m = input_n->size[0]; // nInputPlane
long k = columns->size[1]; // inputHeight * inputWidth
// Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices)
THCudaBlas_Sgemm(
state,
't', 'n',
n, m, k,
scale,
THCudaTensor_data(state, columns), k,
THCudaTensor_data(state, input_n), k,
1,
THCudaTensor_data(state, gradWeight), n
);
// Do Bias:
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
long m_ = nOutputPlane;
long k_ = outputHeight * outputWidth;
// Do GEMV (note: this is a bit confusing because gemv assumes column-major matrices)
if (gradBias) {
THCudaBlas_Sgemv(
state,
't',
k_, m_,
scale,
THCudaTensor_data(state, gradOutput_n), k_,
THCudaTensor_data(state, ones), 1,
1,
THCudaTensor_data(state, gradBias), 1
);
}
}
// Free
THCudaTensor_free(state, input_n);
THCudaTensor_free(state, gradOutput_n);
// Resize
if (batch == 0) {
THCudaTensor_resize3d(state, gradOutput, nOutputPlane, outputHeight, outputWidth);
THCudaTensor_resize3d(state, input, nInputPlane, inputHeight, inputWidth);
}
}
|
422e8669fac1cd63da2aa54e4783cc5309a6109e.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hipcub/hipcub.hpp>
/*
template<typename T>
struct minmax_pair
{
T min_val;
T max_val;
};
template<typename T>
struct MinMax_initializer
{
__host__ __device__ __forceinline__
minmax_pair<T> operator()(const T &a) const
{
minmax_pair<T> result;
result.min_val = a;
result.max_val = a;
return result;
}
};
template<typename T>
struct MinMax_operator
{
__host__ __device__ __forceinline__
minmax_pair<T> operator()(const T &a, const minmax_pair<T> &b) const
{
minmax_pair<T> result;
result.min_val = cub::MIN(a, b.min_val);
result.max_val = cub::MAX(a, b.max_val);
return result;
}
};
template<typename T>
void cubMinMax_alt(T* d_in, T* h_out, const int length)
{
MinMax_initializer<T> init_op;
minmax_pair<T> d_out=init_op(d_in[0]);
//hipMalloc(&d_out, sizeof(T)*2);
MinMax_operator<T> minmax_op;
//initialize d_out
// hipMemcpy(d_out, d_in, sizeof(T), hipMemcpyDeviceToDevice);
// hipMemcpy(d_out+1, d_in, sizeof(T), hipMemcpyDeviceToDevice);
// determine size of memory needed an allocate
void *d_temp_storage = NULL;
size_t temp_size = 0;
hipcub::DeviceReduce::Reduce(d_temp_storage, temp_size, d_in, &d_out, length, minmax_op);
hipMalloc(&d_temp_storage, temp_size);
// find min and max
hipcub::DeviceReduce::Reduce(d_temp_storage, temp_size, d_in, &d_out, length, minmax_op);
// copy to host
hipMemcpy(h_out, d_out, 2*sizeof(T), hipMemcpyDeviceToHost);
// cleanup
hipFree(d_temp_storage);
hipFree(d_out);
}
*/
template<typename T>
void cubMinMax_alt(T* d_in, T* h_out, const int length)
{
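// Two independent single-pass cub reductions overlapped on separate streams:
// d_out[0] receives the minimum and d_out[1] the maximum of the input.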
T* d_out;
hipMalloc(&d_out, sizeof(T)*2);
void *d_temp_storage_min = NULL;
size_t temp_size_min = 0;
void *d_temp_storage_max = NULL;
size_t temp_size_max = 0;
hipStream_t *stream = (hipStream_t *)malloc(2*sizeof(hipStream_t));
hipStreamCreate(&stream[0]);
hipStreamCreate(&stream[1]);
// determine the size of temporary storage needed and allocate it
hipcub::DeviceReduce::Min(d_temp_storage_min, temp_size_min, d_in, d_out, length, stream[0]);
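// One scratch slab sized for both reductions; Min and Max over the same type
// and length request the same amount of temporary storage here.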
hipMalloc(&d_temp_storage_min, 2*temp_size_min);
hipcub::DeviceReduce::Min(d_temp_storage_min, temp_size_min, d_in, d_out, length, stream[0]);
hipcub::DeviceReduce::Max(static_cast<char*>(d_temp_storage_min) + temp_size_min, temp_size_min, d_in, d_out + 1, length, stream[1]); // offset via char*: pointer arithmetic on void* is non-standard
/*
hipcub::DeviceReduce::Max(d_temp_storage_max, temp_size_max, d_in, d_out+1, length, stream[1]);
hipMalloc(&d_temp_storage_max, temp_size_max);
hipcub::DeviceReduce::Max(d_temp_storage_max, temp_size_max, d_in, d_out+1, length, stream[1]);
*/
// find min
//hipcub::DeviceReduce::Min(d_temp_storage, temp_size, d_in, d_out, length);
// find max
// hipcub::DeviceReduce::Max(d_temp_storage, temp_size, d_in, d_out+1, length);
// copy to host
hipMemcpy(h_out, d_out, 2*sizeof(T), hipMemcpyDeviceToHost);
// cleanup
hipFree(d_temp_storage_min);
hipFree(d_temp_storage_max);
hipStreamDestroy(stream[0]);
hipStreamDestroy(stream[1]);
free(stream); // release the host-side stream array allocated with malloc
hipFree(d_out);
}
| 422e8669fac1cd63da2aa54e4783cc5309a6109e.cu |
#include <cuda_runtime.h>
#include <cub/cub.cuh>
/*
template<typename T>
struct minmax_pair
{
T min_val;
T max_val;
};
template<typename T>
struct MinMax_initializer
{
__host__ __device__ __forceinline__
minmax_pair<T> operator()(const T &a) const
{
minmax_pair<T> result;
result.min_val = a;
result.max_val = a;
return result;
}
};
template<typename T>
struct MinMax_operator
{
__host__ __device__ __forceinline__
minmax_pair<T> operator()(const T &a, const minmax_pair<T> &b) const
{
minmax_pair<T> result;
result.min_val = cub::MIN(a, b.min_val);
result.max_val = cub::MAX(a, b.max_val);
return result;
}
};
template<typename T>
void cubMinMax_alt(T* d_in, T* h_out, const int length)
{
MinMax_initializer<T> init_op;
minmax_pair<T> d_out=init_op(d_in[0]);
//cudaMalloc(&d_out, sizeof(T)*2);
MinMax_operator<T> minmax_op;
//initialize d_out
// cudaMemcpy(d_out, d_in, sizeof(T), cudaMemcpyDeviceToDevice);
// cudaMemcpy(d_out+1, d_in, sizeof(T), cudaMemcpyDeviceToDevice);
// determine size of memory needed an allocate
void *d_temp_storage = NULL;
size_t temp_size = 0;
cub::DeviceReduce::Reduce(d_temp_storage, temp_size, d_in, &d_out, length, minmax_op);
cudaMalloc(&d_temp_storage, temp_size);
// find min and max
cub::DeviceReduce::Reduce(d_temp_storage, temp_size, d_in, &d_out, length, minmax_op);
// copy to host
cudaMemcpy(h_out, d_out, 2*sizeof(T), cudaMemcpyDeviceToHost);
// cleanup
cudaFree(d_temp_storage);
cudaFree(d_out);
}
*/
template<typename T>
void cubMinMax_alt(T* d_in, T* h_out, const int length)
{
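// Two independent single-pass cub reductions overlapped on separate streams:
// d_out[0] receives the minimum and d_out[1] the maximum of the input.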
T* d_out;
cudaMalloc(&d_out, sizeof(T)*2);
void *d_temp_storage_min = NULL;
size_t temp_size_min = 0;
void *d_temp_storage_max = NULL;
size_t temp_size_max = 0;
cudaStream_t *stream = (cudaStream_t *)malloc(2*sizeof(cudaStream_t));
cudaStreamCreate(&stream[0]);
cudaStreamCreate(&stream[1]);
// determine the size of temporary storage needed and allocate it
cub::DeviceReduce::Min(d_temp_storage_min, temp_size_min, d_in, d_out, length, stream[0]);
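// One scratch slab sized for both reductions; Min and Max over the same type
// and length request the same amount of temporary storage here.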
cudaMalloc(&d_temp_storage_min, 2*temp_size_min);
cub::DeviceReduce::Min(d_temp_storage_min, temp_size_min, d_in, d_out, length, stream[0]);
cub::DeviceReduce::Max(static_cast<char*>(d_temp_storage_min) + temp_size_min, temp_size_min, d_in, d_out + 1, length, stream[1]); // offset via char*: pointer arithmetic on void* is non-standard
/*
cub::DeviceReduce::Max(d_temp_storage_max, temp_size_max, d_in, d_out+1, length, stream[1]);
cudaMalloc(&d_temp_storage_max, temp_size_max);
cub::DeviceReduce::Max(d_temp_storage_max, temp_size_max, d_in, d_out+1, length, stream[1]);
*/
// find min
//cub::DeviceReduce::Min(d_temp_storage, temp_size, d_in, d_out, length);
// find max
// cub::DeviceReduce::Max(d_temp_storage, temp_size, d_in, d_out+1, length);
// copy to host
cudaMemcpy(h_out, d_out, 2*sizeof(T), cudaMemcpyDeviceToHost);
// cleanup
cudaFree(d_temp_storage_min);
cudaFree(d_temp_storage_max);
cudaStreamDestroy(stream[0]);
cudaStreamDestroy(stream[1]);
free(stream); // release the host-side stream array allocated with malloc
cudaFree(d_out);
}
|
2549713e3ddad9a45e993af2e9704df1441075e7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Implements the math functions for CPU.
#include <hipcub/hipcub.hpp>
#include "caffe2/core/context_gpu.h"
#include "caffe2/utils/conversions.h"
#include "caffe2/utils/math.h"
#if THRUST_VERSION >= 100800
#define THRUST_SUPPORTS_PER_THREAD
#endif // THRUST_VERSION >= 100800
namespace caffe2 {
namespace math {
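// The DELEGATE_* macros below stamp out an elementwise GPU kernel plus a typed
// host wrapper for each (type, op) pair; all of them use the grid-stride
// CUDA_1D_KERNEL_LOOP pattern.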
#define DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(T, Funcname, function) \
__global__ \
void _Kernel_##T##_##Funcname(const int N, const T* x, T* y) { \
CUDA_1D_KERNEL_LOOP(i, N) { \
y[i] = function(x[i]); \
} \
} \
template <> \
void Funcname<T, CUDAContext>( \
const int N, const T* x, T* y, \
CUDAContext* context) { \
hipLaunchKernelGGL(( _Kernel_##T##_##Funcname), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), \
0, context->cuda_stream(), \
N, x, y); \
}
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Exp, expf);
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Log, logf);
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Cos, cosf);
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sin, sinf);
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Abs, fabsf);
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sqrt, sqrtf);
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, InvSqrt, rsqrtf);
__device__ float cuda_sqrf(const float x) { return x * x; }
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sqr, cuda_sqrf);
#undef DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION
#define DELEGATE_SINCOS_CUDA_FUNCTION(T) \
__global__ void _Kernel_##T##_##SinCos( \
const int N, const T* x, T* ys, T* yc) { \
CUDA_1D_KERNEL_LOOP(i, N) { \
sincos(x[i], ys + i, yc + i); \
} \
} \
template <> \
void SinCos<T, CUDAContext>( \
const int N, const T* x, T* ys, T* yc, CUDAContext* context) { \
hipLaunchKernelGGL(( _Kernel_##T##_##SinCos), \
CAFFE_GET_BLOCKS(N), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream(), N, x, ys, yc); \
}
DELEGATE_SINCOS_CUDA_FUNCTION(float)
DELEGATE_SINCOS_CUDA_FUNCTION(double)
#undef DELEGATE_SINCOS_CUDA_FUNCTION
#define DELEGATE_SIMPLE_CUDA_BINARY_INFIX_FUNCTION(T, Funcname, expr) \
__global__ void _Kernel_##T##_##Funcname( \
const int N, const T* a, const T* b, T* y) { \
CUDA_1D_KERNEL_LOOP(i, N) { \
float r = convert::To<T, float>(a[i]) expr convert::To<T, float>(b[i]); \
y[i] = convert::To<float, T>(r); \
} \
} \
template <> \
void Funcname<T, CUDAContext>( \
const int N, const T* a, const T* b, T* y, CUDAContext* context) { \
hipLaunchKernelGGL(( _Kernel_##T##_##Funcname), \
CAFFE_GET_BLOCKS(N), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream(), N, a, b, y); \
}
DELEGATE_SIMPLE_CUDA_BINARY_INFIX_FUNCTION(float, Add, +);
DELEGATE_SIMPLE_CUDA_BINARY_INFIX_FUNCTION(int32_t, Add, +);
DELEGATE_SIMPLE_CUDA_BINARY_INFIX_FUNCTION(float, Sub, -);
DELEGATE_SIMPLE_CUDA_BINARY_INFIX_FUNCTION(float, Mul, *);
DELEGATE_SIMPLE_CUDA_BINARY_INFIX_FUNCTION(float, Div, /);
DELEGATE_SIMPLE_CUDA_BINARY_INFIX_FUNCTION(float16, Add, +);
DELEGATE_SIMPLE_CUDA_BINARY_INFIX_FUNCTION(float16, Sub, -);
DELEGATE_SIMPLE_CUDA_BINARY_INFIX_FUNCTION(float16, Mul, *);
DELEGATE_SIMPLE_CUDA_BINARY_INFIX_FUNCTION(float16, Div, /);
#undef DELEGATE_SIMPLE_CUDA_BINARY_INFIX_FUNCTION
#define DELEGATE_SIMPLE_CUDA_BINARY_PREFIX_FUNCTION(T, Funcname, func) \
__global__ void _Kernel_##T##_##Funcname( \
const int N, const T* a, const T* b, T* y) { \
CUDA_1D_KERNEL_LOOP(i, N) { \
float r = \
func(convert::To<T, float>(a[i]), convert::To<T, float>(b[i])); \
y[i] = convert::To<float, T>(r); \
} \
} \
template <> \
void Funcname<T, CUDAContext>( \
const int N, const T* a, const T* b, T* y, CUDAContext* context) { \
hipLaunchKernelGGL(( _Kernel_##T##_##Funcname), \
CAFFE_GET_BLOCKS(N), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream(), N, a, b, y); \
}
DELEGATE_SIMPLE_CUDA_BINARY_PREFIX_FUNCTION(float, ElemwiseMax, fmaxf);
#undef DELEGATE_SIMPLE_CUDA_BINARY_PREFIX_FUNCTION
#define DELEGATE_REDUCTION_FUNCTION(T, Funcname, func) \
template <> \
void Funcname<T, CUDAContext>( \
const int N, \
const T* src, \
T* dst, \
Tensor<CUDAContext>* scratch_ptr, \
CUDAContext* context) { \
size_t memRequired = 0; \
hipcub::DeviceReduce::func( \
nullptr, memRequired, src, dst, N, context->cuda_stream()); \
auto buffer_size = \
static_cast<TIndex>((memRequired + sizeof(T) - 1) / sizeof(T)); \
scratch_ptr->Resize(std::vector<TIndex>{buffer_size}); \
hipcub::DeviceReduce::func( \
static_cast<void*>(scratch_ptr->mutable_data<T>()), \
memRequired, \
src, \
dst, \
N, \
context->cuda_stream()); \
}
DELEGATE_REDUCTION_FUNCTION(float, ReduceMin, Min)
DELEGATE_REDUCTION_FUNCTION(float, ReduceMax, Max)
DELEGATE_REDUCTION_FUNCTION(int32_t, ReduceMax, Max)
DELEGATE_REDUCTION_FUNCTION(int64_t, ReduceMax, Max)
#undef DELEGATE_REDUCTION_FUNCTION
// Caffe2 gemm provides a simpler interface to the gemm functions, with the
// limitation that the data has to be contiguous in memory.
template <>
void Gemm<float, CUDAContext>(
const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB,
const int M,
const int N,
const int K,
const float alpha,
const float* A,
const float* B,
const float beta,
float* C,
CUDAContext* context,
TensorProto::DataType math_type) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
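// Row-major C = A*B is computed as column-major C^T = B^T * A^T, which is why
// the operand order is swapped below and C uses leading dimension N.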
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
CUBLAS_ENFORCE(hipblasSgemm(
context->cublas_handle(),
cuTransB,
cuTransA,
N,
M,
K,
&alpha,
B,
ldb,
A,
lda,
&beta,
C,
N));
}
template <>
void Gemm<float16, CUDAContext>(
const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB,
const int M,
const int N,
const int K,
const float alpha,
const float16* A,
const float16* B,
const float beta,
float16* C,
CUDAContext* context,
TensorProto::DataType math_type) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
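// math_type selects the accumulation precision: FLOAT uses cublasSgemmEx with
// fp16 storage and fp32 compute, FLOAT16 uses hipblasHgemm end to end.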
if (math_type == TensorProto_DataType_FLOAT) {
CUBLAS_CHECK(cublasSgemmEx(
context->cublas_handle(),
cuTransB,
cuTransA,
N,
M,
K,
&alpha,
B,
HIP_R_16F,
ldb,
A,
HIP_R_16F,
lda,
&beta,
C,
HIP_R_16F,
N));
} else if (math_type == TensorProto_DataType_FLOAT16) {
// convert alpha, beta from float -> __half
auto alpha_fp16 = convert::floatToHalf(alpha);
auto beta_fp16 = convert::floatToHalf(beta);
// call hipblasHgemm
CUBLAS_CHECK(hipblasHgemm(
context->cublas_handle(),
cuTransB,
cuTransA,
N,
M,
K,
&alpha_fp16,
(const __half*)B,
ldb,
(const __half*)A,
lda,
&beta_fp16,
(__half*)C,
N));
} else {
// fail
CAFFE_THROW("Unsupported math type");
}
}
#if TORCH_HIP_VERSION >= 9000
// No change, but required. Defer to default CUDA engine
template <>
void Gemm<float, CUDAContext, TensorCoreEngine>(
const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB,
const int M,
const int N,
const int K,
const float alpha,
const float* A,
const float* B,
const float beta,
float* C,
CUDAContext* context,
TensorProto::DataType math_type) {
return Gemm<float,CUDAContext>(TransA,
TransB,
M,
N,
K,
alpha,
A,
B,
beta,
C,
context,
math_type);
}
template <>
void Gemm<float16, CUDAContext, TensorCoreEngine>(
const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB,
const int M,
const int N,
const int K,
const float alpha,
const float16* A,
const float16* B,
const float beta,
float16* C,
CUDAContext* context,
TensorProto::DataType math_type) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
// enable TensorCore for this call on this handle
if (TensorCoreAvailable()) {
CUBLAS_ENFORCE(cublasSetMathMode(
context->cublas_handle(),
CUBLAS_TENSOR_OP_MATH));
}
CUBLAS_CHECK(hipblasGemmEx(
context->cublas_handle(),
cuTransB,
cuTransA,
N,
M,
K,
&alpha,
B,
HIP_R_16F,
ldb,
A,
HIP_R_16F,
lda,
&beta,
C,
HIP_R_16F,
N,
HIP_R_32F,
CUBLAS_GEMM_DFALT_TENSOR_OP));
// Now disable TensorCore math for subsequent calls to this handle
if (TensorCoreAvailable()) {
CUBLAS_ENFORCE(cublasSetMathMode(
context->cublas_handle(),
CUBLAS_DEFAULT_MATH));
}
}
#endif // TORCH_HIP_VERSION >= 9000
template <>
void GemmEx<float, CUDAContext>(
const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB,
const int M,
const int N,
const int K,
const float alpha,
const float* A,
const int lda,
const float* B,
const int ldb,
const float beta,
float* C,
const int ldc,
CUDAContext* context) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
CUBLAS_ENFORCE(hipblasSgemm(
context->cublas_handle(),
cuTransB,
cuTransA,
N,
M,
K,
&alpha,
B,
ldb,
A,
lda,
&beta,
C,
ldc));
}
template <>
void Gemv<float, CUDAContext>(
const CBLAS_TRANSPOSE TransA,
const int M,
const int N,
const float alpha,
const float* A,
const float* x,
const float beta,
float* y,
CUDAContext* context,
TensorProto::DataType math_type) {
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
CUBLAS_ENFORCE(hipblasSgemv(
context->cublas_handle(),
cuTransA,
N,
M,
&alpha,
A,
N,
x,
1,
&beta,
y,
1));
}
// Batched Add variants
namespace {
template <typename T>
__global__ void AddStripedBatchKernel(
const int N,
const T* first,
T* Y,
const int stripe,
const int batch) {
for (int j = 0; j < batch; j++) {
const T* x = first + j * stripe;
CUDA_1D_KERNEL_LOOP(i, N) {
Y[i] += x[i];
}
}
}
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH(T) \
template <> \
void AddStripedBatch<T, CUDAContext>( \
const int N, \
const T* first, \
T* Y, \
const int stripe, \
const int batch, \
CUDAContext* context) { \
hipLaunchKernelGGL(( AddStripedBatchKernel<T>), \
CAFFE_GET_BLOCKS(N), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream(), N, first, Y, stripe, batch); \
}
CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH(float);
CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH(double);
#undef CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH
template <>
void Gemv<float16, CUDAContext>(
const CBLAS_TRANSPOSE TransA,
const int M,
const int N,
const float alpha,
const float16* A,
const float16* x,
const float beta,
float16* y,
CUDAContext* context,
TensorProto::DataType math_type) {
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
// sort out what we need to call cublasSgemmEx / hipblasHgemm
int m = (cuTransA == HIPBLAS_OP_N) ? N : M;
int k = (cuTransA == HIPBLAS_OP_N) ? M : N;
int LDA = (cuTransA == HIPBLAS_OP_N) ? m : k;
int LDC = m;
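  // What the (m, k, LDA, LDC) shuffle above encodes: there is no
  // half-precision gemv, so y = alpha * op(A) * x + beta * y is issued
  // as a GEMM with a single output column, i.e. an (m x k) * (k x 1)
  // product in cublas' column-major view. A row-major (M x N) A seen
  // column-major is A^T, which is why CblasNoTrans maps to HIPBLAS_OP_T
  // above.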
if (math_type == TensorProto_DataType_FLOAT) {
CUBLAS_CHECK(cublasSgemmEx(
context->cublas_handle(),
cuTransA,
HIPBLAS_OP_N,
m,
1,
k,
&alpha,
A,
HIP_R_16F,
LDA,
x,
HIP_R_16F,
k,
&beta,
y,
HIP_R_16F,
LDC));
} else if (math_type == TensorProto_DataType_FLOAT16) {
auto alpha_fp16 = convert::floatToHalf(alpha);
auto beta_fp16 = convert::floatToHalf(beta);
CUBLAS_CHECK(hipblasHgemm(
context->cublas_handle(),
cuTransA,
HIPBLAS_OP_N,
m,
1,
k,
&alpha_fp16,
(const __half*)A,
LDA,
(const __half*)x,
k,
&beta_fp16,
(__half*)y,
LDC));
} else {
// fail
CAFFE_THROW("Unsupported math type");
}
}
namespace {
template <typename T>
__global__ void SetKernel(const int N, const T alpha, T* Y) {
CUDA_1D_KERNEL_LOOP(i, N) {
Y[i] = alpha;
}
}
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_SET(T) \
template <> \
void Set<T, CUDAContext>(const TIndex N, const T alpha, T *Y, \
CUDAContext* context) { \
hipLaunchKernelGGL(( SetKernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), \
0, context->cuda_stream(), N, alpha, Y); \
}
CAFFE2_SPECIALIZED_CUDA_SET(float);
CAFFE2_SPECIALIZED_CUDA_SET(double);
CAFFE2_SPECIALIZED_CUDA_SET(bool);
CAFFE2_SPECIALIZED_CUDA_SET(int8_t);
CAFFE2_SPECIALIZED_CUDA_SET(int16_t);
CAFFE2_SPECIALIZED_CUDA_SET(float16);
CAFFE2_SPECIALIZED_CUDA_SET(int);
CAFFE2_SPECIALIZED_CUDA_SET(int64_t);
CAFFE2_SPECIALIZED_CUDA_SET(char);
CAFFE2_SPECIALIZED_CUDA_SET(uint8_t);
CAFFE2_SPECIALIZED_CUDA_SET(uint16_t);
#undef CAFFE2_SPECIALIZED_CUDA_SET
namespace {
template <typename T>
__global__ void
UniformShift(const int N, const float min, const float max, T* x) {
float scale = max - min;
CUDA_1D_KERNEL_LOOP(i, N) {
x[i] = convert::To<float, T>(convert::To<T, float>(x[i]) * scale + min);
}
}
__global__ void UniformIntFit(const int N, const int min, const int max,
unsigned int* x) {
int* x_int = reinterpret_cast<int*>(x);
int range = (max - min + 1);
CUDA_1D_KERNEL_LOOP(i, N) {
x_int[i] = min + static_cast<int>(x[i] % range);
}
}
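// NB: UniformIntFit maps raw 32-bit draws into [min, max] by modulo,
// which is slightly biased whenever (max - min + 1) does not evenly
// divide 2^32.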
} // namespace
template <>
void RandUniform<float, CUDAContext>(
const int n, const float min, const float max, float* r,
CUDAContext* context) {
CURAND_ENFORCE(hiprandGenerateUniform(context->curand_generator(), r, n));
hipLaunchKernelGGL(( UniformShift<float>), dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS),
0, context->cuda_stream(), n, min, max, r);
}
template <>
void RandUniform<double, CUDAContext>(
const int n, const double min, const double max, double* r,
CUDAContext* context) {
CURAND_ENFORCE(
hiprandGenerateUniformDouble(context->curand_generator(), r, n));
hipLaunchKernelGGL(( UniformShift<double>), dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS),
0, context->cuda_stream(), n, min, max, r);
}
template <>
void RandUniform<int, CUDAContext>(
const int n, const int min, const int max, int* r,
CUDAContext* context) {
CURAND_ENFORCE(hiprandGenerate(
context->curand_generator(), reinterpret_cast<unsigned int*>(r), n));
hipLaunchKernelGGL(( UniformIntFit), dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS),
0, context->cuda_stream(),
n, min, max, reinterpret_cast<unsigned int*>(r));
}
template <typename T>
int HandleOddLengthRandGaussian(
const int n,
const T mean,
const T std,
T* r,
CUDAContext* context) {
if (n % 2 == 1) {
std::default_random_engine generator;
std::normal_distribution<T> distribution(mean, std);
const T random_value = distribution(generator);
math::Set<T, CUDAContext>(1, random_value, r + (n - 1), context);
return n - 1;
}
return n;
}
template <>
void RandGaussian<float, CUDAContext>(
const int n, const float mean, const float std, float* r,
CUDAContext* context) {
// If n is odd, we add a random Gaussian value at the end manually
// and generate n-1 random values using hiprandGenerateNormal.
// hiprandGenerateNormal requires n to be even.
const int even_n =
HandleOddLengthRandGaussian<float>(n, mean, std, r, context);
CURAND_ENFORCE(
hiprandGenerateNormal(context->curand_generator(), r, even_n, mean, std));
}
template <>
void RandGaussian<double, CUDAContext>(
const int n, const double mean, const double std, double* r,
CUDAContext* context) {
const int even_n =
HandleOddLengthRandGaussian<double>(n, mean, std, r, context);
CURAND_ENFORCE(hiprandGenerateNormalDouble(
context->curand_generator(), r, even_n, mean, std));
}
template<>
void Dot<float, CUDAContext>(
const int n, const float* a, const float* b, float* y,
CUDAContext* context) {
float result;
CUBLAS_ENFORCE(hipblasSdot(context->cublas_handle(), n, a, 1, b, 1, &result));
context->Copy<float, CPUContext, CUDAContext>(1, &result, y);
}
template <>
void Dot<float16, CUDAContext>(
const int n,
const float16* a,
const float16* b,
float16* y,
CUDAContext* context) {
float16 result;
// execute with 32-bit math
CUBLAS_CHECK(hipblasDotEx_v2(
context->cublas_handle(),
n,
a,
HIP_R_16F,
1,
b,
HIP_R_16F,
1,
&result,
HIP_R_16F,
HIP_R_32F));
context->Copy<float16, CPUContext, CUDAContext>(1, &result, y);
}
// A previous version of caffe2 used Thrust but it turns out that thrust
// reduction has an implicit scratch space allocation and deallocation, which
// may interfere with NCCL and create a deadlock. Hence we are using a custom
// reduction here.
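// The kernel below runs as a single 128-thread block (see the launches
// in Sum/SumSqr), so the reduction completes entirely in shared memory:
// no global scratch buffer, no atomics, and no hidden allocations.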
#define SUM_KERNEL_NTHREADS 128
template <typename T>
__global__ void SumKernel(const int N, const T* X, T* Y, bool square) {
const int idx = threadIdx.x;
__shared__ float reduction_buffer[SUM_KERNEL_NTHREADS];
reduction_buffer[idx] = 0;
// A multilevel reduction.
// N -> 128
if (!square) {
for (int i = idx; i < N; i += SUM_KERNEL_NTHREADS) {
reduction_buffer[idx] += convert::To<T, float>(X[i]);
}
} else {
for (int i = idx; i < N; i += SUM_KERNEL_NTHREADS) {
float Xi = convert::To<T, float>(X[i]);
reduction_buffer[idx] += Xi * Xi;
}
}
__syncthreads();
// 128 -> 32
if (idx < 32) {
reduction_buffer[idx] +=
reduction_buffer[idx + 32] +
reduction_buffer[idx + 64] +
reduction_buffer[idx + 96];
}
__syncthreads();
// 32 -> 1
if (idx == 0) {
float tmp = 0;
for (int i = 0; i < 32; ++i) {
tmp += reduction_buffer[i];
}
*Y = convert::To<float, T>(tmp);
}
}
// According to the benchmarks script
// caffe2/caffe2/experiments/python/device_reduce_sum_bench.py,
// device reduce is slower for N <= 10000.
#define DEVICE_REDUCE_SIZE_THRESHOLD 10000
namespace {
template <typename T>
__global__ void SumConvertKernel(float* sum, T* dest) {
*dest = convert::To<float, T>(*sum);
}
template <typename FloatIterT>
void SumFloatIter(
const int N,
FloatIterT it,
float*& dest,
CUDAContext* context,
Tensor<CUDAContext>* scratch_ptr) {
size_t memRequired = 0;
hipcub::DeviceReduce::Sum(
nullptr, memRequired, it, dest, N, context->cuda_stream());
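  // NB: the DeviceReduce::Sum call above passes a null workspace pointer;
  // by the CUB contract it then only writes the required temporary
  // storage size into memRequired and launches no work.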
auto buffer_size =
static_cast<TIndex>((memRequired + sizeof(float) - 1) / sizeof(float));
if (!dest) {
// allocate one more float at the end of scratch for dest
scratch_ptr->Resize(std::vector<TIndex>{buffer_size + 1});
dest = scratch_ptr->template mutable_data<float>() + buffer_size;
} else {
scratch_ptr->Resize(std::vector<TIndex>{buffer_size});
}
hipcub::DeviceReduce::Sum(
static_cast<void*>(scratch_ptr->template mutable_data<float>()),
memRequired,
it,
dest,
N,
context->cuda_stream());
}
} // namespace
template <>
void Sum<float, CUDAContext>(
const int N,
const float* x,
float* y,
CUDAContext* context,
Tensor<CUDAContext>* scratch_ptr) {
if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) {
SumFloatIter(N, x, y, context, scratch_ptr);
} else {
hipLaunchKernelGGL(( SumKernel), dim3(1), dim3(SUM_KERNEL_NTHREADS), 0, context->cuda_stream(),
N, x, y, false);
}
}
namespace {
template <typename T>
struct FloatTransform {
inline __host__ __device__ float operator()(const T v) const {
return convert::To<T, float>(v);
}
};
} // namespace
#define CAFFE2_MATH_SUM_FUNC(T) \
template <> \
void Sum<T, CUDAContext>( \
const int N, \
const T* x, \
T* y, \
CUDAContext* context, \
Tensor<CUDAContext>* scratch_ptr) { \
if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) { \
FloatTransform<T> transform; \
hipcub::TransformInputIterator<float, FloatTransform<T>, const T*> it( \
x, transform); \
float* sum = nullptr; \
SumFloatIter(N, it, sum, context, scratch_ptr); \
hipLaunchKernelGGL(( SumConvertKernel), dim3(1), dim3(1), 0, context->cuda_stream(), sum, y); \
} else { \
hipLaunchKernelGGL(( SumKernel), dim3(1), dim3(SUM_KERNEL_NTHREADS), 0, context->cuda_stream(), \
N, x, y, false); \
} \
}
CAFFE2_MATH_SUM_FUNC(float16)
#undef CAFFE2_MATH_SUM_FUNC
namespace {
template <typename T>
struct SqrTransform {
inline __host__ __device__ T operator()(const T v) const {
return v * v;
}
};
} // namespace
template <>
void SumSqr<float, CUDAContext>(
const int N,
const float* x,
float* y,
CUDAContext* context,
Tensor<CUDAContext>* scratch_ptr) {
if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) {
SqrTransform<float> transform;
hipcub::TransformInputIterator<float, SqrTransform<float>, const float*> it(
x, transform);
SumFloatIter(N, it, y, context, scratch_ptr);
} else {
hipLaunchKernelGGL(( SumKernel), dim3(1), dim3(SUM_KERNEL_NTHREADS), 0, context->cuda_stream(),
N, x, y, true);
}
}
#define CAFFE2_MATH_SUMSQR_FUNC(T) \
template <> \
void SumSqr<T, CUDAContext>( \
const int N, \
const T* x, \
T* y, \
CUDAContext* context, \
Tensor<CUDAContext>* scratch_ptr) { \
if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) { \
FloatTransform<T> float_transform; \
hipcub::TransformInputIterator<float, FloatTransform<T>, const T*> \
float_it(x, float_transform); \
SqrTransform<float> sqr_transform; \
hipcub::TransformInputIterator< \
float, \
SqrTransform<float>, \
decltype(float_it)> \
it(float_it, sqr_transform); \
float* sum = nullptr; \
SumFloatIter(N, it, sum, context, scratch_ptr); \
hipLaunchKernelGGL(( SumConvertKernel), dim3(1), dim3(1), 0, context->cuda_stream(), sum, y); \
} else { \
hipLaunchKernelGGL(( SumKernel), dim3(1), dim3(SUM_KERNEL_NTHREADS), 0, context->cuda_stream(), \
N, x, y, true); \
} \
}
CAFFE2_MATH_SUMSQR_FUNC(float16)
#undef CAFFE2_MATH_SUMSQR_FUNC
#undef DEVICE_REDUCE_SIZE_THRESHOLD
namespace {
template <typename T>
__global__ void SelectKernel(
const int N, const int D, const T* x, const int* idx, T* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = x[i * D + idx[i]];
}
}
} // namespace
template <>
void Select<float, CUDAContext>(
const int N, const int D, const float* x, const int* idx, float* y,
CUDAContext* context) {
hipLaunchKernelGGL(( SelectKernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS),
0, context->cuda_stream(), N, D, x, idx, y);
}
template <>
void Select<float16, CUDAContext>(
const int N,
const int D,
const float16* x,
const int* idx,
float16* y,
CUDAContext* context) {
hipLaunchKernelGGL(( SelectKernel<float16>),
dim3(CAFFE_GET_BLOCKS(N)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), N, D, x, idx, y);
}
namespace {
template <typename T>
__global__ void ScaleKernel(const int n, const float alpha, const T* x, T* y) {
CUDA_1D_KERNEL_LOOP(i, n) {
// y[i] = convert::To<float,T>(convert::To<T, float>(x[i]) * alpha);
y[i] = convert::Get<T>(convert::Get<float>(x[i]) * alpha);
}
}
template <typename T>
__global__ void
ScaleKernelDeviceAlpha(const int n, const float* alpha, const T* x, T* y) {
CUDA_1D_KERNEL_LOOP(i, n) {
y[i] = x[i] * (*alpha);
}
}
template <typename T>
__global__ void PowKernel(const int n, const T* x, const T exponent, T* y) {
CUDA_1D_KERNEL_LOOP(i, n) {
y[i] = powf(x[i], exponent);
}
}
// fp16 specialization
template <>
__global__ void ScaleKernelDeviceAlpha(
const int n,
const float* alpha,
const float16* x,
float16* y) {
CUDA_1D_KERNEL_LOOP(i, n) {
y[i] = convert::To<float, float16>(
convert::To<float16, float>(x[i]) * (*alpha));
}
}
} // namespace
template <>
void Powx<float, CUDAContext>(
const int N,
const float* a,
const float b,
float* y,
CUDAContext* context) {
hipLaunchKernelGGL(( PowKernel),
dim3(CAFFE_GET_BLOCKS(N)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), N, a, b, y);
}
template <>
void Scale<float, CUDAContext>(
const int n,
const float alpha,
const float* x,
float* y,
CUDAContext* context) {
hipLaunchKernelGGL(( ScaleKernel<float>), dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS),
0, context->cuda_stream(), n, alpha, x, y);
}
template <>
void Scale<float16, CUDAContext>(
const int n,
const float alpha,
const float16* x,
float16* y,
CUDAContext* context) {
hipLaunchKernelGGL(( ScaleKernel<float16>),
dim3(CAFFE_GET_BLOCKS(n)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), n, alpha, x, y);
}
template <>
void Scale<float, CUDAContext>(
const int n, const float* alpha, const float *x, float* y,
CUDAContext* context) {
hipLaunchKernelGGL(( ScaleKernelDeviceAlpha<float>),
dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(),
n, alpha, x, y);
}
template <>
void Scale<float16, CUDAContext>(
const int n,
const float* alpha,
const float16* x,
float16* y,
CUDAContext* context) {
hipLaunchKernelGGL(( ScaleKernelDeviceAlpha<float16>),
dim3(CAFFE_GET_BLOCKS(n)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), n, alpha, x, y);
}
template <>
void Axpy<float, CUDAContext>(
const int N,
const float alpha,
const float* X,
float* Y,
CUDAContext* context) {
CUBLAS_ENFORCE(hipblasSaxpy(context->cublas_handle(), N, &alpha, X, 1, Y, 1));
}
template <>
void Axpy<double, CUDAContext>(
const int N,
const float alpha,
const double* X,
double* Y,
CUDAContext* context) {
double alpha_d{alpha};
CUBLAS_ENFORCE(
hipblasDaxpy(context->cublas_handle(), N, &alpha_d, X, 1, Y, 1));
}
template <>
void Axpy<float16, CUDAContext>(
const int N,
const float alpha,
const float16* X,
float16* Y,
CUDAContext* context) {
CUBLAS_CHECK(hipblasAxpyEx_v2(
context->cublas_handle(),
N,
&alpha,
HIP_R_16F,
X,
HIP_R_16F,
1,
Y,
HIP_R_16F,
1,
HIP_R_32F));
}
namespace {
template <typename T>
__global__ void AxpyKernel(const int n, const float* a, const T* x, T* y) {
CUDA_1D_KERNEL_LOOP(index, n) {
y[index] = convert::Get<T>(
convert::Get<float>(x[index]) * (*a) + convert::Get<float>(y[index]));
}
}
} // namespace
template <>
void Axpy<float, CUDAContext>(
const int n, const float* alpha, const float* X,
float* Y, CUDAContext* context) {
hipLaunchKernelGGL(( AxpyKernel<float>), dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS),
0, context->cuda_stream(), n, alpha, X, Y);
}
template <>
void Axpy<float16, CUDAContext>(
const int n,
const float* alpha,
const float16* X,
float16* Y,
CUDAContext* context) {
hipLaunchKernelGGL(( AxpyKernel<float16>),
dim3(CAFFE_GET_BLOCKS(n)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), n, alpha, X, Y);
}
namespace {
template <typename T>
__global__ void AxpbyKernel(const int n, const T a, const T* x,
const T b, T* y) {
CUDA_1D_KERNEL_LOOP(index, n) {
y[index] = x[index] * a + y[index] * b;
}
}
} // namespace
template <>
void Axpby<float, CUDAContext>(
const int n, const float a, const float* x, const float b, float* y,
CUDAContext* context) {
hipLaunchKernelGGL(( AxpbyKernel<float>), dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS),
0, context->cuda_stream(), n, a, x, b, y);
}
namespace {
template <typename T>
__global__ void im2col_gpu_kernel_nchw(const int n, const T* data_im,
const int height, const int width, const int kernel_h, const int kernel_w,
const int dilation_h, const int dilation_w,
const int pad_t, const int pad_l,
const int stride_h, const int stride_w,
const int height_col, const int width_col,
T* data_col) {
CUDA_1D_KERNEL_LOOP(index, n) {
int w_out = index % width_col;
int h_index = index / width_col;
int h_out = h_index % height_col;
int channel_in = h_index / height_col;
int channel_out = channel_in * kernel_h * kernel_w;
int h_in = h_out * stride_h - pad_t;
int w_in = w_out * stride_w - pad_l;
T* data_col_ptr = data_col;
data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;
const T* data_im_ptr = data_im;
data_im_ptr += (channel_in * height + h_in) * width + w_in;
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
int h = h_in + i * dilation_h;
int w = w_in + j * dilation_w;
*data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?
data_im_ptr[i * dilation_h * width + j * dilation_w] : 0;
data_col_ptr += height_col * width_col;
}
}
}
}
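// Layout note for the NCHW kernel above: data_col is a
// (channels * kernel_h * kernel_w) x (height_col * width_col) row-major
// matrix. Each thread owns one (channel_in, h_out, w_out) output column
// position and writes kernel_h * kernel_w values down that column,
// stepping height_col * width_col elements between rows.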
template <typename T>
__global__ void im2col_gpu_kernel_nhwc(const int n, const T* data_im,
const int height, const int width, const int kernel_h, const int kernel_w,
const int dilation_h, const int dilation_w,
const int pad_t, const int pad_l,
const int stride_h, const int stride_w,
const int width_col, const int channels,
T* data_col) {
const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
CUDA_1D_KERNEL_LOOP(index, n) {
int channel_in = index % channels;
int w_out = index / channels % width_col;
int h_out = index / channels / width_col;
int h_in = h_out * stride_h - pad_t;
int w_in = w_out * stride_w - pad_l;
T* local_data_col = data_col +
((h_out * width_col) + w_out) * channels * kernel_h * kernel_w
+ channel_in;
for (int i = 0; i < dkernel_h; i += dilation_h) {
int h = h_in + i;
for (int j = 0; j < dkernel_w; j += dilation_w) {
int w = w_in + j;
*local_data_col = (h >= 0 && w >= 0 && h < height && w < width) ?
data_im[(h * width + w) * channels + channel_in] : 0;
local_data_col += channels;
}
}
}
}
template <typename T>
__global__ void col2im_gpu_kernel_nchw(const int n, const T* data_col,
const int height, const int width,
const int patch_h, const int patch_w,
const int dilation_h, const int dilation_w,
const int pad_t, const int pad_l,
const int stride_h, const int stride_w,
const int height_col, const int width_col,
T* data_im) {
const int dpatch_h = dilation_h * (patch_h - 1) + 1;
const int dpatch_w = dilation_w * (patch_w - 1) + 1;
CUDA_1D_KERNEL_LOOP(index, n) {
T val = 0;
int w = index % width + pad_l;
int h = (index / width) % height + pad_t;
int c = index / (width * height);
// compute the start and end of the output
int w_col_start = (w < dpatch_w) ? 0 : (w - dpatch_w) / stride_w + 1;
int w_col_end = min(w / stride_w + 1, width_col);
int h_col_start = (h < dpatch_h) ? 0 : (h - dpatch_h) / stride_h + 1;
int h_col_end = min(h / stride_h + 1, height_col);
for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
int h_k = (h - h_col * stride_h);
int w_k = (w - w_col * stride_w);
if (h_k % dilation_h == 0 && w_k % dilation_w == 0) {
h_k /= dilation_h;
w_k /= dilation_w;
int data_col_index =
(((c * patch_h + h_k) * patch_w + w_k) * height_col + h_col) *
width_col +
w_col;
val += data_col[data_col_index];
}
}
}
data_im[index] = val;
}
}
template <typename T>
__global__ void col2im_gpu_kernel_nhwc(const int n, const T* data_col,
const int width, const int channels,
const int patch_h, const int patch_w,
const int dilation_h, const int dilation_w,
const int pad_t, const int pad_l,
const int stride_h, const int stride_w,
const int height_col, const int width_col,
T* data_im) {
const int dpatch_h = dilation_h * (patch_h - 1) + 1;
const int dpatch_w = dilation_w * (patch_w - 1) + 1;
CUDA_1D_KERNEL_LOOP(index, n) {
T val = 0;
int c = index % channels;
int w = index / channels % width + pad_l;
int h = index / channels / width + pad_t;
// compute the start and end of the output
int w_col_start = (w < dpatch_w) ? 0 : (w - dpatch_w) / stride_w + 1;
int w_col_end = min(w / stride_w + 1, width_col);
int h_col_start = (h < dpatch_h) ? 0 : (h - dpatch_h) / stride_h + 1;
int h_col_end = min(h / stride_h + 1, height_col);
int channels_col = patch_h * patch_w * channels;
for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
int h_k = h - h_col * stride_h;
int w_k = w - w_col * stride_w;
if (h_k % dilation_h == 0 && w_k % dilation_w == 0) {
h_k /= dilation_h;
w_k /= dilation_w;
int c_col = (h_k * patch_w + w_k) * channels + c;
val += data_col[(h_col * width_col + w_col) * channels_col + c_col];
}
}
}
data_im[index] = val;
}
}
// Ported from caffe1
template <typename T, int num_axes>
__global__ void im2col_nd_gpu_kernel(
const int n,
const T* data_im,
const int* im_shape,
const int* col_shape,
const int* kernel_shape,
const int* pad,
const int* stride,
const int* dilation,
T* data_col) {
int d_temp[num_axes]; // NOLINT(runtime/arrays)
int d_iter[num_axes]; // NOLINT(runtime/arrays)
__shared__ int shared_dilation[num_axes];
__shared__ int shared_kernel_shape[num_axes];
__shared__ int shared_pad[num_axes];
__shared__ int shared_stride[num_axes];
__shared__ int shared_col_shape[num_axes + 1];
__shared__ int shared_im_shape[num_axes + 1];
if (threadIdx.x < num_axes) {
shared_dilation[threadIdx.x] = dilation[threadIdx.x];
shared_kernel_shape[threadIdx.x] = kernel_shape[threadIdx.x];
shared_pad[threadIdx.x] = pad[threadIdx.x];
shared_stride[threadIdx.x] = stride[threadIdx.x];
}
if (threadIdx.x < num_axes + 1) {
shared_col_shape[threadIdx.x] = col_shape[threadIdx.x];
shared_im_shape[threadIdx.x] = im_shape[threadIdx.x];
}
__syncthreads();
int i;
CUDA_1D_KERNEL_LOOP(index, n) {
    // channel_in starts as the flat column index; the loop below peels off
    // the spatial indices d_temp and leaves the input channel.
int channel_in = index;
int channel_out = 1;
for (i = num_axes - 1; i >= 0; --i) {
d_temp[i] = channel_in % shared_col_shape[i + 1];
channel_in /= shared_col_shape[i + 1];
channel_out *= shared_kernel_shape[i];
}
channel_out *= channel_in;
int data_col_inc = 1;
for (i = 0; i < num_axes; ++i) {
channel_out *= shared_col_shape[i + 1];
channel_out += d_temp[i];
d_temp[i] = d_temp[i] * shared_stride[i] - shared_pad[i];
channel_in *= shared_im_shape[i + 1];
channel_in += d_temp[i];
data_col_inc *= shared_col_shape[i + 1];
d_iter[i] = 0;
}
T* data_col_ptr = data_col + channel_out;
const T* data_im_ptr = data_im + channel_in;
bool incremented;
do {
bool in_range = true;
for (i = 0; i < num_axes; ++i) {
const int d_iter_im = d_iter[i] * shared_dilation[i] + d_temp[i];
in_range &= d_iter_im >= 0 && d_iter_im < shared_im_shape[i + 1];
if (!in_range) {
break;
}
}
if (in_range) {
int data_im_offset = d_iter[0] * shared_dilation[0];
for (i = 1; i < num_axes; ++i) {
data_im_offset *= shared_im_shape[i + 1];
data_im_offset += d_iter[i] * shared_dilation[i];
}
*data_col_ptr = data_im_ptr[data_im_offset];
} else {
*data_col_ptr = 0;
}
data_col_ptr += data_col_inc;
incremented = false;
for (i = num_axes - 1; i >= 0; --i) {
const int d_max = shared_kernel_shape[i];
if (d_iter[i] == d_max - 1) {
d_iter[i] = 0;
} else { // d_iter[i] < d_max - 1
++d_iter[i];
incremented = true;
break;
}
} // for (int i = num_axes - 1; i >= 0; --i)
} while (incremented); // do
} // CUDA_KERNEL_LOOP(index, n)
}
template <typename T, int num_axes>
__global__ void col2im_nd_gpu_kernel(
const int n,
const T* data_col,
const int* im_shape,
const int* col_shape,
const int* kernel_shape,
const int* pad,
const int* stride,
const int* dilation,
T* data_im) {
int d_im[num_axes]; // NOLINT(runtime/arrays)
int d_col_iter[num_axes]; // NOLINT(runtime/arrays)
int d_col_start[num_axes]; // NOLINT(runtime/arrays)
int d_col_end[num_axes]; // NOLINT(runtime/arrays)
__shared__ int shared_dilation[num_axes];
__shared__ int shared_kernel_shape[num_axes];
__shared__ int shared_pad[num_axes];
__shared__ int shared_stride[num_axes];
__shared__ int shared_col_shape[num_axes + 1];
__shared__ int shared_im_shape[num_axes + 1];
if (threadIdx.x < num_axes) {
shared_dilation[threadIdx.x] = dilation[threadIdx.x];
shared_kernel_shape[threadIdx.x] = kernel_shape[threadIdx.x];
shared_pad[threadIdx.x] = pad[threadIdx.x];
shared_stride[threadIdx.x] = stride[threadIdx.x];
}
if (threadIdx.x < num_axes + 1) {
shared_col_shape[threadIdx.x] = col_shape[threadIdx.x];
shared_im_shape[threadIdx.x] = im_shape[threadIdx.x];
}
__syncthreads();
CUDA_1D_KERNEL_LOOP(index, n) {
    // c_im starts as the flat image index; the loop below peels off the
    // padded spatial coordinates d_im and leaves the channel in c_im.
int c_im = index;
// Calculate d_im (image dimensions).
for (int i = num_axes - 1; i >= 0; --i) {
d_im[i] = c_im % shared_im_shape[i + 1] + shared_pad[i];
c_im /= shared_im_shape[i + 1];
}
// Calculate col start/end indices.
bool done = false;
for (int i = 0; i < num_axes; ++i) {
const int kernel_extent =
shared_dilation[i] * (shared_kernel_shape[i] - 1) + 1;
d_col_start[i] = d_col_iter[i] = (d_im[i] < kernel_extent)
? 0
: (d_im[i] - kernel_extent) / shared_stride[i] + 1;
d_col_end[i] =
min(d_im[i] / shared_stride[i] + 1, shared_col_shape[i + 1]);
if (d_col_start[i] >= d_col_end[i]) {
// Skip computation if the dimension is 0 at any spatial axis --
// final val will be 0.
data_im[index] = 0;
done = true;
break; // for (int i = 0; i < num_axes; ++i)
}
}
if (done) {
continue; // CUDA_KERNEL_LOOP(index, n)
}
// Loop over the col to compute the output val.
T val = 0;
bool incremented = true;
bool skip = false;
do {
// Compute the final offset.
int final_offset = 0;
int kernel_shape_prod = 1;
int kernel_index;
for (int i = num_axes - 1; i >= 0; --i) {
kernel_index = d_im[i] - d_col_iter[i] * shared_stride[i];
if (kernel_index % shared_dilation[i]) {
skip = true;
break;
} else {
kernel_index /= shared_dilation[i];
final_offset += kernel_index * kernel_shape_prod;
kernel_shape_prod *= shared_kernel_shape[i];
}
}
if (!skip) {
final_offset += kernel_shape_prod * c_im;
for (int i = 0; i < num_axes; ++i) {
final_offset *= shared_col_shape[i + 1];
final_offset += d_col_iter[i];
}
val += data_col[final_offset];
}
skip = false;
incremented = false;
for (int i = num_axes - 1; i >= 0; --i) {
const int d_max = d_col_end[i];
if (d_col_iter[i] == d_max - 1) {
d_col_iter[i] = d_col_start[i];
} else { // d_col_iter[i] < d_max - 1
++d_col_iter[i];
incremented = true;
break; // for (int i = num_axes - 1; i >= 0; --i)
}
} // for (int i = num_axes - 1; i >= 0; --i)
} while (incremented);
data_im[index] = val;
} // CUDA_KERNEL_LOOP(index, n)
}
} // namespace
template <>
void Im2col<float, CUDAContext, StorageOrder::NCHW>(
const float* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int dilation_h, const int dilation_w,
const int pad_t, const int pad_l, const int pad_b, const int pad_r,
const int stride_h,
const int stride_w, float* data_col, CUDAContext* context) {
const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
  // We are going to launch channels * height_col * width_col threads, each
  // thread responsible for copying one kernel window of a single channel.
int height_col = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
int width_col = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
int num_kernels = channels * height_col * width_col;
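  // Illustrative example of the sizing above: height = 7, pad_t = pad_b = 1,
  // kernel_h = 3, dilation_h = 1, stride_h = 2 gives dkernel_h = 3 and
  // height_col = (7 + 1 + 1 - 3) / 2 + 1 = 4 output rows.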
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( im2col_gpu_kernel_nchw<float>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS), 0,
context->cuda_stream(),
num_kernels, data_im, height, width, kernel_h, kernel_w,
dilation_h, dilation_w, pad_t, pad_l, stride_h, stride_w,
height_col, width_col, data_col);
}
template <>
void Im2col<float, CUDAContext, StorageOrder::NHWC>(
const float* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int dilation_h, const int dilation_w,
const int pad_t, const int pad_l, const int pad_b, const int pad_r,
const int stride_h,
const int stride_w, float* data_col, CUDAContext* context) {
const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
  // We are going to launch height_col * width_col * channels threads, each
  // thread responsible for copying one kernel window of a single channel.
int height_col = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
int width_col = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
int num_kernels = height_col * width_col * channels;
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( im2col_gpu_kernel_nhwc<float>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS), 0,
context->cuda_stream(),
num_kernels, data_im, height, width, kernel_h, kernel_w,
dilation_h, dilation_w, pad_t, pad_l, stride_h, stride_w,
width_col, channels, data_col);
}
template <>
void Col2im<float, CUDAContext, StorageOrder::NCHW>(
const float* data_col, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int dilation_h, const int dilation_w,
const int pad_t, const int pad_l, const int pad_b, const int pad_r,
const int stride_h,
const int stride_w, float* data_im, CUDAContext* context) {
const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
int height_col = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
int width_col = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
int num_kernels = channels * height * width;
  // To avoid involving atomic operations, we launch one thread per bottom
  // (image) element and have each thread gather and sum its top (column)
  // contributions in the kernel.
hipLaunchKernelGGL(( col2im_gpu_kernel_nchw<float>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS), 0,
context->cuda_stream(),
num_kernels, data_col, height, width, kernel_h, kernel_w,
dilation_h, dilation_w,
pad_t, pad_l, stride_h, stride_w,
height_col, width_col, data_im);
}
template <>
void Col2im<float, CUDAContext, StorageOrder::NHWC>(
const float* data_col, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int dilation_h, const int dilation_w,
const int pad_t, const int pad_l, const int pad_b, const int pad_r,
const int stride_h,
const int stride_w, float* data_im, CUDAContext* context) {
const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
int height_col = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
int width_col = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
int num_kernels = height * width * channels;
  // To avoid involving atomic operations, we launch one thread per bottom
  // (image) element and have each thread gather and sum its top (column)
  // contributions in the kernel.
hipLaunchKernelGGL(( col2im_gpu_kernel_nhwc<float>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS), 0,
context->cuda_stream(),
num_kernels, data_col, width, channels, kernel_h, kernel_w,
dilation_h, dilation_w,
pad_t, pad_l, stride_h, stride_w, height_col, width_col, data_im);
}
template <>
void Col2imNd<float, CUDAContext, StorageOrder::NCHW>(
const float* data_col,
const int* img_shape,
const int* col_shape,
const int img_size,
const int col_size,
const int* kernel_shape,
const int* stride,
const int* dilation,
const int* pad,
const int N,
float* data_img,
CUDAContext* context) {
CAFFE_ENFORCE_LT(
N, CAFFE_CUDA_NUM_THREADS, "num_axes should be smaller than block size.");
#define COL2IM_ND_KERNEL(n) \
  /* NOLINT_NEXT_LINE(whitespace/operators) */                             \
  hipLaunchKernelGGL((col2im_nd_gpu_kernel<float, n>),                     \
      dim3(CAFFE_GET_BLOCKS(img_size)),                                    \
      dim3(CAFFE_CUDA_NUM_THREADS),                                        \
0, \
context->cuda_stream(), \
img_size, \
data_col, \
img_shape, \
col_shape, \
kernel_shape, \
pad, \
stride, \
dilation, \
data_img)
switch (N) {
case 1:
COL2IM_ND_KERNEL(1);
break;
case 2:
COL2IM_ND_KERNEL(2);
break;
case 3:
COL2IM_ND_KERNEL(3);
break;
case 4:
COL2IM_ND_KERNEL(4);
break;
case 5:
COL2IM_ND_KERNEL(5);
break;
default:
CAFFE_THROW(
"Col2imNd does not support computation with ", N, " spatial axes");
}
}
template <>
void Im2colNd<float, CUDAContext, StorageOrder::NCHW>(
const float* data_img,
const int* img_shape,
const int* col_shape,
const int img_size,
const int col_size,
const int* kernel_shape,
const int* stride,
const int* dilation,
const int* pad,
const int N,
float* data_col,
CUDAContext* context,
    bool /*accumulate_output*/) {
CAFFE_ENFORCE_LT(
N, CAFFE_CUDA_NUM_THREADS, "num_axes should be smaller than block size.");
#define IM2COL_ND_KERNEL(n) \
  /* NOLINT_NEXT_LINE(whitespace/operators) */                             \
  hipLaunchKernelGGL((im2col_nd_gpu_kernel<float, n>),                     \
      dim3(CAFFE_GET_BLOCKS(col_size)),                                    \
      dim3(CAFFE_CUDA_NUM_THREADS),                                        \
0, \
context->cuda_stream(), \
col_size, \
data_img, \
img_shape, \
col_shape, \
kernel_shape, \
pad, \
stride, \
dilation, \
data_col)
switch (N) {
case 1:
IM2COL_ND_KERNEL(1);
break;
case 2:
IM2COL_ND_KERNEL(2);
break;
case 3:
IM2COL_ND_KERNEL(3);
break;
case 4:
      IM2COL_ND_KERNEL(4);
      break;
case 5:
IM2COL_ND_KERNEL(5);
break;
default:
CAFFE_THROW(
"Im2colNd does not support computation with ", N, " spatial axes");
}
}
template <>
void CopyMatrix<CUDAContext>(
const size_t itemsize,
const int M,
const int N,
const void* A,
const int lda,
void* B,
const int ldb,
CUDAContext* context) {
hipMemcpy2DAsync(B, ldb * itemsize, A, lda * itemsize, N * itemsize, M,
hipMemcpyDeviceToDevice, context->cuda_stream());
}
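// NB: in hipMemcpy2DAsync above the pitches are byte strides between
// consecutive rows (lda and ldb elements times itemsize), while the
// copied extent is N * itemsize bytes wide by M rows tall; that is, an
// M x N element sub-matrix of a possibly wider allocation.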
template <>
void CopyVector<float, CUDAContext>(
const int N,
const float* src,
float* dst,
CUDAContext* context) {
if (src != dst && N > 0) {
hipMemcpyAsync(
dst,
src,
sizeof(float) * N,
hipMemcpyDeviceToDevice,
context->cuda_stream());
}
}
namespace {
__global__ void rowwise_max_kernel(
const int rows,
const int cols,
const float* data,
float* out) {
typedef hipcub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
for (int rowIndex = blockIdx.x; rowIndex < rows; rowIndex += gridDim.x) {
float maxval = -FLT_MAX;
    // NB: the memory accesses here are sequentialized; without unrolling
    // the loop there will not be any ILP. Because we run this kernel with
    // a lot of threads, that should not be a big problem; however, if we
    // ever reduce the thread count to take advantage of warp-wide
    // synchronization, it may become a problem again.
for (int colIndex = threadIdx.x; colIndex < cols; colIndex += blockDim.x) {
maxval = max(data[rowIndex * cols + colIndex], maxval);
}
maxval = BlockReduce(temp_storage).Reduce(maxval, hipcub::Max());
if (threadIdx.x == 0) {
out[rowIndex] = maxval;
}
__syncthreads();
}
}
__global__ void colwise_max_kernel(
const int rows,
const int cols,
const float* data,
float* out) {
typedef hipcub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
for (int colIndex = blockIdx.x; colIndex < cols; colIndex += gridDim.x) {
float maxval = -FLT_MAX;
for (int rowIndex = threadIdx.x; rowIndex < rows; rowIndex += blockDim.x) {
maxval = max(data[rowIndex * cols + colIndex], maxval);
}
maxval = BlockReduce(temp_storage).Reduce(maxval, hipcub::Max());
if (threadIdx.x == 0) {
out[colIndex] = maxval;
}
__syncthreads();
}
}
} // namespace
template <>
void RowwiseMax(
const int N,
const int D,
const float* x,
float* y,
CUDAContext* context) {
hipLaunchKernelGGL(( rowwise_max_kernel),
dim3(::min(N, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), N, D, x, y);
}
template <>
void ColwiseMax(
const int N,
const int D,
const float* x,
float* y,
CUDAContext* context) {
hipLaunchKernelGGL(( colwise_max_kernel),
dim3(::min(D, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), N, D, x, y);
}
namespace {
__global__ void
maximum_kernel(const int N, const float alpha, const float* x, float* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = fmaxf(x[i], alpha);
}
}
} // namespace
template <>
void Maximum(
const int N,
const float alpha,
const float* x,
float* y,
CUDAContext* context) {
hipLaunchKernelGGL(( maximum_kernel),
dim3(::min(N, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), N, alpha, x, y);
}
} // namespace math
} // namespace caffe2
| 2549713e3ddad9a45e993af2e9704df1441075e7.cu | // Implements the math functions for GPU.
#include <cub/block/block_reduce.cuh>
#include "caffe2/core/context_gpu.h"
#include "caffe2/utils/conversions.h"
#include "caffe2/utils/math.h"
#include <cub/cub.cuh>
#if THRUST_VERSION >= 100800
#define THRUST_SUPPORTS_PER_THREAD
#endif // THRUST_VERSION >= 100800
namespace caffe2 {
namespace math {
#define DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(T, Funcname, function) \
__global__ \
void _Kernel_##T##_##Funcname(const int N, const T* x, T* y) { \
CUDA_1D_KERNEL_LOOP(i, N) { \
y[i] = function(x[i]); \
} \
} \
template <> \
void Funcname<T, CUDAContext>( \
const int N, const T* x, T* y, \
CUDAContext* context) { \
_Kernel_##T##_##Funcname<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, \
0, context->cuda_stream()>>>( \
N, x, y); \
}
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Exp, expf);
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Log, logf);
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Cos, cosf);
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sin, sinf);
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Abs, fabsf);
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sqrt, sqrtf);
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, InvSqrt, rsqrtf);
__device__ float cuda_sqrf(const float x) { return x * x; }
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sqr, cuda_sqrf);
#undef DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION
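// For reference, DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Exp, expf)
// above expands, roughly, to:
//
//   __global__ void _Kernel_float_Exp(const int N, const float* x, float* y) {
//     CUDA_1D_KERNEL_LOOP(i, N) {
//       y[i] = expf(x[i]);
//     }
//   }
//   template <>
//   void Exp<float, CUDAContext>(
//       const int N, const float* x, float* y, CUDAContext* context) {
//     _Kernel_float_Exp<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS,
//                         0, context->cuda_stream()>>>(N, x, y);
//   }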
#define DELEGATE_SINCOS_CUDA_FUNCTION(T) \
__global__ void _Kernel_##T##_##SinCos( \
const int N, const T* x, T* ys, T* yc) { \
CUDA_1D_KERNEL_LOOP(i, N) { \
sincos(x[i], ys + i, yc + i); \
} \
} \
template <> \
void SinCos<T, CUDAContext>( \
const int N, const T* x, T* ys, T* yc, CUDAContext* context) { \
_Kernel_##T##_##SinCos<<< \
CAFFE_GET_BLOCKS(N), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>(N, x, ys, yc); \
}
DELEGATE_SINCOS_CUDA_FUNCTION(float)
DELEGATE_SINCOS_CUDA_FUNCTION(double)
#undef DELEGATE_SINCOS_CUDA_FUNCTION
#define DELEGATE_SIMPLE_CUDA_BINARY_INFIX_FUNCTION(T, Funcname, expr) \
__global__ void _Kernel_##T##_##Funcname( \
const int N, const T* a, const T* b, T* y) { \
CUDA_1D_KERNEL_LOOP(i, N) { \
float r = convert::To<T, float>(a[i]) expr convert::To<T, float>(b[i]); \
y[i] = convert::To<float, T>(r); \
} \
} \
template <> \
void Funcname<T, CUDAContext>( \
const int N, const T* a, const T* b, T* y, CUDAContext* context) { \
_Kernel_##T##_##Funcname<<< \
CAFFE_GET_BLOCKS(N), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>(N, a, b, y); \
}
DELEGATE_SIMPLE_CUDA_BINARY_INFIX_FUNCTION(float, Add, +);
DELEGATE_SIMPLE_CUDA_BINARY_INFIX_FUNCTION(int32_t, Add, +);
DELEGATE_SIMPLE_CUDA_BINARY_INFIX_FUNCTION(float, Sub, -);
DELEGATE_SIMPLE_CUDA_BINARY_INFIX_FUNCTION(float, Mul, *);
DELEGATE_SIMPLE_CUDA_BINARY_INFIX_FUNCTION(float, Div, /);
DELEGATE_SIMPLE_CUDA_BINARY_INFIX_FUNCTION(float16, Add, +);
DELEGATE_SIMPLE_CUDA_BINARY_INFIX_FUNCTION(float16, Sub, -);
DELEGATE_SIMPLE_CUDA_BINARY_INFIX_FUNCTION(float16, Mul, *);
DELEGATE_SIMPLE_CUDA_BINARY_INFIX_FUNCTION(float16, Div, /);
#undef DELEGATE_SIMPLE_CUDA_BINARY_INFIX_FUNCTION
#define DELEGATE_SIMPLE_CUDA_BINARY_PREFIX_FUNCTION(T, Funcname, func) \
__global__ void _Kernel_##T##_##Funcname( \
const int N, const T* a, const T* b, T* y) { \
CUDA_1D_KERNEL_LOOP(i, N) { \
float r = \
func(convert::To<T, float>(a[i]), convert::To<T, float>(b[i])); \
y[i] = convert::To<float, T>(r); \
} \
} \
template <> \
void Funcname<T, CUDAContext>( \
const int N, const T* a, const T* b, T* y, CUDAContext* context) { \
_Kernel_##T##_##Funcname<<< \
CAFFE_GET_BLOCKS(N), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>(N, a, b, y); \
}
DELEGATE_SIMPLE_CUDA_BINARY_PREFIX_FUNCTION(float, ElemwiseMax, fmaxf);
#undef DELEGATE_SIMPLE_CUDA_BINARY_PREFIX_FUNCTION
#define DELEGATE_REDUCTION_FUNCTION(T, Funcname, func) \
template <> \
void Funcname<T, CUDAContext>( \
const int N, \
const T* src, \
T* dst, \
Tensor<CUDAContext>* scratch_ptr, \
CUDAContext* context) { \
size_t memRequired = 0; \
cub::DeviceReduce::func( \
nullptr, memRequired, src, dst, N, context->cuda_stream()); \
auto buffer_size = \
static_cast<TIndex>((memRequired + sizeof(T) - 1) / sizeof(T)); \
scratch_ptr->Resize(std::vector<TIndex>{buffer_size}); \
cub::DeviceReduce::func( \
static_cast<void*>(scratch_ptr->mutable_data<T>()), \
memRequired, \
src, \
dst, \
N, \
context->cuda_stream()); \
}
DELEGATE_REDUCTION_FUNCTION(float, ReduceMin, Min)
DELEGATE_REDUCTION_FUNCTION(float, ReduceMax, Max)
DELEGATE_REDUCTION_FUNCTION(int32_t, ReduceMax, Max)
DELEGATE_REDUCTION_FUNCTION(int64_t, ReduceMax, Max)
#undef DELEGATE_REDUCTION_FUNCTION
// Caffe2 gemm provides a simpler interface to the gemm functions, with the
// limitation that the data has to be contiguous in memory.
template <>
void Gemm<float, CUDAContext>(
const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB,
const int M,
const int N,
const int K,
const float alpha,
const float* A,
const float* B,
const float beta,
float* C,
CUDAContext* context,
TensorProto::DataType math_type) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
CUBLAS_ENFORCE(cublasSgemm(
context->cublas_handle(),
cuTransB,
cuTransA,
N,
M,
K,
&alpha,
B,
ldb,
A,
lda,
&beta,
C,
N));
}
template <>
void Gemm<float16, CUDAContext>(
const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB,
const int M,
const int N,
const int K,
const float alpha,
const float16* A,
const float16* B,
const float beta,
float16* C,
CUDAContext* context,
TensorProto::DataType math_type) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
if (math_type == TensorProto_DataType_FLOAT) {
CUBLAS_CHECK(cublasSgemmEx(
context->cublas_handle(),
cuTransB,
cuTransA,
N,
M,
K,
&alpha,
B,
CUDA_R_16F,
ldb,
A,
CUDA_R_16F,
lda,
&beta,
C,
CUDA_R_16F,
N));
} else if (math_type == TensorProto_DataType_FLOAT16) {
// convert alpha, beta from float -> __half
auto alpha_fp16 = convert::floatToHalf(alpha);
auto beta_fp16 = convert::floatToHalf(beta);
// call cublasHgemm
CUBLAS_CHECK(cublasHgemm(
context->cublas_handle(),
cuTransB,
cuTransA,
N,
M,
K,
&alpha_fp16,
(const __half*)B,
ldb,
(const __half*)A,
lda,
&beta_fp16,
(__half*)C,
N));
} else {
// fail
CAFFE_THROW("Unsupported math type");
}
}
#if CUDA_VERSION >= 9000
// No change, but required. Defer to default CUDA engine
template <>
void Gemm<float, CUDAContext, TensorCoreEngine>(
const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB,
const int M,
const int N,
const int K,
const float alpha,
const float* A,
const float* B,
const float beta,
float* C,
CUDAContext* context,
TensorProto::DataType math_type) {
return Gemm<float,CUDAContext>(TransA,
TransB,
M,
N,
K,
alpha,
A,
B,
beta,
C,
context,
math_type);
}
template <>
void Gemm<float16, CUDAContext, TensorCoreEngine>(
const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB,
const int M,
const int N,
const int K,
const float alpha,
const float16* A,
const float16* B,
const float beta,
float16* C,
CUDAContext* context,
TensorProto::DataType math_type) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
// enable TensorCore for this call on this handle
if (TensorCoreAvailable()) {
CUBLAS_ENFORCE(cublasSetMathMode(
context->cublas_handle(),
CUBLAS_TENSOR_OP_MATH));
}
CUBLAS_CHECK(cublasGemmEx(
context->cublas_handle(),
cuTransB,
cuTransA,
N,
M,
K,
&alpha,
B,
CUDA_R_16F,
ldb,
A,
CUDA_R_16F,
lda,
&beta,
C,
CUDA_R_16F,
N,
CUDA_R_32F,
CUBLAS_GEMM_DFALT_TENSOR_OP));
// Now disable TensorCore math for subsequent calls to this handle
if (TensorCoreAvailable()) {
CUBLAS_ENFORCE(cublasSetMathMode(
context->cublas_handle(),
CUBLAS_DEFAULT_MATH));
}
}
#endif // CUDA_VERSION >= 9000
template <>
void GemmEx<float, CUDAContext>(
const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB,
const int M,
const int N,
const int K,
const float alpha,
const float* A,
const int lda,
const float* B,
const int ldb,
const float beta,
float* C,
const int ldc,
CUDAContext* context) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
CUBLAS_ENFORCE(cublasSgemm(
context->cublas_handle(),
cuTransB,
cuTransA,
N,
M,
K,
&alpha,
B,
ldb,
A,
lda,
&beta,
C,
ldc));
}
template <>
void Gemv<float, CUDAContext>(
const CBLAS_TRANSPOSE TransA,
const int M,
const int N,
const float alpha,
const float* A,
const float* x,
const float beta,
float* y,
CUDAContext* context,
TensorProto::DataType math_type) {
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
CUBLAS_ENFORCE(cublasSgemv(
context->cublas_handle(),
cuTransA,
N,
M,
&alpha,
A,
N,
x,
1,
&beta,
y,
1));
}
// Batched Add variants
namespace {
template <typename T>
__global__ void AddStripedBatchKernel(
const int N,
const T* first,
T* Y,
const int stripe,
const int batch) {
for (int j = 0; j < batch; j++) {
const T* x = first + j * stripe;
CUDA_1D_KERNEL_LOOP(i, N) {
Y[i] += x[i];
}
}
}
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH(T) \
template <> \
void AddStripedBatch<T, CUDAContext>( \
const int N, \
const T* first, \
T* Y, \
const int stripe, \
const int batch, \
CUDAContext* context) { \
AddStripedBatchKernel<T><<< \
CAFFE_GET_BLOCKS(N), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>(N, first, Y, stripe, batch); \
}
CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH(float);
CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH(double);
#undef CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH
template <>
void Gemv<float16, CUDAContext>(
const CBLAS_TRANSPOSE TransA,
const int M,
const int N,
const float alpha,
const float16* A,
const float16* x,
const float beta,
float16* y,
CUDAContext* context,
TensorProto::DataType math_type) {
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
// sort out what we need to call cublasSgemmEx / cublasHgemm
int m = (cuTransA == CUBLAS_OP_N) ? N : M;
int k = (cuTransA == CUBLAS_OP_N) ? M : N;
int LDA = (cuTransA == CUBLAS_OP_N) ? m : k;
int LDC = m;
if (math_type == TensorProto_DataType_FLOAT) {
CUBLAS_CHECK(cublasSgemmEx(
context->cublas_handle(),
cuTransA,
CUBLAS_OP_N,
m,
1,
k,
&alpha,
A,
CUDA_R_16F,
LDA,
x,
CUDA_R_16F,
k,
&beta,
y,
CUDA_R_16F,
LDC));
} else if (math_type == TensorProto_DataType_FLOAT16) {
auto alpha_fp16 = convert::floatToHalf(alpha);
auto beta_fp16 = convert::floatToHalf(beta);
CUBLAS_CHECK(cublasHgemm(
context->cublas_handle(),
cuTransA,
CUBLAS_OP_N,
m,
1,
k,
&alpha_fp16,
(const __half*)A,
LDA,
(const __half*)x,
k,
&beta_fp16,
(__half*)y,
LDC));
} else {
// fail
CAFFE_THROW("Unsupported math type");
}
}
namespace {
template <typename T>
__global__ void SetKernel(const int N, const T alpha, T* Y) {
CUDA_1D_KERNEL_LOOP(i, N) {
Y[i] = alpha;
}
}
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_SET(T) \
template <> \
void Set<T, CUDAContext>(const TIndex N, const T alpha, T *Y, \
CUDAContext* context) { \
SetKernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, \
0, context->cuda_stream()>>>(N, alpha, Y); \
}
CAFFE2_SPECIALIZED_CUDA_SET(float);
CAFFE2_SPECIALIZED_CUDA_SET(double);
CAFFE2_SPECIALIZED_CUDA_SET(bool);
CAFFE2_SPECIALIZED_CUDA_SET(int8_t);
CAFFE2_SPECIALIZED_CUDA_SET(int16_t);
CAFFE2_SPECIALIZED_CUDA_SET(float16);
CAFFE2_SPECIALIZED_CUDA_SET(int);
CAFFE2_SPECIALIZED_CUDA_SET(int64_t);
CAFFE2_SPECIALIZED_CUDA_SET(char);
CAFFE2_SPECIALIZED_CUDA_SET(uint8_t);
CAFFE2_SPECIALIZED_CUDA_SET(uint16_t);
#undef CAFFE2_SPECIALIZED_CUDA_SET
namespace {
template <typename T>
__global__ void
UniformShift(const int N, const float min, const float max, T* x) {
float scale = max - min;
CUDA_1D_KERNEL_LOOP(i, N) {
x[i] = convert::To<float, T>(convert::To<T, float>(x[i]) * scale + min);
}
}
__global__ void UniformIntFit(const int N, const int min, const int max,
unsigned int* x) {
int* x_int = reinterpret_cast<int*>(x);
int range = (max - min + 1);
CUDA_1D_KERNEL_LOOP(i, N) {
x_int[i] = min + static_cast<int>(x[i] % range);
}
}
} // namespace
template <>
void RandUniform<float, CUDAContext>(
const int n, const float min, const float max, float* r,
CUDAContext* context) {
CURAND_ENFORCE(curandGenerateUniform(context->curand_generator(), r, n));
UniformShift<float><<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS,
0, context->cuda_stream()>>>(n, min, max, r);
}
template <>
void RandUniform<double, CUDAContext>(
const int n, const double min, const double max, double* r,
CUDAContext* context) {
CURAND_ENFORCE(
curandGenerateUniformDouble(context->curand_generator(), r, n));
UniformShift<double><<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS,
0, context->cuda_stream()>>>(n, min, max, r);
}
template <>
void RandUniform<int, CUDAContext>(
const int n, const int min, const int max, int* r,
CUDAContext* context) {
CURAND_ENFORCE(curandGenerate(
context->curand_generator(), reinterpret_cast<unsigned int*>(r), n));
UniformIntFit<<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS,
0, context->cuda_stream()>>>(
n, min, max, reinterpret_cast<unsigned int*>(r));
}
template <typename T>
int HandleOddLengthRandGaussian(
const int n,
const T mean,
const T std,
T* r,
CUDAContext* context) {
if (n % 2 == 1) {
std::default_random_engine generator;
std::normal_distribution<T> distribution(mean, std);
const T random_value = distribution(generator);
math::Set<T, CUDAContext>(1, random_value, r + (n - 1), context);
return n - 1;
}
return n;
}
template <>
void RandGaussian<float, CUDAContext>(
const int n, const float mean, const float std, float* r,
CUDAContext* context) {
// If n is odd, we add a random Gaussian value at the end manually
// and generate n-1 random values using curandGenerateNormal.
// curandGenerateNormal requires n to be even.
const int even_n =
HandleOddLengthRandGaussian<float>(n, mean, std, r, context);
CURAND_ENFORCE(
curandGenerateNormal(context->curand_generator(), r, even_n, mean, std));
}
template <>
void RandGaussian<double, CUDAContext>(
const int n, const double mean, const double std, double* r,
CUDAContext* context) {
const int even_n =
HandleOddLengthRandGaussian<double>(n, mean, std, r, context);
CURAND_ENFORCE(curandGenerateNormalDouble(
context->curand_generator(), r, even_n, mean, std));
}
template<>
void Dot<float, CUDAContext>(
const int n, const float* a, const float* b, float* y,
CUDAContext* context) {
float result;
CUBLAS_ENFORCE(cublasSdot(context->cublas_handle(), n, a, 1, b, 1, &result));
context->Copy<float, CPUContext, CUDAContext>(1, &result, y);
}
template <>
void Dot<float16, CUDAContext>(
const int n,
const float16* a,
const float16* b,
float16* y,
CUDAContext* context) {
float16 result;
// execute with 32-bit math
CUBLAS_CHECK(cublasDotEx(
context->cublas_handle(),
n,
a,
CUDA_R_16F,
1,
b,
CUDA_R_16F,
1,
&result,
CUDA_R_16F,
CUDA_R_32F));
context->Copy<float16, CPUContext, CUDAContext>(1, &result, y);
}
// A previous version of caffe2 used Thrust but it turns out that thrust
// reduction has an implicit scratch space allocation and deallocation, which
// may interfere with NCCL and create a deadlock. Hence we are using a custom
// reduction here.
#define SUM_KERNEL_NTHREADS 128
template <typename T>
__global__ void SumKernel(const int N, const T* X, T* Y, bool square) {
const int idx = threadIdx.x;
__shared__ float reduction_buffer[SUM_KERNEL_NTHREADS];
reduction_buffer[idx] = 0;
// A multilevel reduction.
// N -> 128
if (!square) {
for (int i = idx; i < N; i += SUM_KERNEL_NTHREADS) {
reduction_buffer[idx] += convert::To<T, float>(X[i]);
}
} else {
for (int i = idx; i < N; i += SUM_KERNEL_NTHREADS) {
float Xi = convert::To<T, float>(X[i]);
reduction_buffer[idx] += Xi * Xi;
}
}
__syncthreads();
// 128 -> 32
if (idx < 32) {
reduction_buffer[idx] +=
reduction_buffer[idx + 32] +
reduction_buffer[idx + 64] +
reduction_buffer[idx + 96];
}
__syncthreads();
// 32 -> 1
if (idx == 0) {
float tmp = 0;
for (int i = 0; i < 32; ++i) {
tmp += reduction_buffer[i];
}
*Y = convert::To<float, T>(tmp);
}
}
// According to the benchmarks script
// caffe2/caffe2/experiments/python/device_reduce_sum_bench.py,
// device reduce is slower for N <= 10000.
#define DEVICE_REDUCE_SIZE_THRESHOLD 10000
namespace {
template <typename T>
__global__ void SumConvertKernel(float* sum, T* dest) {
*dest = convert::To<float, T>(*sum);
}
template <typename FloatIterT>
void SumFloatIter(
const int N,
FloatIterT it,
float*& dest,
CUDAContext* context,
Tensor<CUDAContext>* scratch_ptr) {
size_t memRequired = 0;
cub::DeviceReduce::Sum(
nullptr, memRequired, it, dest, N, context->cuda_stream());
auto buffer_size =
static_cast<TIndex>((memRequired + sizeof(float) - 1) / sizeof(float));
if (!dest) {
// allocate one more float at the end of scratch for dest
scratch_ptr->Resize(std::vector<TIndex>{buffer_size + 1});
dest = scratch_ptr->template mutable_data<float>() + buffer_size;
} else {
scratch_ptr->Resize(std::vector<TIndex>{buffer_size});
}
cub::DeviceReduce::Sum(
static_cast<void*>(scratch_ptr->template mutable_data<float>()),
memRequired,
it,
dest,
N,
context->cuda_stream());
}
} // namespace
template <>
void Sum<float, CUDAContext>(
const int N,
const float* x,
float* y,
CUDAContext* context,
Tensor<CUDAContext>* scratch_ptr) {
if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) {
SumFloatIter(N, x, y, context, scratch_ptr);
} else {
SumKernel<<<1, SUM_KERNEL_NTHREADS, 0, context->cuda_stream()>>>(
N, x, y, false);
}
}
namespace {
template <typename T>
struct FloatTransform {
inline __host__ __device__ float operator()(const T v) const {
return convert::To<T, float>(v);
}
};
} // namespace
#define CAFFE2_MATH_SUM_FUNC(T) \
template <> \
void Sum<T, CUDAContext>( \
const int N, \
const T* x, \
T* y, \
CUDAContext* context, \
Tensor<CUDAContext>* scratch_ptr) { \
if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) { \
FloatTransform<T> transform; \
cub::TransformInputIterator<float, FloatTransform<T>, const T*> it( \
x, transform); \
float* sum = nullptr; \
SumFloatIter(N, it, sum, context, scratch_ptr); \
SumConvertKernel<<<1, 1, 0, context->cuda_stream()>>>(sum, y); \
} else { \
SumKernel<<<1, SUM_KERNEL_NTHREADS, 0, context->cuda_stream()>>>( \
N, x, y, false); \
} \
}
CAFFE2_MATH_SUM_FUNC(float16)
#undef CAFFE2_MATH_SUM_FUNC
namespace {
template <typename T>
struct SqrTransform {
inline __host__ __device__ T operator()(const T v) const {
return v * v;
}
};
} // namespace
template <>
void SumSqr<float, CUDAContext>(
const int N,
const float* x,
float* y,
CUDAContext* context,
Tensor<CUDAContext>* scratch_ptr) {
if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) {
SqrTransform<float> transform;
cub::TransformInputIterator<float, SqrTransform<float>, const float*> it(
x, transform);
SumFloatIter(N, it, y, context, scratch_ptr);
} else {
SumKernel<<<1, SUM_KERNEL_NTHREADS, 0, context->cuda_stream()>>>(
N, x, y, true);
}
}
#define CAFFE2_MATH_SUMSQR_FUNC(T) \
template <> \
void SumSqr<T, CUDAContext>( \
const int N, \
const T* x, \
T* y, \
CUDAContext* context, \
Tensor<CUDAContext>* scratch_ptr) { \
if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) { \
FloatTransform<T> float_transform; \
cub::TransformInputIterator<float, FloatTransform<T>, const T*> \
float_it(x, float_transform); \
SqrTransform<float> sqr_transform; \
cub::TransformInputIterator< \
float, \
SqrTransform<float>, \
decltype(float_it)> \
it(float_it, sqr_transform); \
float* sum = nullptr; \
SumFloatIter(N, it, sum, context, scratch_ptr); \
SumConvertKernel<<<1, 1, 0, context->cuda_stream()>>>(sum, y); \
} else { \
SumKernel<<<1, SUM_KERNEL_NTHREADS, 0, context->cuda_stream()>>>( \
N, x, y, true); \
} \
}
CAFFE2_MATH_SUMSQR_FUNC(float16)
#undef CAFFE2_MATH_SUMSQR_FUNC
#undef DEVICE_REDUCE_SIZE_THRESHOLD
namespace {
template <typename T>
__global__ void SelectKernel(
const int N, const int D, const T* x, const int* idx, T* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = x[i * D + idx[i]];
}
}
} // namespace
template <>
void Select<float, CUDAContext>(
const int N, const int D, const float* x, const int* idx, float* y,
CUDAContext* context) {
SelectKernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS,
0, context->cuda_stream()>>>(N, D, x, idx, y);
}
template <>
void Select<float16, CUDAContext>(
const int N,
const int D,
const float16* x,
const int* idx,
float16* y,
CUDAContext* context) {
SelectKernel<float16><<<
CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(N, D, x, idx, y);
}
namespace {
template <typename T>
__global__ void ScaleKernel(const int n, const float alpha, const T* x, T* y) {
CUDA_1D_KERNEL_LOOP(i, n) {
// y[i] = convert::To<float,T>(convert::To<T, float>(x[i]) * alpha);
y[i] = convert::Get<T>(convert::Get<float>(x[i]) * alpha);
}
}
template <typename T>
__global__ void
ScaleKernelDeviceAlpha(const int n, const float* alpha, const T* x, T* y) {
CUDA_1D_KERNEL_LOOP(i, n) {
y[i] = x[i] * (*alpha);
}
}
template <typename T>
__global__ void PowKernel(const int n, const T* x, const T exponent, T* y) {
CUDA_1D_KERNEL_LOOP(i, n) {
y[i] = powf(x[i], exponent);
}
}
// fp16 specialization
template <>
__global__ void ScaleKernelDeviceAlpha(
const int n,
const float* alpha,
const float16* x,
float16* y) {
CUDA_1D_KERNEL_LOOP(i, n) {
y[i] = convert::To<float, float16>(
convert::To<float16, float>(x[i]) * (*alpha));
}
}
} // namespace
template <>
void Powx<float, CUDAContext>(
const int N,
const float* a,
const float b,
float* y,
CUDAContext* context) {
PowKernel<<<
CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(N, a, b, y);
}
template <>
void Scale<float, CUDAContext>(
const int n,
const float alpha,
const float* x,
float* y,
CUDAContext* context) {
ScaleKernel<float><<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS,
0, context->cuda_stream()>>>(n, alpha, x, y);
}
template <>
void Scale<float16, CUDAContext>(
const int n,
const float alpha,
const float16* x,
float16* y,
CUDAContext* context) {
ScaleKernel<float16><<<
CAFFE_GET_BLOCKS(n),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(n, alpha, x, y);
}
template <>
void Scale<float, CUDAContext>(
const int n, const float* alpha, const float *x, float* y,
CUDAContext* context) {
ScaleKernelDeviceAlpha<float><<<
CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(
n, alpha, x, y);
}
template <>
void Scale<float16, CUDAContext>(
const int n,
const float* alpha,
const float16* x,
float16* y,
CUDAContext* context) {
ScaleKernelDeviceAlpha<float16><<<
CAFFE_GET_BLOCKS(n),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(n, alpha, x, y);
}
template <>
void Axpy<float, CUDAContext>(
const int N,
const float alpha,
const float* X,
float* Y,
CUDAContext* context) {
CUBLAS_ENFORCE(cublasSaxpy(context->cublas_handle(), N, &alpha, X, 1, Y, 1));
}
template <>
void Axpy<double, CUDAContext>(
const int N,
const float alpha,
const double* X,
double* Y,
CUDAContext* context) {
double alpha_d{alpha};
CUBLAS_ENFORCE(
cublasDaxpy(context->cublas_handle(), N, &alpha_d, X, 1, Y, 1));
}
template <>
void Axpy<float16, CUDAContext>(
const int N,
const float alpha,
const float16* X,
float16* Y,
CUDAContext* context) {
CUBLAS_CHECK(cublasAxpyEx(
context->cublas_handle(),
N,
&alpha,
CUDA_R_16F,
X,
CUDA_R_16F,
1,
Y,
CUDA_R_16F,
1,
CUDA_R_32F));
}
namespace {
template <typename T>
__global__ void AxpyKernel(const int n, const float* a, const T* x, T* y) {
CUDA_1D_KERNEL_LOOP(index, n) {
y[index] = convert::Get<T>(
convert::Get<float>(x[index]) * (*a) + convert::Get<float>(y[index]));
}
}
} // namespace
template <>
void Axpy<float, CUDAContext>(
const int n, const float* alpha, const float* X,
float* Y, CUDAContext* context) {
AxpyKernel<float><<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS,
0, context->cuda_stream()>>>(n, alpha, X, Y);
}
template <>
void Axpy<float16, CUDAContext>(
const int n,
const float* alpha,
const float16* X,
float16* Y,
CUDAContext* context) {
AxpyKernel<float16><<<
CAFFE_GET_BLOCKS(n),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(n, alpha, X, Y);
}
namespace {
template <typename T>
__global__ void AxpbyKernel(const int n, const T a, const T* x,
const T b, T* y) {
CUDA_1D_KERNEL_LOOP(index, n) {
y[index] = x[index] * a + y[index] * b;
}
}
} // namespace
template <>
void Axpby<float, CUDAContext>(
const int n, const float a, const float* x, const float b, float* y,
CUDAContext* context) {
AxpbyKernel<float><<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS,
0, context->cuda_stream()>>>(n, a, x, b, y);
}
namespace {
template <typename T>
__global__ void im2col_gpu_kernel_nchw(const int n, const T* data_im,
const int height, const int width, const int kernel_h, const int kernel_w,
const int dilation_h, const int dilation_w,
const int pad_t, const int pad_l,
const int stride_h, const int stride_w,
const int height_col, const int width_col,
T* data_col) {
CUDA_1D_KERNEL_LOOP(index, n) {
int w_out = index % width_col;
int h_index = index / width_col;
int h_out = h_index % height_col;
int channel_in = h_index / height_col;
int channel_out = channel_in * kernel_h * kernel_w;
int h_in = h_out * stride_h - pad_t;
int w_in = w_out * stride_w - pad_l;
T* data_col_ptr = data_col;
data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;
const T* data_im_ptr = data_im;
data_im_ptr += (channel_in * height + h_in) * width + w_in;
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
int h = h_in + i * dilation_h;
int w = w_in + j * dilation_w;
*data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?
data_im_ptr[i * dilation_h * width + j * dilation_w] : 0;
data_col_ptr += height_col * width_col;
}
}
}
}
template <typename T>
__global__ void im2col_gpu_kernel_nhwc(const int n, const T* data_im,
const int height, const int width, const int kernel_h, const int kernel_w,
const int dilation_h, const int dilation_w,
const int pad_t, const int pad_l,
const int stride_h, const int stride_w,
const int width_col, const int channels,
T* data_col) {
const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
CUDA_1D_KERNEL_LOOP(index, n) {
int channel_in = index % channels;
int w_out = index / channels % width_col;
int h_out = index / channels / width_col;
int h_in = h_out * stride_h - pad_t;
int w_in = w_out * stride_w - pad_l;
T* local_data_col = data_col +
((h_out * width_col) + w_out) * channels * kernel_h * kernel_w
+ channel_in;
for (int i = 0; i < dkernel_h; i += dilation_h) {
int h = h_in + i;
for (int j = 0; j < dkernel_w; j += dilation_w) {
int w = w_in + j;
*local_data_col = (h >= 0 && w >= 0 && h < height && w < width) ?
data_im[(h * width + w) * channels + channel_in] : 0;
local_data_col += channels;
}
}
}
}
template <typename T>
__global__ void col2im_gpu_kernel_nchw(const int n, const T* data_col,
const int height, const int width,
const int patch_h, const int patch_w,
const int dilation_h, const int dilation_w,
const int pad_t, const int pad_l,
const int stride_h, const int stride_w,
const int height_col, const int width_col,
T* data_im) {
const int dpatch_h = dilation_h * (patch_h - 1) + 1;
const int dpatch_w = dilation_w * (patch_w - 1) + 1;
CUDA_1D_KERNEL_LOOP(index, n) {
T val = 0;
int w = index % width + pad_l;
int h = (index / width) % height + pad_t;
int c = index / (width * height);
// compute the start and end of the output
int w_col_start = (w < dpatch_w) ? 0 : (w - dpatch_w) / stride_w + 1;
int w_col_end = min(w / stride_w + 1, width_col);
int h_col_start = (h < dpatch_h) ? 0 : (h - dpatch_h) / stride_h + 1;
int h_col_end = min(h / stride_h + 1, height_col);
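    // Example (illustrative): with stride_w = 2 and dpatch_w = 3, a padded
    // image column w = 6 gives w_col_start = (6-3)/2+1 = 2 and
    // w_col_end = min(6/2+1, width_col) = 4 (assuming width_col >= 4), so
    // only output columns 2 and 3 can contribute to this element.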
for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
int h_k = (h - h_col * stride_h);
int w_k = (w - w_col * stride_w);
if (h_k % dilation_h == 0 && w_k % dilation_w == 0) {
h_k /= dilation_h;
w_k /= dilation_w;
int data_col_index =
(((c * patch_h + h_k) * patch_w + w_k) * height_col + h_col) *
width_col +
w_col;
val += data_col[data_col_index];
}
}
}
data_im[index] = val;
}
}
template <typename T>
__global__ void col2im_gpu_kernel_nhwc(const int n, const T* data_col,
const int width, const int channels,
const int patch_h, const int patch_w,
const int dilation_h, const int dilation_w,
const int pad_t, const int pad_l,
const int stride_h, const int stride_w,
const int height_col, const int width_col,
T* data_im) {
const int dpatch_h = dilation_h * (patch_h - 1) + 1;
const int dpatch_w = dilation_w * (patch_w - 1) + 1;
CUDA_1D_KERNEL_LOOP(index, n) {
T val = 0;
int c = index % channels;
int w = index / channels % width + pad_l;
int h = index / channels / width + pad_t;
// compute the start and end of the output
int w_col_start = (w < dpatch_w) ? 0 : (w - dpatch_w) / stride_w + 1;
int w_col_end = min(w / stride_w + 1, width_col);
int h_col_start = (h < dpatch_h) ? 0 : (h - dpatch_h) / stride_h + 1;
int h_col_end = min(h / stride_h + 1, height_col);
int channels_col = patch_h * patch_w * channels;
for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
int h_k = h - h_col * stride_h;
int w_k = w - w_col * stride_w;
if (h_k % dilation_h == 0 && w_k % dilation_w == 0) {
h_k /= dilation_h;
w_k /= dilation_w;
int c_col = (h_k * patch_w + w_k) * channels + c;
val += data_col[(h_col * width_col + w_col) * channels_col + c_col];
}
}
}
data_im[index] = val;
}
}
// Ported from caffe1
template <typename T, int num_axes>
__global__ void im2col_nd_gpu_kernel(
const int n,
const T* data_im,
const int* im_shape,
const int* col_shape,
const int* kernel_shape,
const int* pad,
const int* stride,
const int* dilation,
T* data_col) {
int d_temp[num_axes]; // NOLINT(runtime/arrays)
int d_iter[num_axes]; // NOLINT(runtime/arrays)
__shared__ int shared_dilation[num_axes];
__shared__ int shared_kernel_shape[num_axes];
__shared__ int shared_pad[num_axes];
__shared__ int shared_stride[num_axes];
__shared__ int shared_col_shape[num_axes + 1];
__shared__ int shared_im_shape[num_axes + 1];
if (threadIdx.x < num_axes) {
shared_dilation[threadIdx.x] = dilation[threadIdx.x];
shared_kernel_shape[threadIdx.x] = kernel_shape[threadIdx.x];
shared_pad[threadIdx.x] = pad[threadIdx.x];
shared_stride[threadIdx.x] = stride[threadIdx.x];
}
if (threadIdx.x < num_axes + 1) {
shared_col_shape[threadIdx.x] = col_shape[threadIdx.x];
shared_im_shape[threadIdx.x] = im_shape[threadIdx.x];
}
__syncthreads();
int i;
CUDA_1D_KERNEL_LOOP(index, n) {
// Initialize channel_in, computed in the loop below, with intermediate
// computations used to compute the spatial indices.
int channel_in = index;
int channel_out = 1;
for (i = num_axes - 1; i >= 0; --i) {
d_temp[i] = channel_in % shared_col_shape[i + 1];
channel_in /= shared_col_shape[i + 1];
channel_out *= shared_kernel_shape[i];
}
channel_out *= channel_in;
int data_col_inc = 1;
for (i = 0; i < num_axes; ++i) {
channel_out *= shared_col_shape[i + 1];
channel_out += d_temp[i];
d_temp[i] = d_temp[i] * shared_stride[i] - shared_pad[i];
channel_in *= shared_im_shape[i + 1];
channel_in += d_temp[i];
data_col_inc *= shared_col_shape[i + 1];
d_iter[i] = 0;
}
T* data_col_ptr = data_col + channel_out;
const T* data_im_ptr = data_im + channel_in;
bool incremented;
do {
bool in_range = true;
for (i = 0; i < num_axes; ++i) {
const int d_iter_im = d_iter[i] * shared_dilation[i] + d_temp[i];
in_range &= d_iter_im >= 0 && d_iter_im < shared_im_shape[i + 1];
if (!in_range) {
break;
}
}
if (in_range) {
int data_im_offset = d_iter[0] * shared_dilation[0];
for (i = 1; i < num_axes; ++i) {
data_im_offset *= shared_im_shape[i + 1];
data_im_offset += d_iter[i] * shared_dilation[i];
}
*data_col_ptr = data_im_ptr[data_im_offset];
} else {
*data_col_ptr = 0;
}
data_col_ptr += data_col_inc;
incremented = false;
for (i = num_axes - 1; i >= 0; --i) {
const int d_max = shared_kernel_shape[i];
if (d_iter[i] == d_max - 1) {
d_iter[i] = 0;
} else { // d_iter[i] < d_max - 1
++d_iter[i];
incremented = true;
break;
}
} // for (int i = num_axes - 1; i >= 0; --i)
} while (incremented); // do
} // CUDA_KERNEL_LOOP(index, n)
}
template <typename T, int num_axes>
__global__ void col2im_nd_gpu_kernel(
const int n,
const T* data_col,
const int* im_shape,
const int* col_shape,
const int* kernel_shape,
const int* pad,
const int* stride,
const int* dilation,
T* data_im) {
int d_im[num_axes]; // NOLINT(runtime/arrays)
int d_col_iter[num_axes]; // NOLINT(runtime/arrays)
int d_col_start[num_axes]; // NOLINT(runtime/arrays)
int d_col_end[num_axes]; // NOLINT(runtime/arrays)
__shared__ int shared_dilation[num_axes];
__shared__ int shared_kernel_shape[num_axes];
__shared__ int shared_pad[num_axes];
__shared__ int shared_stride[num_axes];
__shared__ int shared_col_shape[num_axes + 1];
__shared__ int shared_im_shape[num_axes + 1];
if (threadIdx.x < num_axes) {
shared_dilation[threadIdx.x] = dilation[threadIdx.x];
shared_kernel_shape[threadIdx.x] = kernel_shape[threadIdx.x];
shared_pad[threadIdx.x] = pad[threadIdx.x];
shared_stride[threadIdx.x] = stride[threadIdx.x];
}
if (threadIdx.x < num_axes + 1) {
shared_col_shape[threadIdx.x] = col_shape[threadIdx.x];
shared_im_shape[threadIdx.x] = im_shape[threadIdx.x];
}
__syncthreads();
CUDA_1D_KERNEL_LOOP(index, n) {
    // Initialize c_im, which the loop below progressively divides down to
    // recover the spatial indices d_im.
int c_im = index;
// Calculate d_im (image dimensions).
for (int i = num_axes - 1; i >= 0; --i) {
d_im[i] = c_im % shared_im_shape[i + 1] + shared_pad[i];
c_im /= shared_im_shape[i + 1];
}
// Calculate col start/end indices.
bool done = false;
for (int i = 0; i < num_axes; ++i) {
const int kernel_extent =
shared_dilation[i] * (shared_kernel_shape[i] - 1) + 1;
d_col_start[i] = d_col_iter[i] = (d_im[i] < kernel_extent)
? 0
: (d_im[i] - kernel_extent) / shared_stride[i] + 1;
d_col_end[i] =
min(d_im[i] / shared_stride[i] + 1, shared_col_shape[i + 1]);
if (d_col_start[i] >= d_col_end[i]) {
// Skip computation if the dimension is 0 at any spatial axis --
// final val will be 0.
data_im[index] = 0;
done = true;
break; // for (int i = 0; i < num_axes; ++i)
}
}
if (done) {
continue; // CUDA_KERNEL_LOOP(index, n)
}
// Loop over the col to compute the output val.
T val = 0;
bool incremented = true;
bool skip = false;
do {
// Compute the final offset.
int final_offset = 0;
int kernel_shape_prod = 1;
int kernel_index;
for (int i = num_axes - 1; i >= 0; --i) {
kernel_index = d_im[i] - d_col_iter[i] * shared_stride[i];
if (kernel_index % shared_dilation[i]) {
skip = true;
break;
} else {
kernel_index /= shared_dilation[i];
final_offset += kernel_index * kernel_shape_prod;
kernel_shape_prod *= shared_kernel_shape[i];
}
}
if (!skip) {
final_offset += kernel_shape_prod * c_im;
for (int i = 0; i < num_axes; ++i) {
final_offset *= shared_col_shape[i + 1];
final_offset += d_col_iter[i];
}
val += data_col[final_offset];
}
skip = false;
incremented = false;
for (int i = num_axes - 1; i >= 0; --i) {
const int d_max = d_col_end[i];
if (d_col_iter[i] == d_max - 1) {
d_col_iter[i] = d_col_start[i];
} else { // d_col_iter[i] < d_max - 1
++d_col_iter[i];
incremented = true;
break; // for (int i = num_axes - 1; i >= 0; --i)
}
} // for (int i = num_axes - 1; i >= 0; --i)
} while (incremented);
data_im[index] = val;
} // CUDA_KERNEL_LOOP(index, n)
}
} // namespace
template <>
void Im2col<float, CUDAContext, StorageOrder::NCHW>(
const float* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int dilation_h, const int dilation_w,
const int pad_t, const int pad_l, const int pad_b, const int pad_r,
const int stride_h,
const int stride_w, float* data_col, CUDAContext* context) {
const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
// We are going to launch channels * height_col * width_col kernels, each
// kernel responsible for copying a single-channel grid.
int height_col = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
int width_col = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
int num_kernels = channels * height_col * width_col;
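  // Worked example (illustrative): height = 5, pad_t = pad_b = 1, kernel_h = 3,
  // dilation_h = 1 give dkernel_h = 3 and height_col = (5 + 2 - 3)/1 + 1 = 5,
  // i.e. a "same"-sized output for this stride-1 configuration.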
// NOLINT_NEXT_LINE(whitespace/operators)
im2col_gpu_kernel_nchw<float><<<CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS, 0,
context->cuda_stream()>>>(
num_kernels, data_im, height, width, kernel_h, kernel_w,
dilation_h, dilation_w, pad_t, pad_l, stride_h, stride_w,
height_col, width_col, data_col);
}
template <>
void Im2col<float, CUDAContext, StorageOrder::NHWC>(
const float* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int dilation_h, const int dilation_w,
const int pad_t, const int pad_l, const int pad_b, const int pad_r,
const int stride_h,
const int stride_w, float* data_col, CUDAContext* context) {
const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
// We are going to launch height_col * width_col * channels kernels, each
// kernel responsible for copying a single-channel grid.
int height_col = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
int width_col = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
int num_kernels = height_col * width_col * channels;
// NOLINT_NEXT_LINE(whitespace/operators)
im2col_gpu_kernel_nhwc<float><<<CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS, 0,
context->cuda_stream()>>>(
num_kernels, data_im, height, width, kernel_h, kernel_w,
dilation_h, dilation_w, pad_t, pad_l, stride_h, stride_w,
width_col, channels, data_col);
}
template <>
void Col2im<float, CUDAContext, StorageOrder::NCHW>(
const float* data_col, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int dilation_h, const int dilation_w,
const int pad_t, const int pad_l, const int pad_b, const int pad_r,
const int stride_h,
const int stride_w, float* data_im, CUDAContext* context) {
const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
int height_col = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
int width_col = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
int num_kernels = channels * height * width;
  // To avoid atomic operations, we launch one thread per image (bottom)
  // element and accumulate the column (top) contributions inside the kernel.
col2im_gpu_kernel_nchw<float><<<CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS, 0,
context->cuda_stream()>>>(
num_kernels, data_col, height, width, kernel_h, kernel_w,
dilation_h, dilation_w,
pad_t, pad_l, stride_h, stride_w,
height_col, width_col, data_im);
}
template <>
void Col2im<float, CUDAContext, StorageOrder::NHWC>(
const float* data_col, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int dilation_h, const int dilation_w,
const int pad_t, const int pad_l, const int pad_b, const int pad_r,
const int stride_h,
const int stride_w, float* data_im, CUDAContext* context) {
const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
int height_col = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
int width_col = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
int num_kernels = height * width * channels;
  // To avoid atomic operations, we launch one thread per image (bottom)
  // element and accumulate the column (top) contributions inside the kernel.
col2im_gpu_kernel_nhwc<float><<<CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS, 0,
context->cuda_stream()>>>(
num_kernels, data_col, width, channels, kernel_h, kernel_w,
dilation_h, dilation_w,
pad_t, pad_l, stride_h, stride_w, height_col, width_col, data_im);
}
template <>
void Col2imNd<float, CUDAContext, StorageOrder::NCHW>(
const float* data_col,
const int* img_shape,
const int* col_shape,
const int img_size,
const int col_size,
const int* kernel_shape,
const int* stride,
const int* dilation,
const int* pad,
const int N,
float* data_img,
CUDAContext* context) {
CAFFE_ENFORCE_LT(
N, CAFFE_CUDA_NUM_THREADS, "num_axes should be smaller than block size.");
#define COL2IM_ND_KERNEL(n) \
col2im_nd_gpu_kernel<float, n> /* NOLINT_NEXT_LINE(whitespace/operators) */ \
<<<CAFFE_GET_BLOCKS(img_size), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>( \
img_size, \
data_col, \
img_shape, \
col_shape, \
kernel_shape, \
pad, \
stride, \
dilation, \
data_img)
switch (N) {
case 1:
COL2IM_ND_KERNEL(1);
break;
case 2:
COL2IM_ND_KERNEL(2);
break;
case 3:
COL2IM_ND_KERNEL(3);
break;
case 4:
COL2IM_ND_KERNEL(4);
break;
case 5:
COL2IM_ND_KERNEL(5);
break;
default:
CAFFE_THROW(
"Col2imNd does not support computation with ", N, " spatial axes");
}
}
template <>
void Im2colNd<float, CUDAContext, StorageOrder::NCHW>(
const float* data_img,
const int* img_shape,
const int* col_shape,
const int img_size,
const int col_size,
const int* kernel_shape,
const int* stride,
const int* dilation,
const int* pad,
const int N,
float* data_col,
CUDAContext* context,
    bool /*accumulate_output*/) {
CAFFE_ENFORCE_LT(
N, CAFFE_CUDA_NUM_THREADS, "num_axes should be smaller than block size.");
#define IM2COL_ND_KERNEL(n) \
im2col_nd_gpu_kernel<float, n> /* NOLINT_NEXT_LINE(whitespace/operators) */ \
<<<CAFFE_GET_BLOCKS(col_size), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>( \
col_size, \
data_img, \
img_shape, \
col_shape, \
kernel_shape, \
pad, \
stride, \
dilation, \
data_col)
switch (N) {
case 1:
IM2COL_ND_KERNEL(1);
break;
case 2:
IM2COL_ND_KERNEL(2);
break;
case 3:
IM2COL_ND_KERNEL(3);
break;
case 4:
      IM2COL_ND_KERNEL(4);
      break;
    case 5:
IM2COL_ND_KERNEL(5);
break;
default:
CAFFE_THROW(
"Im2colNd does not support computation with ", N, " spatial axes");
}
}
template <>
void CopyMatrix<CUDAContext>(
const size_t itemsize,
const int M,
const int N,
const void* A,
const int lda,
void* B,
const int ldb,
CUDAContext* context) {
cudaMemcpy2DAsync(B, ldb * itemsize, A, lda * itemsize, N * itemsize, M,
cudaMemcpyDeviceToDevice, context->cuda_stream());
}
template <>
void CopyVector<float, CUDAContext>(
const int N,
const float* src,
float* dst,
CUDAContext* context) {
if (src != dst && N > 0) {
cudaMemcpyAsync(
dst,
src,
sizeof(float) * N,
cudaMemcpyDeviceToDevice,
context->cuda_stream());
}
}
namespace {
__global__ void rowwise_max_kernel(
const int rows,
const int cols,
const float* data,
float* out) {
typedef cub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
for (int rowIndex = blockIdx.x; rowIndex < rows; rowIndex += gridDim.x) {
float maxval = -FLT_MAX;
// NB: The memory accesses here are sequentialized; without unrolling
// the loop, there will not be any ILP. However, because we are running
// this kernel with a lot of threads, this should not be a big problem.
// However, if we reduce the number of threads to take advantage of
// warp-wide synchronization, this may become a problem again.
for (int colIndex = threadIdx.x; colIndex < cols; colIndex += blockDim.x) {
maxval = max(data[rowIndex * cols + colIndex], maxval);
}
maxval = BlockReduce(temp_storage).Reduce(maxval, cub::Max());
if (threadIdx.x == 0) {
out[rowIndex] = maxval;
}
__syncthreads();
}
}
__global__ void colwise_max_kernel(
const int rows,
const int cols,
const float* data,
float* out) {
typedef cub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
for (int colIndex = blockIdx.x; colIndex < cols; colIndex += gridDim.x) {
float maxval = -FLT_MAX;
for (int rowIndex = threadIdx.x; rowIndex < rows; rowIndex += blockDim.x) {
maxval = max(data[rowIndex * cols + colIndex], maxval);
}
maxval = BlockReduce(temp_storage).Reduce(maxval, cub::Max());
if (threadIdx.x == 0) {
out[colIndex] = maxval;
}
__syncthreads();
}
}
} // namespace
template <>
void RowwiseMax(
const int N,
const int D,
const float* x,
float* y,
CUDAContext* context) {
rowwise_max_kernel<<<
std::min(N, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(N, D, x, y);
}
template <>
void ColwiseMax(
const int N,
const int D,
const float* x,
float* y,
CUDAContext* context) {
colwise_max_kernel<<<
std::min(D, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(N, D, x, y);
}
namespace {
__global__ void
maximum_kernel(const int N, const float alpha, const float* x, float* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = fmaxf(x[i], alpha);
}
}
} // namespace
template <>
void Maximum(
const int N,
const float alpha,
const float* x,
float* y,
CUDAContext* context) {
maximum_kernel<<<
std::min(N, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(N, alpha, x, y);
}
} // namespace math
} // namespace caffe2
|
0c055575a8c1e0f8f673a281403b290ce3633a5d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.4.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
December 2013
@generated s Tue Dec 17 13:18:45 2013
@author Mark Gates
*/
#include "common_magma.h"
#include <assert.h>
#define NB 64
/*
Symmetrizes ntile tiles at a time, e.g., all diagonal tiles of a matrix.
Grid is ntile x ceil(m/NB).
Each tile is m x m, and is divided into block rows, each NB x m.
Each block has NB threads.
Each thread copies one row, iterating across all columns below diagonal.
The bottom block of rows may be partially outside the matrix;
if so, rows outside the matrix (i >= m) are disabled.
*/
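// Worked example of the scheme above (illustrative): m = 100, NB = 64,
// ntile = 4 gives a 4 x ceil(100/64) = 4 x 2 grid of 64-thread blocks; in the
// second block row, threads with i = 64..99 each copy one row and threads
// with i >= 100 fall outside the tile and do nothing.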
__global__ void
ssymmetrize_tiles_lower( int m, float *dA, int ldda, int mstride, int nstride )
{
// shift dA to tile's top-left corner
dA += blockIdx.x*(mstride + nstride*ldda);
// dA iterates across row i and dAT iterates down column i.
int i = blockIdx.y*NB + threadIdx.x;
float *dAT = dA;
if ( i < m ) {
dA += i;
dAT += i*ldda;
float *dAend = dA + i*ldda;
while( dA < dAend ) {
*dAT = (*dA); // upper := lower
dA += ldda;
dAT += 1;
}
}
}
// only difference with _lower version is direction dA=dAT instead of dAT=dA.
__global__ void
ssymmetrize_tiles_upper( int m, float *dA, int ldda, int mstride, int nstride )
{
// shift dA to tile's top-left corner
dA += blockIdx.x*(mstride + nstride*ldda);
// dA iterates across row i and dAT iterates down column i.
int i = blockIdx.y*NB + threadIdx.x;
float *dAT = dA;
if ( i < m ) {
dA += i;
dAT += i*ldda;
float *dAend = dA + i*ldda;
while( dA < dAend ) {
*dA = (*dAT); // lower := upper
dA += ldda;
dAT += 1;
}
}
}
extern "C" void
magmablas_ssymmetrize_tiles( char uplo, magma_int_t m, float *dA, magma_int_t ldda,
magma_int_t ntile, magma_int_t mstride, magma_int_t nstride )
{
/*
Purpose
=======
SSYMMETRIZE copies lower triangle to upper triangle, or vice-versa,
to make dA a general representation of a symmetric matrix.
Arguments
=========
UPLO (input) CHARACTER*1
Specifies the part of the matrix dA that is valid on input.
= 'U': Upper triangular part
= 'L': Lower triangular part
M (input) INTEGER
The number of rows of the matrix dA. M >= 0.
    dA (input/output) REAL array, dimension (LDDA,N)
The m by m matrix dA.
LDDA (input) INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
===================================================================== */
if ( m == 0 || ntile == 0 )
return;
assert( m >= 0 );
assert( ldda >= m );
assert( ldda >= (ntile - 1)*mstride + m );
assert( ntile >= 0 );
assert( mstride >= 0 );
assert( nstride >= 0 );
assert( mstride >= m || nstride >= m ); // prevent tile overlap
dim3 threads( NB );
dim3 grid( ntile, (m + NB - 1)/NB );
//printf( "m %d, grid %d x %d, threads %d\n", m, grid.x, grid.y, threads.x );
if ( (uplo == 'U') || (uplo == 'u') ) {
hipLaunchKernelGGL(( ssymmetrize_tiles_upper), dim3(grid), dim3(threads), 0, magma_stream , m, dA, ldda, mstride, nstride );
}
else if ( (uplo == 'L') || (uplo == 'l') ) {
hipLaunchKernelGGL(( ssymmetrize_tiles_lower), dim3(grid), dim3(threads), 0, magma_stream , m, dA, ldda, mstride, nstride );
}
else {
printf( "uplo has illegal value\n" );
exit(1);
}
}
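// Usage sketch (illustrative, not from the original source): make the ntile
// diagonal m x m tiles of dA symmetric by mirroring the lower triangles,
// with consecutive tiles laid out m rows and m columns apart:
//   magmablas_ssymmetrize_tiles( 'L', m, dA, ldda, ntile, m, m );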
| 0c055575a8c1e0f8f673a281403b290ce3633a5d.cu | /*
-- MAGMA (version 1.4.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
December 2013
@generated s Tue Dec 17 13:18:45 2013
@author Mark Gates
*/
#include "common_magma.h"
#include <assert.h>
#define NB 64
/*
Symmetrizes ntile tiles at a time, e.g., all diagonal tiles of a matrix.
Grid is ntile x ceil(m/NB).
Each tile is m x m, and is divided into block rows, each NB x m.
Each block has NB threads.
Each thread copies one row, iterating across all columns below diagonal.
The bottom block of rows may be partially outside the matrix;
if so, rows outside the matrix (i >= m) are disabled.
*/
__global__ void
ssymmetrize_tiles_lower( int m, float *dA, int ldda, int mstride, int nstride )
{
// shift dA to tile's top-left corner
dA += blockIdx.x*(mstride + nstride*ldda);
// dA iterates across row i and dAT iterates down column i.
int i = blockIdx.y*NB + threadIdx.x;
float *dAT = dA;
if ( i < m ) {
dA += i;
dAT += i*ldda;
float *dAend = dA + i*ldda;
while( dA < dAend ) {
*dAT = (*dA); // upper := lower
dA += ldda;
dAT += 1;
}
}
}
// only difference with _lower version is direction dA=dAT instead of dAT=dA.
__global__ void
ssymmetrize_tiles_upper( int m, float *dA, int ldda, int mstride, int nstride )
{
// shift dA to tile's top-left corner
dA += blockIdx.x*(mstride + nstride*ldda);
// dA iterates across row i and dAT iterates down column i.
int i = blockIdx.y*NB + threadIdx.x;
float *dAT = dA;
if ( i < m ) {
dA += i;
dAT += i*ldda;
float *dAend = dA + i*ldda;
while( dA < dAend ) {
*dA = (*dAT); // lower := upper
dA += ldda;
dAT += 1;
}
}
}
extern "C" void
magmablas_ssymmetrize_tiles( char uplo, magma_int_t m, float *dA, magma_int_t ldda,
magma_int_t ntile, magma_int_t mstride, magma_int_t nstride )
{
/*
Purpose
=======
SSYMMETRIZE copies lower triangle to upper triangle, or vice-versa,
to make dA a general representation of a symmetric matrix.
Arguments
=========
UPLO (input) CHARACTER*1
Specifies the part of the matrix dA that is valid on input.
= 'U': Upper triangular part
= 'L': Lower triangular part
M (input) INTEGER
The number of rows of the matrix dA. M >= 0.
    dA (input/output) REAL array, dimension (LDDA,N)
The m by m matrix dA.
LDDA (input) INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
===================================================================== */
if ( m == 0 || ntile == 0 )
return;
assert( m >= 0 );
assert( ldda >= m );
assert( ldda >= (ntile - 1)*mstride + m );
assert( ntile >= 0 );
assert( mstride >= 0 );
assert( nstride >= 0 );
assert( mstride >= m || nstride >= m ); // prevent tile overlap
dim3 threads( NB );
dim3 grid( ntile, (m + NB - 1)/NB );
//printf( "m %d, grid %d x %d, threads %d\n", m, grid.x, grid.y, threads.x );
if ( (uplo == 'U') || (uplo == 'u') ) {
ssymmetrize_tiles_upper<<< grid, threads, 0, magma_stream >>>( m, dA, ldda, mstride, nstride );
}
else if ( (uplo == 'L') || (uplo == 'l') ) {
ssymmetrize_tiles_lower<<< grid, threads, 0, magma_stream >>>( m, dA, ldda, mstride, nstride );
}
else {
printf( "uplo has illegal value\n" );
exit(1);
}
}
|
206e8622736f45f7cd859250dcc2726e8ef1606a.hip | // !!! This is a file automatically generated by hipify!!!
#include "cudacommon.h"
#include <cassert>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <iostream>
#include "OptionParser.h"
#include "ResultDatabase.h"
#include "Spmv.h"
#include "util.h"
#include "conf.h"
using namespace std;
texture<float, 1> vecTex; // vector textures
texture<int2, 1> vecTexD;
// Texture Readers (used so kernels can be templated)
struct texReaderSP {
__device__ __forceinline__ float operator()(const int idx) const
{
return tex1Dfetch(vecTex, idx);
}
};
struct texReaderDP {
__device__ __forceinline__ double operator()(const int idx) const
{
int2 v = tex1Dfetch(vecTexD, idx);
#if (__CUDA_ARCH__ < 130)
// Devices before arch 130 don't support DP, and having the
// __hiloint2double() intrinsic will cause compilation to fail.
// This return statement added as a workaround -- it will compile,
// but since the arch doesn't support DP, it will never be called
return 0;
#else
return __hiloint2double(v.y, v.x);
#endif
}
};
template <typename floatType>
void memcpyHostToDevice(floatType *dst, floatType *src, int size ){
CUDA_SAFE_CALL(hipMemcpy(dst, src, size * sizeof(floatType),hipMemcpyHostToDevice));
};
template <typename floatType>
void memcpyDeviceTexture(const void* devPtr, size_t size ){
if (sizeof(floatType) == sizeof(float))
{
hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>();
CUDA_SAFE_CALL(hipBindTexture(0, vecTex, devPtr, channelDesc,size * sizeof(float)));
}else {
hipChannelFormatDesc channelDesc = hipCreateChannelDesc<int2>();
CUDA_SAFE_CALL(hipBindTexture(0, vecTexD, devPtr, channelDesc,size * sizeof(int2)));
}
};
template void memcpyHostToDevice<double>(double *dst, double *src, int size );
template void memcpyHostToDevice<int>(int *dst, int *src, int size );
template void memcpyDeviceTexture<double>(const void* devPtr, size_t size );
template void memcpyDeviceTexture<int>(const void* devPtr, size_t size );
// Forward declarations for kernels
template <typename fpType, typename texReader>
__global__ void
spmv_csr_scalar_kernel(const fpType * __restrict__ val,
const int * __restrict__ cols,
const int * __restrict__ rowDelimiters,
const int dim, fpType * __restrict__ out);
template <typename fpType, typename texReader>
__global__ void
spmv_csr_scalar_section_kernel(const fpType * __restrict__ val,
const int * __restrict__ cols,
const int * __restrict__ rowDelimiters,
const int dim, fpType * __restrict__ out,
const int secStart);
template <typename fpType, typename texReader>
__global__ void
spmv_csr_vector_kernel(const fpType * __restrict__ val,
const int * __restrict__ cols,
const int * __restrict__ rowDelimiters,
const int dim, fpType * __restrict__ out);
template <typename fpType, typename texReader>
__global__ void
spmv_csr_vector_section_kernel(const fpType * __restrict__ val,
const int * __restrict__ cols,
const int * __restrict__ rowDelimiters,
const int dim, fpType * __restrict__ out,
const int secStart);
template <typename fpType, typename texReader>
__global__ void
spmv_ellpackr_kernel(const fpType * __restrict__ val,
const int * __restrict__ cols,
const int * __restrict__ rowLengths,
const int dim, fpType * __restrict__ out);
template <typename fpType>
__global__ void
zero(fpType * __restrict__ a, const int size);
template <typename floatType>
void csrTestScalar(ResultDatabase* resultDB, OptionParser* op, CSRMM<floatType> *csrHost, CSRMM<floatType> *csrDevice ){
int deviceStart = csrDevice->getStartPoint();
int *h_rowDelimiters = csrHost->getRowDelimiters()+deviceStart;
int secStart = h_rowDelimiters[0] ;
floatType *h_val = csrHost->getVal()+secStart;
int *h_cols = csrHost->getCols()+secStart;
floatType *h_vec = csrHost->getVec()+deviceStart;
floatType *h_out = csrHost->getOut()+deviceStart;
int numRows = csrDevice->getNumRows();
//int numNonZeroes = csrDevice->getNumNonZeroes();
int numNonZeroes = h_rowDelimiters[numRows]-secStart;
//std::cout<<"secStart: "<<secStart<<std::endl;
floatType *d_val = csrDevice->getVal();
int *d_cols = csrDevice->getCols();
int *d_rowDelimiters = csrDevice->getRowDelimiters();
floatType *d_vec = csrDevice->getVec();
floatType *d_out = csrDevice->getOut();
#ifdef CUDA_RECORD
// Setup events for timing
hipEvent_t start, stop;
CUDA_SAFE_CALL(hipEventCreate(&start));
CUDA_SAFE_CALL(hipEventCreate(&stop));
// Transfer data to device
CUDA_SAFE_CALL(hipEventRecord(start, 0));
#endif
CUDA_SAFE_CALL(hipMemcpy(d_val, h_val, numNonZeroes * sizeof(floatType),hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipMemcpy(d_cols, h_cols, numNonZeroes * sizeof(int),hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipMemcpy(d_rowDelimiters, h_rowDelimiters,(numRows+1) * sizeof(int), hipMemcpyHostToDevice));
#ifdef CUDA_RECORD
CUDA_SAFE_CALL(hipEventRecord(stop, 0));
CUDA_SAFE_CALL(hipEventSynchronize(stop));
float iTransferTime, oTransferTime;
CUDA_SAFE_CALL(hipEventElapsedTime(&iTransferTime, start, stop));
iTransferTime *= 1.e-3;
#endif
// Bind texture for position
string suffix;
if (sizeof(floatType) == sizeof(float)){
suffix = "-SP";
}else {
suffix = "-DP";
}
// Setup thread configuration
int nBlocksScalar = (int) ceil((floatType) numRows / BLOCK_SIZE);
int nBlocksVector = (int) ceil(numRows /(floatType)(BLOCK_SIZE / WARP_SIZE));
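    // Example (illustrative): numRows = 10000 with BLOCK_SIZE = 128 and
    // WARP_SIZE = 32 (actual values come from conf.h) gives
    // nBlocksScalar = ceil(10000/128) = 79 (one thread per row) and
    // nBlocksVector = ceil(10000/4) = 2500 (one warp per row).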
int passes = op->getOptionInt("passes");
int iters = op->getOptionInt("iterations");
#ifdef CUDA_RECORD
// Results description info
char atts[TEMP_BUFFER_SIZE];
sprintf(atts, "%d_elements_%d_rows",numNonZeroes, numRows);
string prefix = "";
double gflop = 2 * (double) numNonZeroes / 1e9;
#endif
#ifdef DARTS_DEBUG
cout << "CSR Scalar Kernel\n";
#endif
//cout<<"passes is : " <<passes<<", iters is "<< iters<<std::endl;
//for (int k=0; k<passes; k++)
//{
// Run Scalar Kernel
#ifdef CUDA_RECORD
CUDA_SAFE_CALL(hipEventRecord(start, 0));
#endif
//for (int j = 0; j < iters; j++)
//{
if(suffix == "-DP"){
hipLaunchKernelGGL(( spmv_csr_scalar_section_kernel<floatType, texReaderDP>), dim3(nBlocksScalar), dim3(BLOCK_SIZE), 0, 0,
d_val, d_cols, d_rowDelimiters, numRows, d_out,secStart);
}else{
hipLaunchKernelGGL(( spmv_csr_scalar_section_kernel<floatType, texReaderSP>), dim3(nBlocksScalar), dim3(BLOCK_SIZE), 0, 0,
d_val, d_cols, d_rowDelimiters, numRows, d_out,secStart);
}
//}
#ifdef CUDA_RECORD
CUDA_SAFE_CALL(hipEventRecord(stop, 0));
CUDA_SAFE_CALL(hipEventSynchronize(stop));
float scalarKernelTime;
CUDA_SAFE_CALL(hipEventElapsedTime(&scalarKernelTime, start, stop));
// Transfer data back to host
CUDA_SAFE_CALL(hipEventRecord(start, 0));
#endif
CUDA_SAFE_CALL(hipMemcpy(h_out, d_out, numRows * sizeof(floatType),hipMemcpyDeviceToHost));
#ifdef CUDA_RECORD
CUDA_SAFE_CALL(hipEventRecord(stop, 0));
CUDA_SAFE_CALL(hipEventSynchronize(stop));
CUDA_SAFE_CALL(hipEventElapsedTime(&oTransferTime, start, stop));
#endif
hipDeviceSynchronize();
#ifdef CUDA_RECORD
oTransferTime *= 1.e-3;
scalarKernelTime = (scalarKernelTime / (float)iters) * 1.e-3;
double totalTransfer = iTransferTime + oTransferTime;
string startPoint = std::to_string(csrDevice->getStartPoint());
string testName = prefix+"CSR-Scalar"+suffix+"-startPoint-"+startPoint;
resultDB->AddResult(testName, atts, "Gflop/s",gflop/(scalarKernelTime));
        resultDB->AddResult(testName+"_PCIe", atts, "Gflop/s",gflop / (scalarKernelTime+totalTransfer));
#endif
//}
}
template <typename floatType>
void csrStreamTestScalar(ResultDatabase* resultDB, OptionParser* op, CSRMM<floatType> *csrHost, CSRMM<floatType> *csrDevice ){
int deviceStart = csrDevice->getStartPoint();
int *h_rowDelimiters = csrHost->getRowDelimiters()+deviceStart;
int secStart = h_rowDelimiters[0] ;
floatType *h_val = csrHost->getVal()+secStart;
int *h_cols = csrHost->getCols()+secStart;
//floatType *h_vec = csrHost->getVec()+deviceStart;
//floatType *h_vec = csrHost->getVec()+secStart;
floatType *h_out = csrHost->getOut()+deviceStart;
int numRows = csrDevice->getNumRows();
//int numNonZeroes = csrDevice->getNumNonZeroes();
int numNonZeroes = h_rowDelimiters[numRows]-secStart;
#ifdef DARTS_DEBUG
std::cout<<"deviceStart: "<<deviceStart<<",secStart: "<<secStart<<",numRows: "<<numRows<<std::endl;
#endif
floatType *d_val = csrDevice->getVal();
int *d_cols = csrDevice->getCols();
int *d_rowDelimiters = csrDevice->getRowDelimiters();
floatType *d_vec = csrDevice->getVec();
floatType *d_out = csrDevice->getOut();
// Bind texture for position
string suffix;
if (sizeof(floatType) == sizeof(float)){
suffix = "-SP";
}else {
suffix = "-DP";
}
#ifdef DARTS_DEBUG
cout << "CSR Stream Scalar Kernel\n";
#endif
int nStream = 32;
hipStream_t *stream;
hipEvent_t *cuEvent;
stream = new hipStream_t[nStream];
cuEvent = new hipEvent_t[nStream];
int chunk = numRows/nStream;
int *sNumRows = new int[nStream];
int *sNumNonZeroes = new int[nStream];
int *svcStart = new int[nStream];
int *srStart = new int[nStream];
int *ssStart = new int[nStream];
// Setup thread configuration
int *nBlocksScalar = new int[nStream];
for(int i=0; i<nStream; ++i){
//CUDA_SAFE_CALL(hipStreamCreateWithFlags(&stream[i],hipStreamNonBlocking));
CUDA_SAFE_CALL(hipStreamCreate(&stream[i]));
//CUDA_SAFE_CALL(hipEventCreate(&cuEvent[i]));
CUDA_SAFE_CALL(hipEventCreateWithFlags(&cuEvent[i],hipEventDisableTiming));
sNumRows[i] = (i==(nStream-1))?(numRows-i*chunk):(chunk);
sNumNonZeroes[i]= (i==(nStream-1))?(h_rowDelimiters[numRows]-h_rowDelimiters[i*chunk]):(h_rowDelimiters[(i+1)*chunk]-h_rowDelimiters[i*chunk]);
svcStart[i] = h_rowDelimiters[i*chunk]-h_rowDelimiters[0] ;
srStart[i] = i*chunk;
ssStart[i] = h_rowDelimiters[i*chunk];
nBlocksScalar[i] = (int) ceil((floatType) sNumRows[i] / BLOCK_SIZE);
}
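    // Example of the partitioning above (illustrative): numRows = 100 and
    // nStream = 32 give chunk = 100/32 = 3; streams 0..30 process 3 rows each
    // and the last stream picks up the remaining 100 - 31*3 = 7 rows.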
for(int i=0; i<nStream; ++i){
#ifdef DARTS_DEBUG
std::cout<<"stream: "<<i<<std::endl;
std::cout<<"svcStart["<<i<<"] = "<<svcStart[i]<<",svcStart["<<i<<"] = "<<svcStart[i]<<",srStart["<<i<<"] = "<<srStart[i]<<",sNumRows["<<i<<"] = "<<sNumRows[i]<<",sNumNonZeroes["<<i<<"] = "<<sNumNonZeroes[i]<<std::endl;
std::cout<<"d_val addr: "<<d_val<<std::endl;
#endif
CUDA_SAFE_CALL(hipMemcpyAsync(d_val + svcStart[i] , h_val+svcStart[i], sNumNonZeroes[i] * sizeof(floatType),hipMemcpyHostToDevice,stream[i]));
CUDA_SAFE_CALL(hipMemcpyAsync(d_cols+ svcStart[i], h_cols+svcStart[i], sNumNonZeroes[i] * sizeof(int),hipMemcpyHostToDevice,stream[i]));
CUDA_SAFE_CALL(hipMemcpyAsync(d_rowDelimiters + srStart[i], h_rowDelimiters+srStart[i],(sNumRows[i]+1) * sizeof(int), hipMemcpyHostToDevice,stream[i]));
if(suffix == "-DP"){
hipLaunchKernelGGL(( spmv_csr_scalar_section_kernel<floatType, texReaderDP>), dim3(nBlocksScalar[i]), dim3(BLOCK_SIZE), 0, stream[i],
d_val+svcStart[i], d_cols+svcStart[i], d_rowDelimiters+srStart[i], sNumRows[i], d_out+srStart[i],ssStart[i]);
}else{
hipLaunchKernelGGL(( spmv_csr_scalar_section_kernel<floatType, texReaderSP>), dim3(nBlocksScalar[i]), dim3(BLOCK_SIZE),0, stream[i],
d_val+svcStart[i], d_cols+svcStart[i], d_rowDelimiters+srStart[i], sNumRows[i], d_out+srStart[i],ssStart[i]);
}
CUDA_SAFE_CALL(hipMemcpyAsync(h_out+srStart[i], d_out+srStart[i], sNumRows[i] * sizeof(floatType),hipMemcpyDeviceToHost,stream[i]));
}
//CUDA_SAFE_CALL(hipDeviceSynchronize());
CUDA_SAFE_CALL(hipDeviceSynchronize());
std::cout<<"h_our addr: "<<h_out<<",h_out[1]="<<h_out[1]<<std::endl;
delete [] sNumRows ;
delete [] sNumNonZeroes ;
delete [] svcStart;
delete [] srStart;
delete [] ssStart;
delete [] nBlocksScalar ;
for(int i=0; i<nStream; ++i){
CUDA_SAFE_CALL(hipStreamDestroy(stream[i]));
CUDA_SAFE_CALL(hipEventDestroy(cuEvent[i]));
}
delete [] stream;
delete [] cuEvent;
#ifdef CUDA_RECORD
#endif
}
template <typename floatType>
void csrTestVector(ResultDatabase* resultDB, OptionParser* op, CSRMM<floatType> *csrHost, CSRMM<floatType> *csrDevice ){
int deviceStart = csrDevice->getStartPoint();
int *h_rowDelimiters = csrHost->getRowDelimiters()+deviceStart;
int secStart = h_rowDelimiters[0] ;
floatType *h_val = csrHost->getVal()+secStart;
int *h_cols = csrHost->getCols()+secStart;
floatType *h_vec = csrHost->getVec()+deviceStart;
floatType *h_out = csrHost->getOut()+deviceStart;
int numRows = csrDevice->getNumRows();
int numNonZeroes = csrDevice->getNumNonZeroes();
//std::cout<<"secStart: "<<secStart<<std::endl;
floatType *d_val = csrDevice->getVal();
int *d_cols = csrDevice->getCols();
int *d_rowDelimiters = csrDevice->getRowDelimiters();
floatType *d_vec = csrDevice->getVec();
floatType *d_out = csrDevice->getOut();
// Setup events for timing
hipEvent_t start, stop;
CUDA_SAFE_CALL(hipEventCreate(&start));
CUDA_SAFE_CALL(hipEventCreate(&stop));
// Transfer data to device
CUDA_SAFE_CALL(hipEventRecord(start, 0));
CUDA_SAFE_CALL(hipMemcpy(d_val, h_val, numNonZeroes * sizeof(floatType),hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipMemcpy(d_cols, h_cols, numNonZeroes * sizeof(int),hipMemcpyHostToDevice));
// CUDA_SAFE_CALL(hipMemcpy(d_vec, h_vec, numRows * sizeof(floatType),hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipMemcpy(d_rowDelimiters, h_rowDelimiters,(numRows+1) * sizeof(int), hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipEventRecord(stop, 0));
CUDA_SAFE_CALL(hipEventSynchronize(stop));
float iTransferTime, oTransferTime;
CUDA_SAFE_CALL(hipEventElapsedTime(&iTransferTime, start, stop));
iTransferTime *= 1.e-3;
// Bind texture for position
string suffix;
if (sizeof(floatType) == sizeof(float)){
// hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>();
// CUDA_SAFE_CALL(hipBindTexture(0, vecTex, d_vec, channelDesc,numRows * sizeof(float)));
suffix = "-SP";
}
else {
// hipChannelFormatDesc channelDesc = hipCreateChannelDesc<int2>();
// CUDA_SAFE_CALL(hipBindTexture(0, vecTexD, d_vec, channelDesc,numRows * sizeof(int2)));
suffix = "-DP";
}
// Setup thread configuration
int nBlocksScalar = (int) ceil((floatType) numRows / BLOCK_SIZE);
int nBlocksVector = (int) ceil(numRows /(floatType)(BLOCK_SIZE / WARP_SIZE));
int passes = op->getOptionInt("passes");
int iters = op->getOptionInt("iterations");
// Results description info
char atts[TEMP_BUFFER_SIZE];
sprintf(atts, "%d_elements_%d_rows", numNonZeroes, numRows);
string prefix = "";
double gflop = 2 * (double) numNonZeroes / 1e9;
cout << "CSR vector Kernel\n";
//cout<<"passes is : " <<passes<<", iters is "<< iters<<std::endl;
//for (int k=0; k<passes; k++)
//{
// Run Scalar Kernel
CUDA_SAFE_CALL(hipEventRecord(start, 0));
//for (int j = 0; j < iters; j++)
//{
if(suffix == "-DP"){
hipLaunchKernelGGL(( spmv_csr_vector_section_kernel<floatType, texReaderDP>), dim3(nBlocksVector), dim3(BLOCK_SIZE), 0, 0,
d_val, d_cols, d_rowDelimiters, numRows, d_out,secStart);
}else{
        hipLaunchKernelGGL(( spmv_csr_vector_section_kernel<floatType, texReaderSP>), dim3(nBlocksVector), dim3(BLOCK_SIZE), 0, 0,
d_val, d_cols, d_rowDelimiters, numRows, d_out,secStart);
}
//}
CUDA_SAFE_CALL(hipEventRecord(stop, 0));
CUDA_SAFE_CALL(hipEventSynchronize(stop));
float vectorKernelTime;
CUDA_SAFE_CALL(hipEventElapsedTime(&vectorKernelTime, start, stop));
// Transfer data back to host
CUDA_SAFE_CALL(hipEventRecord(start, 0));
CUDA_SAFE_CALL(hipMemcpy(h_out, d_out, numRows * sizeof(floatType),hipMemcpyDeviceToHost));
CUDA_SAFE_CALL(hipEventRecord(stop, 0));
CUDA_SAFE_CALL(hipEventSynchronize(stop));
CUDA_SAFE_CALL(hipEventElapsedTime(&oTransferTime, start, stop));
hipDeviceSynchronize();
vectorKernelTime = (vectorKernelTime / (float)iters) * 1.e-3;
string testName = prefix+"CSR-Vector"+suffix;
double totalTransfer = iTransferTime + oTransferTime;
resultDB->AddResult(testName, atts, "Gflop/s",gflop/(vectorKernelTime));
resultDB->AddResult(testName+"_PCIe", atts, "Gflop/s",gflop / (vectorKernelTime+totalTransfer));
//}
}
// ****************************************************************************
// Function: spmv_csr_scalar_kernel
//
// Purpose:
// Computes sparse matrix - vector multiplication on the GPU using
// the CSR data storage format, using a thread per row of the sparse
// matrix; based on Bell (SC09) and Baskaran (IBM Tech Report)
//
// Arguments:
// val: array holding the non-zero values for the matrix
// cols: array of column indices for each element of the sparse matrix
// rowDelimiters: array of size dim+1 holding indices to rows of the matrix
// last element is the index one past the last
// element of the matrix
// dim: number of rows in the matrix
// out: output - result from the spmv calculation
//
// Returns: nothing
// out indirectly through a pointer
//
// Programmer: Lukasz Wesolowski
// Creation: June 28, 2010
//
// Modifications:
//
// ****************************************************************************
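// CSR layout example (illustrative): the 3x3 matrix
// [ 10 0 20 ; 0 30 0 ; 40 0 50 ] is stored as val = {10,20,30,40,50},
// cols = {0,2,1,0,2}, rowDelimiters = {0,2,3,5}; the thread owning row r
// accumulates val[j] * vec[cols[j]] for j in [rowDelimiters[r], rowDelimiters[r+1]).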
template <typename fpType, typename texReader>
__global__ void
spmv_csr_scalar_kernel(const fpType * __restrict__ val,
const int * __restrict__ cols,
const int * __restrict__ rowDelimiters,
const int dim, fpType * __restrict__ out)
{
int myRow = blockIdx.x * blockDim.x + threadIdx.x;
texReader vecTexReader;
if (myRow < dim)
{
fpType t = 0.0f;
int start = rowDelimiters[myRow];
int end = rowDelimiters[myRow+1];
for (int j = start; j < end; j++)
{
int col = cols[j];
t += val[j] * vecTexReader(col);
#ifdef DARTS_DEBUG
if(threadIdx.x <20&&blockIdx.x ==0){
printf("val[%d]=%lf, vecTexReader(%d)=%lf\n",j,val[j],col,vecTexReader(col));
}
#endif
}
out[myRow] = t;
}
}
template <typename fpType, typename texReader>
__global__ void
spmv_csr_scalar_section_kernel(const fpType * __restrict__ val,
const int * __restrict__ cols,
const int * __restrict__ rowDelimiters,
const int dim, fpType * __restrict__ out,
const int secStart)
{
int myRow = blockIdx.x * blockDim.x + threadIdx.x;
texReader vecTexReader;
if (myRow < dim)
{
fpType t = 0.0f;
int start = rowDelimiters[myRow]-secStart;
int end = rowDelimiters[myRow+1]-secStart;
for (int j = start; j < end; j++)
{
int col = cols[j];
t += val[j] * vecTexReader(col);
#ifdef DARTS_DEBUG
//if(threadIdx.x <10&&blockIdx.x ==0){
// printf("val[%ld]=%g, vecTexReader(%ld)=%g\n",j,val[j],col,vecTexReader(col));
//}
#endif
}
out[myRow] = t;
#ifdef DARTS_DEBUG
//if(threadIdx.x <10&&blockIdx.x ==0){
// printf("out[%ld]=%g\n",out[myRow]);
//}
#endif
}
}
// ****************************************************************************
// Function: spmv_csr_vector_kernel
//
// Purpose:
// Computes sparse matrix - vector multiplication on the GPU using
// the CSR data storage format, using a warp per row of the sparse
// matrix; based on Bell (SC09) and Baskaran (IBM Tech Report)
//
// Arguments:
// val: array holding the non-zero values for the matrix
// cols: array of column indices for each element of the sparse matrix
// rowDelimiters: array of size dim+1 holding indices to rows of the matrix
// last element is the index one past the last
// element of the matrix
// dim: number of rows in the matrix
// out: output - result from the spmv calculation
//
// Returns: nothing
// out indirectly through a pointer
//
// Programmer: Lukasz Wesolowski
// Creation: June 28, 2010
//
// Modifications:
//
// ****************************************************************************
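// Access-pattern example (illustrative): a row with warpStart = 100 and
// warpEnd = 170 gives lane 0 the nonzeros j = 100, 132, 164 and lane 5 the
// nonzeros j = 105, 137, 169; the shared-memory tree below then folds the 32
// partial sums in log2(32) = 5 halving steps.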
template <typename fpType, typename texReader>
__global__ void
spmv_csr_vector_kernel(const fpType * __restrict__ val,
const int * __restrict__ cols,
const int * __restrict__ rowDelimiters,
const int dim, fpType * __restrict__ out)
{
// Thread ID in block
int t = threadIdx.x;
// Thread ID within warp
int id = t & (warpSize-1);
int warpsPerBlock = blockDim.x / warpSize;
// One row per warp
int myRow = (blockIdx.x * warpsPerBlock) + (t / warpSize);
// Texture reader for the dense vector
texReader vecTexReader;
__shared__ volatile fpType partialSums[BLOCK_SIZE];
if (myRow < dim)
{
int warpStart = rowDelimiters[myRow];
int warpEnd = rowDelimiters[myRow+1];
fpType mySum = 0;
for (int j = warpStart + id; j < warpEnd; j += warpSize)
{
int col = cols[j];
mySum += val[j] * vecTexReader(col);
}
partialSums[t] = mySum;
// Reduce partial sums
if (id < 16) partialSums[t] += partialSums[t+16];
if (id < 8) partialSums[t] += partialSums[t+ 8];
if (id < 4) partialSums[t] += partialSums[t+ 4];
if (id < 2) partialSums[t] += partialSums[t+ 2];
if (id < 1) partialSums[t] += partialSums[t+ 1];
// Write result
if (id == 0)
{
out[myRow] = partialSums[t];
}
}
}
template <typename fpType, typename texReader>
__global__ void
spmv_csr_vector_section_kernel(const fpType * __restrict__ val,
const int * __restrict__ cols,
const int * __restrict__ rowDelimiters,
const int dim, fpType * __restrict__ out,
const int secStart)
{
// Thread ID in block
int t = threadIdx.x;
// Thread ID within warp
int id = t & (warpSize-1);
int warpsPerBlock = blockDim.x / warpSize;
// One row per warp
int myRow = (blockIdx.x * warpsPerBlock) + (t / warpSize);
// Texture reader for the dense vector
texReader vecTexReader;
__shared__ volatile fpType partialSums[BLOCK_SIZE];
if (myRow < dim)
{
int warpStart = rowDelimiters[myRow]-secStart;
int warpEnd = rowDelimiters[myRow+1]-secStart;
fpType mySum = 0;
for (int j = warpStart + id; j < warpEnd; j += warpSize)
{
int col = cols[j];
mySum += val[j] * vecTexReader(col);
}
partialSums[t] = mySum;
// Reduce partial sums
if (id < 16) partialSums[t] += partialSums[t+16];
if (id < 8) partialSums[t] += partialSums[t+ 8];
if (id < 4) partialSums[t] += partialSums[t+ 4];
if (id < 2) partialSums[t] += partialSums[t+ 2];
if (id < 1) partialSums[t] += partialSums[t+ 1];
// Write result
if (id == 0)
{
out[myRow] = partialSums[t];
}
}
}
// ****************************************************************************
// Function: spmv_ellpackr_kernel
//
// Purpose:
// Computes sparse matrix - vector multiplication on the GPU using
// the ELLPACK-R data storage format; based on Vazquez et al (Univ. of
// Almeria Tech Report 2009)
//
// Arguments:
// val: array holding the non-zero values for the matrix in column
// major format and padded with zeros up to the length of longest row
// cols: array of column indices for each element of the sparse matrix
// rowLengths: array storing the length of each row of the sparse matrix
// dim: number of rows in the matrix
// out: output - result from the spmv calculation
//
// Returns: nothing directly
// out indirectly through a pointer
//
// Programmer: Lukasz Wesolowski
// Creation: June 29, 2010
//
// Modifications:
//
// ****************************************************************************
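// ELLPACK-R layout example (illustrative): for the 3x3 matrix
// [ 10 0 20 ; 0 30 0 ; 40 0 50 ] padded to the longest row (2 nonzeros) and
// stored slot-major, val = {10,30,40, 20,0,50}, cols = {0,1,0, 2,0,2},
// rowLengths = {2,1,2}; row t reads slot i at flat index i*dim + t.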
template <typename fpType, typename texReader>
__global__ void
spmv_ellpackr_kernel(const fpType * __restrict__ val,
const int * __restrict__ cols,
const int * __restrict__ rowLengths,
const int dim, fpType * __restrict__ out)
{
int t = blockIdx.x * blockDim.x + threadIdx.x;
texReader vecTexReader;
if (t < dim)
{
fpType result = 0.0f;
int max = rowLengths[t];
for (int i = 0; i < max; i++)
{
int ind = i*dim+t;
result += val[ind] * vecTexReader(cols[ind]);
}
out[t] = result;
}
}
template <typename fpType>
__global__ void
zero(fpType * __restrict__ a, const int size)
{
int t = blockIdx.x * blockDim.x + threadIdx.x;
if (t < size) a[t] = 0;
}
template void csrTestScalar<double>(ResultDatabase* resultDB, OptionParser* op, CSRMM<double> *csrHost, CSRMM<double> *csrDevice );
template void csrTestScalar<int>(ResultDatabase* resultDB, OptionParser* op, CSRMM<int> *csrHost, CSRMM<int> *csrDevice );
template void csrStreamTestScalar<double>(ResultDatabase* resultDB, OptionParser* op, CSRMM<double> *csrHost, CSRMM<double> *csrDevice );
template void csrStreamTestScalar<int>(ResultDatabase* resultDB, OptionParser* op, CSRMM<int> *csrHost, CSRMM<int> *csrDevice );
template void csrTestVector<double>(ResultDatabase* resultDB, OptionParser* op, CSRMM<double> *csrHost, CSRMM<double> *csrDevice );
template void csrTestVector<int>(ResultDatabase* resultDB, OptionParser* op, CSRMM<int> *csrHost, CSRMM<int> *csrDevice );
| 206e8622736f45f7cd859250dcc2726e8ef1606a.cu | #include "cudacommon.h"
#include <cassert>
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <iostream>
#include "OptionParser.h"
#include "ResultDatabase.h"
#include "Spmv.h"
#include "util.h"
#include "conf.h"
using namespace std;
texture<float, 1> vecTex; // vector textures
texture<int2, 1> vecTexD;
// Texture Readers (used so kernels can be templated)
struct texReaderSP {
__device__ __forceinline__ float operator()(const int idx) const
{
return tex1Dfetch(vecTex, idx);
}
};
struct texReaderDP {
__device__ __forceinline__ double operator()(const int idx) const
{
int2 v = tex1Dfetch(vecTexD, idx);
#if (__CUDA_ARCH__ < 130)
// Devices before arch 130 don't support DP, and having the
// __hiloint2double() intrinsic will cause compilation to fail.
// This return statement added as a workaround -- it will compile,
// but since the arch doesn't support DP, it will never be called
return 0;
#else
return __hiloint2double(v.y, v.x);
#endif
}
};
template <typename floatType>
void memcpyHostToDevice(floatType *dst, floatType *src, int size ){
CUDA_SAFE_CALL(cudaMemcpy(dst, src, size * sizeof(floatType),cudaMemcpyHostToDevice));
};
template <typename floatType>
void memcpyDeviceTexture(const void* devPtr, size_t size ){
if (sizeof(floatType) == sizeof(float))
{
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>();
CUDA_SAFE_CALL(cudaBindTexture(0, vecTex, devPtr, channelDesc,size * sizeof(float)));
}else {
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<int2>();
CUDA_SAFE_CALL(cudaBindTexture(0, vecTexD, devPtr, channelDesc,size * sizeof(int2)));
}
};
template void memcpyHostToDevice<double>(double *dst, double *src, int size );
template void memcpyHostToDevice<int>(int *dst, int *src, int size );
template void memcpyDeviceTexture<double>(const void* devPtr, size_t size );
template void memcpyDeviceTexture<int>(const void* devPtr, size_t size );
// Forward declarations for kernels
template <typename fpType, typename texReader>
__global__ void
spmv_csr_scalar_kernel(const fpType * __restrict__ val,
const int * __restrict__ cols,
const int * __restrict__ rowDelimiters,
const int dim, fpType * __restrict__ out);
template <typename fpType, typename texReader>
__global__ void
spmv_csr_scalar_section_kernel(const fpType * __restrict__ val,
const int * __restrict__ cols,
const int * __restrict__ rowDelimiters,
const int dim, fpType * __restrict__ out,
const int secStart);
template <typename fpType, typename texReader>
__global__ void
spmv_csr_vector_kernel(const fpType * __restrict__ val,
const int * __restrict__ cols,
const int * __restrict__ rowDelimiters,
const int dim, fpType * __restrict__ out);
template <typename fpType, typename texReader>
__global__ void
spmv_csr_vector_section_kernel(const fpType * __restrict__ val,
const int * __restrict__ cols,
const int * __restrict__ rowDelimiters,
const int dim, fpType * __restrict__ out,
const int secStart);
template <typename fpType, typename texReader>
__global__ void
spmv_ellpackr_kernel(const fpType * __restrict__ val,
const int * __restrict__ cols,
const int * __restrict__ rowLengths,
const int dim, fpType * __restrict__ out);
template <typename fpType>
__global__ void
zero(fpType * __restrict__ a, const int size);
template <typename floatType>
void csrTestScalar(ResultDatabase* resultDB, OptionParser* op, CSRMM<floatType> *csrHost, CSRMM<floatType> *csrDevice ){
int deviceStart = csrDevice->getStartPoint();
int *h_rowDelimiters = csrHost->getRowDelimiters()+deviceStart;
int secStart = h_rowDelimiters[0] ;
floatType *h_val = csrHost->getVal()+secStart;
int *h_cols = csrHost->getCols()+secStart;
floatType *h_vec = csrHost->getVec()+deviceStart;
floatType *h_out = csrHost->getOut()+deviceStart;
int numRows = csrDevice->getNumRows();
//int numNonZeroes = csrDevice->getNumNonZeroes();
int numNonZeroes = h_rowDelimiters[numRows]-secStart;
//std::cout<<"secStart: "<<secStart<<std::endl;
floatType *d_val = csrDevice->getVal();
int *d_cols = csrDevice->getCols();
int *d_rowDelimiters = csrDevice->getRowDelimiters();
floatType *d_vec = csrDevice->getVec();
floatType *d_out = csrDevice->getOut();
#ifdef CUDA_RECORD
// Setup events for timing
cudaEvent_t start, stop;
CUDA_SAFE_CALL(cudaEventCreate(&start));
CUDA_SAFE_CALL(cudaEventCreate(&stop));
// Transfer data to device
CUDA_SAFE_CALL(cudaEventRecord(start, 0));
#endif
CUDA_SAFE_CALL(cudaMemcpy(d_val, h_val, numNonZeroes * sizeof(floatType),cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(d_cols, h_cols, numNonZeroes * sizeof(int),cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(d_rowDelimiters, h_rowDelimiters,(numRows+1) * sizeof(int), cudaMemcpyHostToDevice));
#ifdef CUDA_RECORD
CUDA_SAFE_CALL(cudaEventRecord(stop, 0));
CUDA_SAFE_CALL(cudaEventSynchronize(stop));
float iTransferTime, oTransferTime;
CUDA_SAFE_CALL(cudaEventElapsedTime(&iTransferTime, start, stop));
iTransferTime *= 1.e-3;
#endif
// Bind texture for position
string suffix;
if (sizeof(floatType) == sizeof(float)){
suffix = "-SP";
}else {
suffix = "-DP";
}
// Setup thread configuration
int nBlocksScalar = (int) ceil((floatType) numRows / BLOCK_SIZE);
int nBlocksVector = (int) ceil(numRows /(floatType)(BLOCK_SIZE / WARP_SIZE));
int passes = op->getOptionInt("passes");
int iters = op->getOptionInt("iterations");
#ifdef CUDA_RECORD
// Results description info
char atts[TEMP_BUFFER_SIZE];
sprintf(atts, "%d_elements_%d_rows",numNonZeroes, numRows);
string prefix = "";
double gflop = 2 * (double) numNonZeroes / 1e9;
#endif
#ifdef DARTS_DEBUG
cout << "CSR Scalar Kernel\n";
#endif
//cout<<"passes is : " <<passes<<", iters is "<< iters<<std::endl;
//for (int k=0; k<passes; k++)
//{
// Run Scalar Kernel
#ifdef CUDA_RECORD
CUDA_SAFE_CALL(cudaEventRecord(start, 0));
#endif
//for (int j = 0; j < iters; j++)
//{
if(suffix == "-DP"){
spmv_csr_scalar_section_kernel<floatType, texReaderDP><<<nBlocksScalar, BLOCK_SIZE>>>
(d_val, d_cols, d_rowDelimiters, numRows, d_out,secStart);
}else{
spmv_csr_scalar_section_kernel<floatType, texReaderSP><<<nBlocksScalar, BLOCK_SIZE>>>
(d_val, d_cols, d_rowDelimiters, numRows, d_out,secStart);
}
//}
#ifdef CUDA_RECORD
CUDA_SAFE_CALL(cudaEventRecord(stop, 0));
CUDA_SAFE_CALL(cudaEventSynchronize(stop));
float scalarKernelTime;
CUDA_SAFE_CALL(cudaEventElapsedTime(&scalarKernelTime, start, stop));
// Transfer data back to host
CUDA_SAFE_CALL(cudaEventRecord(start, 0));
#endif
CUDA_SAFE_CALL(cudaMemcpy(h_out, d_out, numRows * sizeof(floatType),cudaMemcpyDeviceToHost));
#ifdef CUDA_RECORD
CUDA_SAFE_CALL(cudaEventRecord(stop, 0));
CUDA_SAFE_CALL(cudaEventSynchronize(stop));
CUDA_SAFE_CALL(cudaEventElapsedTime(&oTransferTime, start, stop));
#endif
cudaThreadSynchronize();
#ifdef CUDA_RECORD
oTransferTime *= 1.e-3;
scalarKernelTime = (scalarKernelTime / (float)iters) * 1.e-3;
double totalTransfer = iTransferTime + oTransferTime;
string startPoint = std::to_string(csrDevice->getStartPoint());
string testName = prefix+"CSR-Scalar"+suffix+"-startPoint-"+startPoint;
resultDB->AddResult(testName, atts, "Gflop/s",gflop/(scalarKernelTime));
resultDB->AddResult(testName, atts, "Gflop/s",gflop / (scalarKernelTime+totalTransfer));
//resultDB->AddResult(testName+"_PCIe", atts, "Gflop/s",gflop / (scalarKernelTime+totalTransfer));
#endif
//}
}
template <typename floatType>
void csrStreamTestScalar(ResultDatabase* resultDB, OptionParser* op, CSRMM<floatType> *csrHost, CSRMM<floatType> *csrDevice ){
int deviceStart = csrDevice->getStartPoint();
int *h_rowDelimiters = csrHost->getRowDelimiters()+deviceStart;
int secStart = h_rowDelimiters[0] ;
floatType *h_val = csrHost->getVal()+secStart;
int *h_cols = csrHost->getCols()+secStart;
//floatType *h_vec = csrHost->getVec()+deviceStart;
//floatType *h_vec = csrHost->getVec()+secStart;
floatType *h_out = csrHost->getOut()+deviceStart;
int numRows = csrDevice->getNumRows();
//int numNonZeroes = csrDevice->getNumNonZeroes();
int numNonZeroes = h_rowDelimiters[numRows]-secStart;
#ifdef DARTS_DEBUG
std::cout<<"deviceStart: "<<deviceStart<<",secStart: "<<secStart<<",numRows: "<<numRows<<std::endl;
#endif
floatType *d_val = csrDevice->getVal();
int *d_cols = csrDevice->getCols();
int *d_rowDelimiters = csrDevice->getRowDelimiters();
floatType *d_vec = csrDevice->getVec();
floatType *d_out = csrDevice->getOut();
// Bind texture for position
string suffix;
if (sizeof(floatType) == sizeof(float)){
suffix = "-SP";
}else {
suffix = "-DP";
}
#ifdef DARTS_DEBUG
cout << "CSR Stream Scalar Kernel\n";
#endif
int nStream = 32;
cudaStream_t *stream;
cudaEvent_t *cuEvent;
stream = new cudaStream_t[nStream];
cuEvent = new cudaEvent_t[nStream];
int chunk = numRows/nStream;
int *sNumRows = new int[nStream];
int *sNumNonZeroes = new int[nStream];
int *svcStart = new int[nStream];
int *srStart = new int[nStream];
int *ssStart = new int[nStream];
// Setup thread configuration
int *nBlocksScalar = new int[nStream];
for(int i=0; i<nStream; ++i){
//CUDA_SAFE_CALL(cudaStreamCreateWithFlags(&stream[i],cudaStreamNonBlocking));
CUDA_SAFE_CALL(cudaStreamCreate(&stream[i]));
//CUDA_SAFE_CALL(cudaEventCreate(&cuEvent[i]));
CUDA_SAFE_CALL(cudaEventCreateWithFlags(&cuEvent[i],cudaEventDisableTiming));
sNumRows[i] = (i==(nStream-1))?(numRows-i*chunk):(chunk);
sNumNonZeroes[i]= (i==(nStream-1))?(h_rowDelimiters[numRows]-h_rowDelimiters[i*chunk]):(h_rowDelimiters[(i+1)*chunk]-h_rowDelimiters[i*chunk]);
svcStart[i] = h_rowDelimiters[i*chunk]-h_rowDelimiters[0] ;
srStart[i] = i*chunk;
ssStart[i] = h_rowDelimiters[i*chunk];
nBlocksScalar[i] = (int) ceil((floatType) sNumRows[i] / BLOCK_SIZE);
}
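// Worked example (illustration only): numRows = 1000 with nStream = 32 gives
// chunk = 31, so streams 0..30 each own 31 rows and the last stream takes the
// remainder, 1000 - 31*31 = 39 rows; the s*Start offsets and sNumNonZeroes
// are carved out of h_rowDelimiters the same way.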
for(int i=0; i<nStream; ++i){
#ifdef DARTS_DEBUG
std::cout<<"stream: "<<i<<std::endl;
std::cout<<"svcStart["<<i<<"] = "<<svcStart[i]<<",svcStart["<<i<<"] = "<<svcStart[i]<<",srStart["<<i<<"] = "<<srStart[i]<<",sNumRows["<<i<<"] = "<<sNumRows[i]<<",sNumNonZeroes["<<i<<"] = "<<sNumNonZeroes[i]<<std::endl;
std::cout<<"d_val addr: "<<d_val<<std::endl;
#endif
CUDA_SAFE_CALL(cudaMemcpyAsync(d_val + svcStart[i] , h_val+svcStart[i], sNumNonZeroes[i] * sizeof(floatType),cudaMemcpyHostToDevice,stream[i]));
CUDA_SAFE_CALL(cudaMemcpyAsync(d_cols+ svcStart[i], h_cols+svcStart[i], sNumNonZeroes[i] * sizeof(int),cudaMemcpyHostToDevice,stream[i]));
CUDA_SAFE_CALL(cudaMemcpyAsync(d_rowDelimiters + srStart[i], h_rowDelimiters+srStart[i],(sNumRows[i]+1) * sizeof(int), cudaMemcpyHostToDevice,stream[i]));
if(suffix == "-DP"){
spmv_csr_scalar_section_kernel<floatType, texReaderDP><<<nBlocksScalar[i], BLOCK_SIZE, 0, stream[i]>>>
(d_val+svcStart[i], d_cols+svcStart[i], d_rowDelimiters+srStart[i], sNumRows[i], d_out+srStart[i],ssStart[i]);
}else{
spmv_csr_scalar_section_kernel<floatType, texReaderSP><<<nBlocksScalar[i], BLOCK_SIZE,0, stream[i]>>>
(d_val+svcStart[i], d_cols+svcStart[i], d_rowDelimiters+srStart[i], sNumRows[i], d_out+srStart[i],ssStart[i]);
}
CUDA_SAFE_CALL(cudaMemcpyAsync(h_out+srStart[i], d_out+srStart[i], sNumRows[i] * sizeof(floatType),cudaMemcpyDeviceToHost,stream[i]));
}
//CUDA_SAFE_CALL(cudaThreadSynchronize());
CUDA_SAFE_CALL(cudaDeviceSynchronize());
std::cout<<"h_our addr: "<<h_out<<",h_out[1]="<<h_out[1]<<std::endl;
delete [] sNumRows ;
delete [] sNumNonZeroes ;
delete [] svcStart;
delete [] srStart;
delete [] ssStart;
delete [] nBlocksScalar ;
for(int i=0; i<nStream; ++i){
CUDA_SAFE_CALL(cudaStreamDestroy(stream[i]));
CUDA_SAFE_CALL(cudaEventDestroy(cuEvent[i]));
}
delete [] stream;
delete [] cuEvent;
#ifdef CUDA_RECORD
#endif
}
template <typename floatType>
void csrTestVector(ResultDatabase* resultDB, OptionParser* op, CSRMM<floatType> *csrHost, CSRMM<floatType> *csrDevice ){
int deviceStart = csrDevice->getStartPoint();
int *h_rowDelimiters = csrHost->getRowDelimiters()+deviceStart;
int secStart = h_rowDelimiters[0] ;
floatType *h_val = csrHost->getVal()+secStart;
int *h_cols = csrHost->getCols()+secStart;
floatType *h_vec = csrHost->getVec()+deviceStart;
floatType *h_out = csrHost->getOut()+deviceStart;
int numRows = csrDevice->getNumRows();
int numNonZeroes = csrDevice->getNumNonZeroes();
//std::cout<<"secStart: "<<secStart<<std::endl;
floatType *d_val = csrDevice->getVal();
int *d_cols = csrDevice->getCols();
int *d_rowDelimiters = csrDevice->getRowDelimiters();
floatType *d_vec = csrDevice->getVec();
floatType *d_out = csrDevice->getOut();
// Setup events for timing
cudaEvent_t start, stop;
CUDA_SAFE_CALL(cudaEventCreate(&start));
CUDA_SAFE_CALL(cudaEventCreate(&stop));
// Transfer data to device
CUDA_SAFE_CALL(cudaEventRecord(start, 0));
CUDA_SAFE_CALL(cudaMemcpy(d_val, h_val, numNonZeroes * sizeof(floatType),cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(d_cols, h_cols, numNonZeroes * sizeof(int),cudaMemcpyHostToDevice));
// CUDA_SAFE_CALL(cudaMemcpy(d_vec, h_vec, numRows * sizeof(floatType),cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(d_rowDelimiters, h_rowDelimiters,(numRows+1) * sizeof(int), cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaEventRecord(stop, 0));
CUDA_SAFE_CALL(cudaEventSynchronize(stop));
float iTransferTime, oTransferTime;
CUDA_SAFE_CALL(cudaEventElapsedTime(&iTransferTime, start, stop));
iTransferTime *= 1.e-3;
// Bind texture for position
string suffix;
if (sizeof(floatType) == sizeof(float)){
// cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>();
// CUDA_SAFE_CALL(cudaBindTexture(0, vecTex, d_vec, channelDesc,numRows * sizeof(float)));
suffix = "-SP";
}
else {
// cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<int2>();
// CUDA_SAFE_CALL(cudaBindTexture(0, vecTexD, d_vec, channelDesc,numRows * sizeof(int2)));
suffix = "-DP";
}
// Setup thread configuration
int nBlocksScalar = (int) ceil((floatType) numRows / BLOCK_SIZE);
int nBlocksVector = (int) ceil(numRows /(floatType)(BLOCK_SIZE / WARP_SIZE));
int passes = op->getOptionInt("passes");
int iters = op->getOptionInt("iterations");
// Results description info
char atts[TEMP_BUFFER_SIZE];
sprintf(atts, "%d_elements_%d_rows", numNonZeroes, numRows);
string prefix = "";
double gflop = 2 * (double) numNonZeroes / 1e9;
cout << "CSR vector Kernel\n";
//cout<<"passes is : " <<passes<<", iters is "<< iters<<std::endl;
//for (int k=0; k<passes; k++)
//{
// Run Scalar Kernel
CUDA_SAFE_CALL(cudaEventRecord(start, 0));
//for (int j = 0; j < iters; j++)
//{
if(suffix == "-DP"){
spmv_csr_vector_section_kernel<floatType, texReaderDP><<<nBlocksVector, BLOCK_SIZE>>>
(d_val, d_cols, d_rowDelimiters, numRows, d_out,secStart);
}else{
spmv_csr_vector_section_kernel<floatType, texReaderSP><<<nBlocksVector, BLOCK_SIZE>>>
(d_val, d_cols, d_rowDelimiters, numRows, d_out,secStart);
}
//}
CUDA_SAFE_CALL(cudaEventRecord(stop, 0));
CUDA_SAFE_CALL(cudaEventSynchronize(stop));
float vectorKernelTime;
CUDA_SAFE_CALL(cudaEventElapsedTime(&vectorKernelTime, start, stop));
// Transfer data back to host
CUDA_SAFE_CALL(cudaEventRecord(start, 0));
CUDA_SAFE_CALL(cudaMemcpy(h_out, d_out, numRows * sizeof(floatType),cudaMemcpyDeviceToHost));
CUDA_SAFE_CALL(cudaEventRecord(stop, 0));
CUDA_SAFE_CALL(cudaEventSynchronize(stop));
CUDA_SAFE_CALL(cudaEventElapsedTime(&oTransferTime, start, stop));
cudaThreadSynchronize();
vectorKernelTime = (vectorKernelTime / (float)iters) * 1.e-3;
string testName = prefix+"CSR-Vector"+suffix;
double totalTransfer = iTransferTime + oTransferTime;
resultDB->AddResult(testName, atts, "Gflop/s",gflop/(vectorKernelTime));
resultDB->AddResult(testName+"_PCIe", atts, "Gflop/s",gflop / (vectorKernelTime+totalTransfer));
//}
}
// ****************************************************************************
// Function: spmv_csr_scalar_kernel
//
// Purpose:
// Computes sparse matrix - vector multiplication on the GPU using
// the CSR data storage format, using a thread per row of the sparse
// matrix; based on Bell (SC09) and Baskaran (IBM Tech Report)
//
// Arguments:
// val: array holding the non-zero values for the matrix
// cols: array of column indices for each element of the sparse matrix
// rowDelimiters: array of size dim+1 holding indices to rows of the matrix
// last element is the index one past the last
// element of the matrix
// dim: number of rows in the matrix
// out: output - result from the spmv calculation
//
// Returns: nothing
// out indirectly through a pointer
//
// Programmer: Lukasz Wesolowski
// Creation: June 28, 2010
//
// Modifications:
//
// ****************************************************************************
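// Worked example (illustration only): for the 3x3 matrix
//     | 5 0 1 |
//     | 0 2 0 |
//     | 3 0 4 |
// the CSR inputs to this kernel are
//     val           = {5, 1, 2, 3, 4}
//     cols          = {0, 2, 1, 0, 2}
//     rowDelimiters = {0, 2, 3, 5}    // dim+1 entries; the last one is nnz
// so the thread owning row 0 computes
//     out[0] = val[0]*vec[cols[0]] + val[1]*vec[cols[1]] = 5*vec[0] + 1*vec[2]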
template <typename fpType, typename texReader>
__global__ void
spmv_csr_scalar_kernel(const fpType * __restrict__ val,
const int * __restrict__ cols,
const int * __restrict__ rowDelimiters,
const int dim, fpType * __restrict__ out)
{
int myRow = blockIdx.x * blockDim.x + threadIdx.x;
texReader vecTexReader;
if (myRow < dim)
{
fpType t = 0.0f;
int start = rowDelimiters[myRow];
int end = rowDelimiters[myRow+1];
for (int j = start; j < end; j++)
{
int col = cols[j];
t += val[j] * vecTexReader(col);
#ifdef DARTS_DEBUG
if(threadIdx.x <20&&blockIdx.x ==0){
printf("val[%d]=%lf, vecTexReader(%d)=%lf\n",j,val[j],col,vecTexReader(col));
}
#endif
}
out[myRow] = t;
}
}
template <typename fpType, typename texReader>
__global__ void
spmv_csr_scalar_section_kernel(const fpType * __restrict__ val,
const int * __restrict__ cols,
const int * __restrict__ rowDelimiters,
const int dim, fpType * __restrict__ out,
const int secStart)
{
int myRow = blockIdx.x * blockDim.x + threadIdx.x;
texReader vecTexReader;
if (myRow < dim)
{
fpType t = 0.0f;
int start = rowDelimiters[myRow]-secStart;
int end = rowDelimiters[myRow+1]-secStart;
for (int j = start; j < end; j++)
{
int col = cols[j];
t += val[j] * vecTexReader(col);
#ifdef DARTS_DEBUG
//if(threadIdx.x <10&&blockIdx.x ==0){
// printf("val[%ld]=%g, vecTexReader(%ld)=%g\n",j,val[j],col,vecTexReader(col));
//}
#endif
}
out[myRow] = t;
#ifdef DARTS_DEBUG
//if(threadIdx.x <10&&blockIdx.x ==0){
// printf("out[%ld]=%g\n",out[myRow]);
//}
#endif
}
}
// ****************************************************************************
// Function: spmv_csr_vector_kernel
//
// Purpose:
// Computes sparse matrix - vector multiplication on the GPU using
// the CSR data storage format, using a warp per row of the sparse
// matrix; based on Bell (SC09) and Baskaran (IBM Tech Report)
//
// Arguments:
// val: array holding the non-zero values for the matrix
// cols: array of column indices for each element of the sparse matrix
// rowDelimiters: array of size dim+1 holding indices to rows of the matrix
// last element is the index one past the last
// element of the matrix
// dim: number of rows in the matrix
// out: output - result from the spmv calculation
//
// Returns: nothing
// out indirectly through a pointer
//
// Programmer: Lukasz Wesolowski
// Creation: June 28, 2010
//
// Modifications:
//
// ****************************************************************************
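// Worked example (illustration only): with warpSize = 32, a row holding 100
// nonzeros (warpStart = 0, warpEnd = 100) is consumed cooperatively:
//     lane 0 reads j = 0, 32, 64, 96
//     lane 1 reads j = 1, 33, 65, 97
//     ...
//     lane 31 reads j = 31, 63, 95
// Each lane accumulates its own mySum; the shared-memory reduction then folds
// the 32 partial sums down to the warp's first lane, which writes out[myRow].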
template <typename fpType, typename texReader>
__global__ void
spmv_csr_vector_kernel(const fpType * __restrict__ val,
const int * __restrict__ cols,
const int * __restrict__ rowDelimiters,
const int dim, fpType * __restrict__ out)
{
// Thread ID in block
int t = threadIdx.x;
// Thread ID within warp
int id = t & (warpSize-1);
int warpsPerBlock = blockDim.x / warpSize;
// One row per warp
int myRow = (blockIdx.x * warpsPerBlock) + (t / warpSize);
// Texture reader for the dense vector
texReader vecTexReader;
__shared__ volatile fpType partialSums[BLOCK_SIZE];
if (myRow < dim)
{
int warpStart = rowDelimiters[myRow];
int warpEnd = rowDelimiters[myRow+1];
fpType mySum = 0;
for (int j = warpStart + id; j < warpEnd; j += warpSize)
{
int col = cols[j];
mySum += val[j] * vecTexReader(col);
}
partialSums[t] = mySum;
// Reduce partial sums
if (id < 16) partialSums[t] += partialSums[t+16];
if (id < 8) partialSums[t] += partialSums[t+ 8];
if (id < 4) partialSums[t] += partialSums[t+ 4];
if (id < 2) partialSums[t] += partialSums[t+ 2];
if (id < 1) partialSums[t] += partialSums[t+ 1];
// Write result
if (id == 0)
{
out[myRow] = partialSums[t];
}
}
}
template <typename fpType, typename texReader>
__global__ void
spmv_csr_vector_section_kernel(const fpType * __restrict__ val,
const int * __restrict__ cols,
const int * __restrict__ rowDelimiters,
const int dim, fpType * __restrict__ out,
const int secStart)
{
// Thread ID in block
int t = threadIdx.x;
// Thread ID within warp
int id = t & (warpSize-1);
int warpsPerBlock = blockDim.x / warpSize;
// One row per warp
int myRow = (blockIdx.x * warpsPerBlock) + (t / warpSize);
// Texture reader for the dense vector
texReader vecTexReader;
__shared__ volatile fpType partialSums[BLOCK_SIZE];
if (myRow < dim)
{
int warpStart = rowDelimiters[myRow]-secStart;
int warpEnd = rowDelimiters[myRow+1]-secStart;
fpType mySum = 0;
for (int j = warpStart + id; j < warpEnd; j += warpSize)
{
int col = cols[j];
mySum += val[j] * vecTexReader(col);
}
partialSums[t] = mySum;
// Reduce partial sums
if (id < 16) partialSums[t] += partialSums[t+16];
if (id < 8) partialSums[t] += partialSums[t+ 8];
if (id < 4) partialSums[t] += partialSums[t+ 4];
if (id < 2) partialSums[t] += partialSums[t+ 2];
if (id < 1) partialSums[t] += partialSums[t+ 1];
// Write result
if (id == 0)
{
out[myRow] = partialSums[t];
}
}
}
// ****************************************************************************
// Function: spmv_ellpackr_kernel
//
// Purpose:
// Computes sparse matrix - vector multiplication on the GPU using
// the ELLPACK-R data storage format; based on Vazquez et al (Univ. of
// Almeria Tech Report 2009)
//
// Arguments:
// val: array holding the non-zero values for the matrix in column
// major format and padded with zeros up to the length of longest row
// cols: array of column indices for each element of the sparse matrix
// rowLengths: array storing the length of each row of the sparse matrix
// dim: number of rows in the matrix
// out: output - result from the spmv calculation
//
// Returns: nothing directly
// out indirectly through a pointer
//
// Programmer: Lukasz Wesolowski
// Creation: June 29, 2010
//
// Modifications:
//
// ****************************************************************************
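// Worked example (illustration only): for the same 3x3 matrix used above,
// rowLengths = {2, 1, 2}; storage is padded to the longest row (2) and laid
// out column-major with ind = i*dim + t (dim = 3):
//     val  = {5, 2, 3,  1, 0, 4}
//     cols = {0, 1, 0,  2, 0, 2}
// Thread t walks slots t, t+dim, ... but stops at rowLengths[t], so the padded
// slot of row 1 is never read, and adjacent threads touch adjacent memory
// (coalesced) on every iteration.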
template <typename fpType, typename texReader>
__global__ void
spmv_ellpackr_kernel(const fpType * __restrict__ val,
const int * __restrict__ cols,
const int * __restrict__ rowLengths,
const int dim, fpType * __restrict__ out)
{
int t = blockIdx.x * blockDim.x + threadIdx.x;
texReader vecTexReader;
if (t < dim)
{
fpType result = 0.0f;
int max = rowLengths[t];
for (int i = 0; i < max; i++)
{
int ind = i*dim+t;
result += val[ind] * vecTexReader(cols[ind]);
}
out[t] = result;
}
}
template <typename fpType>
__global__ void
zero(fpType * __restrict__ a, const int size)
{
int t = blockIdx.x * blockDim.x + threadIdx.x;
if (t < size) a[t] = 0;
}
template void csrTestScalar<double>(ResultDatabase* resultDB, OptionParser* op, CSRMM<double> *csrHost, CSRMM<double> *csrDevice );
template void csrTestScalar<int>(ResultDatabase* resultDB, OptionParser* op, CSRMM<int> *csrHost, CSRMM<int> *csrDevice );
template void csrStreamTestScalar<double>(ResultDatabase* resultDB, OptionParser* op, CSRMM<double> *csrHost, CSRMM<double> *csrDevice );
template void csrStreamTestScalar<int>(ResultDatabase* resultDB, OptionParser* op, CSRMM<int> *csrHost, CSRMM<int> *csrDevice );
template void csrTestVector<double>(ResultDatabase* resultDB, OptionParser* op, CSRMM<double> *csrHost, CSRMM<double> *csrDevice );
template void csrTestVector<int>(ResultDatabase* resultDB, OptionParser* op, CSRMM<int> *csrHost, CSRMM<int> *csrDevice );
|
b5fc37e91a9a25654e3ddc483c870067589de24e.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include<stdlib.h>
#include "device_launch_parameters.h"
#include "hip/hip_runtime.h"
#define BLOCK_SIZE 64
__global__ void
totalKernel(float * input, float * output, int len)
{
__shared__ float partialSum[2*BLOCK_SIZE];
unsigned int t = threadIdx.x;
unsigned int start = blockIdx.x*blockDim.x*2;
if((start+t)<len)
{
partialSum[t] = input[start+t];
if(start+t+blockDim.x < len)
partialSum[blockDim.x+t] = input[start+t+blockDim.x];
else
partialSum[blockDim.x+t] =0;
}
else
{
partialSum[t] = 0;
partialSum[blockDim.x+t] = 0;
}
__syncthreads();
for(unsigned int stride = blockDim.x; stride >0; stride/=2){
__syncthreads();
if(t<stride)
partialSum[t]+=partialSum[t+stride];
}
// after the final stride, only thread 0 holds the completed block sum
if(t==0)
output[blockIdx.x] = partialSum[0];
}
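// Worked example (illustration only): with BLOCK_SIZE = 64, block b loads
// input[128*b .. 128*b+127] into partialSum (two elements per thread), then
// the stride loop folds 128 values -> 64 -> 32 -> ... -> 1:
//     stride 64: partialSum[t] += partialSum[t+64] for t < 64
//     stride 32: partialSum[t] += partialSum[t+32] for t < 32
//     ...
//     stride 1:  partialSum[0] += partialSum[1]
// leaving the block's sum in partialSum[0].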
void totalCPU(float * input, float * output, int len)
{
int i=0;
output[0]=0;
for(i=0;i<len;i++)
output[0] +=input[i];
}
void loadVal(float *a, float *b, int len)
{
/*
int i=0;
char buff[256];
FILE *file;
file = fopen("dataSetRaw0.txt","r");
if(!file)
{
printf("No file found");
system("Pause");
exit(0);
}
while(fgets(buff,len,file))
{
a[i] = atof(buff);
i++;
}
for(i=0;i<len;i++)
{
a[i] = i;
}
fclose(file);
*/
a[0]=7;
a[1]=9;
a[2]=1;
a[3]=4;
a[4]=2;
a[5]=8;
a[6]=10;
a[7]=5;
a[8]=10;
a[9]=7;
a[10]=5;
a[11]=7;
a[12]=8;
a[13]=6;
a[14]=4;
a[15]=6;
a[16]=6;
a[17]=3;
a[18]=4;
a[19]=0;
a[20]=1;
a[21]=10;
a[22]=5;
a[23]=8;
a[24]=7;
a[25]=0;
a[26]=2;
a[27]=9;
a[28]=2;
a[29]=8;
a[30]=4;
a[31]=3;
a[32]=2;
a[33]=1;
a[34]=4;
a[35]=10;
a[36]=3;
a[37]=9;
a[38]=6;
a[39]=9;
a[40]=4;
a[41]=7;
a[42]=3;
a[43]=3;
a[44]=3;
a[45]=1;
a[46]=5;
a[47]=5;
a[48]=0;
a[49]=7;
a[50]=7;
a[51]=2;
a[52]=5;
a[53]=7;
a[54]=9;
a[55]=5;
a[56]=8;
a[57]=5;
a[58]=0;
a[59]=10;
a[60]=3;
a[61]=9;
a[62]=5;
a[63]=10;
a[64]=8;
a[65]=4;
a[66]=8;
a[67]=8;
a[68]=2;
a[69]=6;
a[70]=9;
a[71]=6;
a[72]=9;
a[73]=0;
a[74]=9;
a[75]=7;
a[76]=3;
a[77]=1;
a[78]=8;
a[79]=7;
a[80]=0;
a[81]=10;
a[82]=9;
a[83]=8;
a[84]=7;
a[85]=10;
a[86]=9;
a[87]=1;
a[88]=4;
a[89]=3;
a[90]=1;
a[91]=8;
a[92]=2;
a[93]=4;
a[94]=8;
a[95]=1;
a[96]=1;
a[97]=2;
a[98]=7;
a[99]=10;
a[100]=6;
a[101]=10;
a[102]=0;
a[103]=0;
a[104]=2;
a[105]=2;
a[106]=5;
a[107]=1;
a[108]=6;
a[109]=10;
a[110]=2;
a[111]=2;
a[112]=8;
a[113]=10;
a[114]=10;
a[115]=9;
a[116]=4;
a[117]=5;
a[118]=9;
a[119]=3;
a[120]=3;
a[121]=4;
a[122]=6;
a[123]=8;
a[124]=8;
a[125]=9;
a[126]=9;
a[127]=3;
a[128]=1;
a[129]=4;
a[130]=10;
a[131]=7;
a[132]=7;
a[133]=0;
a[134]=4;
a[135]=4;
a[136]=7;
a[137]=7;
a[138]=0;
a[139]=1;
a[140]=5;
a[141]=4;
a[142]=4;
a[143]=8;
a[144]=9;
a[145]=10;
a[146]=10;
a[147]=10;
a[148]=3;
a[149]=4;
a[150]=10;
a[151]=6;
a[152]=9;
a[153]=7;
a[154]=10;
a[155]=10;
a[156]=2;
a[157]=8;
a[158]=5;
a[159]=5;
a[160]=7;
a[161]=9;
a[162]=1;
a[163]=3;
a[164]=6;
a[165]=6;
a[166]=5;
a[167]=3;
a[168]=9;
a[169]=6;
a[170]=6;
a[171]=7;
a[172]=1;
a[173]=4;
a[174]=8;
a[175]=8;
a[176]=6;
a[177]=2;
a[178]=9;
a[179]=8;
a[180]=5;
a[181]=5;
a[182]=5;
a[183]=3;
a[184]=0;
a[185]=8;
a[186]=0;
a[187]=4;
a[188]=8;
a[189]=7;
a[190]=9;
a[191]=10;
a[192]=0;
a[193]=5;
a[194]=10;
a[195]=8;
a[196]=3;
a[197]=1;
a[198]=8;
a[199]=3;
a[200]=1;
a[201]=10;
a[202]=5;
a[203]=8;
a[204]=2;
a[205]=6;
a[206]=1;
a[207]=7;
a[208]=10;
a[209]=7;
a[210]=9;
a[211]=5;
a[212]=9;
a[213]=3;
a[214]=1;
a[215]=5;
a[216]=0;
a[217]=9;
a[218]=3;
a[219]=6;
a[220]=5;
a[221]=10;
a[222]=7;
a[223]=5;
a[224]=10;
a[225]=8;
a[226]=7;
a[227]=3;
a[228]=1;
a[229]=9;
a[230]=5;
a[231]=8;
a[232]=7;
a[233]=8;
a[234]=5;
a[235]=3;
a[236]=3;
a[237]=0;
a[238]=3;
a[239]=1;
a[240]=10;
a[241]=6;
a[242]=8;
a[243]=8;
a[244]=7;
a[245]=7;
a[246]=1;
a[247]=5;
a[248]=10;
a[249]=1;
a[250]=8;
a[251]=10;
a[252]=1;
a[253]=9;
a[254]=1;
a[255]=1;
}
void dispRes(float arr)
{
printf("result = ");
printf("%f ",arr);
system("pause");
}
int main(int argc,char*argv[])
{
int ii;
float * hostInput; // The input 1D list
float * hostOutput; // The output list
float * deviceInput;
float * deviceOutput;
int numInputElements = 256; // number of elements in the input list
int numOutputElements = 0; // number of elements in the output list
numOutputElements = numInputElements / (BLOCK_SIZE<<1);
if (numInputElements % (BLOCK_SIZE<<1)) {
numOutputElements++;
}
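// Example: 256 inputs with BLOCK_SIZE = 64 give 256/128 = 2 partial sums, one
// per block; the remainder branch only fires when the input size is not a
// multiple of 2*BLOCK_SIZE.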
hostInput = (float*)malloc(numInputElements*sizeof(float));
hostOutput = (float*)malloc(numOutputElements*sizeof(float));
//cuda memory allocation on the device
hipMalloc((void**)&deviceInput,numInputElements*sizeof(float));
hipMalloc((void**)&deviceOutput,numOutputElements*sizeof(float));
printf("Loading values to the array...\n");
loadVal(hostInput,hostOutput,numInputElements);
//cuda memory copy from host to device
hipMemcpy(deviceInput,hostInput,numInputElements*sizeof(float),hipMemcpyHostToDevice);
//CPU equivalent
//totalCPU(hostInput,hostOutput,numInputElements);
//dispRes(hostOutput[0]);
printf("Calling CUDA kernel...\n");
dim3 DimGrid((numInputElements-1)/(BLOCK_SIZE*2)+1,1,1); // one block per 2*BLOCK_SIZE inputs, matching numOutputElements
dim3 DimBlock(BLOCK_SIZE,1,1);
hipLaunchKernelGGL(( totalKernel), dim3(DimGrid),dim3(DimBlock), 0, 0, deviceInput,deviceOutput,numInputElements);
//cuda memory copy from device to host
hipMemcpy(hostOutput,deviceOutput,numOutputElements*sizeof(float),hipMemcpyDeviceToHost);
for (ii = 1; ii < numOutputElements; ii++) {
hostOutput[0] += hostOutput[ii];
}
dispRes(hostOutput[0]);
free(hostInput);
free(hostOutput);
hipFree(deviceInput);
hipFree(deviceOutput);
return 0;
} | b5fc37e91a9a25654e3ddc483c870067589de24e.cu | #include <stdio.h>
#include<stdlib.h>
#include "device_launch_parameters.h"
#include "cuda_runtime.h"
#define BLOCK_SIZE 64
__global__ void
totalKernel(float * input, float * output, int len)
{
__shared__ float partialSum[2*BLOCK_SIZE];
unsigned int t = threadIdx.x;
unsigned int start = blockIdx.x*blockDim.x*2;
if((start+t)<len)
{
partialSum[t] = input[start+t];
if(start+t+blockDim.x < len)
partialSum[blockDim.x+t] = input[start+t+blockDim.x];
else
partialSum[blockDim.x+t] =0;
}
else
{
partialSum[t] = 0;
partialSum[blockDim.x+t] = 0;
}
__syncthreads();
for(unsigned int stride = blockDim.x; stride >0; stride/=2){
__syncthreads();
if(t<stride)
partialSum[t]+=partialSum[t+stride];
}
// after the final stride, only thread 0 holds the completed block sum
if(t==0)
output[blockIdx.x] = partialSum[0];
}
void totalCPU(float * input, float * output, int len)
{
int i=0;
output[0]=0;
for(i=0;i<len;i++)
output[0] +=input[i];
}
void loadVal(float *a, float *b, int len)
{
/*
int i=0;
char buff[256];
FILE *file;
file = fopen("dataSetRaw0.txt","r");
if(!file)
{
printf("No file found");
system("Pause");
exit(0);
}
while(fgets(buff,len,file))
{
a[i] = atof(buff);
i++;
}
for(i=0;i<len;i++)
{
a[i] = i;
}
fclose(file);
*/
a[0]=7;
a[1]=9;
a[2]=1;
a[3]=4;
a[4]=2;
a[5]=8;
a[6]=10;
a[7]=5;
a[8]=10;
a[9]=7;
a[10]=5;
a[11]=7;
a[12]=8;
a[13]=6;
a[14]=4;
a[15]=6;
a[16]=6;
a[17]=3;
a[18]=4;
a[19]=0;
a[20]=1;
a[21]=10;
a[22]=5;
a[23]=8;
a[24]=7;
a[25]=0;
a[26]=2;
a[27]=9;
a[28]=2;
a[29]=8;
a[30]=4;
a[31]=3;
a[32]=2;
a[33]=1;
a[34]=4;
a[35]=10;
a[36]=3;
a[37]=9;
a[38]=6;
a[39]=9;
a[40]=4;
a[41]=7;
a[42]=3;
a[43]=3;
a[44]=3;
a[45]=1;
a[46]=5;
a[47]=5;
a[48]=0;
a[49]=7;
a[50]=7;
a[51]=2;
a[52]=5;
a[53]=7;
a[54]=9;
a[55]=5;
a[56]=8;
a[57]=5;
a[58]=0;
a[59]=10;
a[60]=3;
a[61]=9;
a[62]=5;
a[63]=10;
a[64]=8;
a[65]=4;
a[66]=8;
a[67]=8;
a[68]=2;
a[69]=6;
a[70]=9;
a[71]=6;
a[72]=9;
a[73]=0;
a[74]=9;
a[75]=7;
a[76]=3;
a[77]=1;
a[78]=8;
a[79]=7;
a[80]=0;
a[81]=10;
a[82]=9;
a[83]=8;
a[84]=7;
a[85]=10;
a[86]=9;
a[87]=1;
a[88]=4;
a[89]=3;
a[90]=1;
a[91]=8;
a[92]=2;
a[93]=4;
a[94]=8;
a[95]=1;
a[96]=1;
a[97]=2;
a[98]=7;
a[99]=10;
a[100]=6;
a[101]=10;
a[102]=0;
a[103]=0;
a[104]=2;
a[105]=2;
a[106]=5;
a[107]=1;
a[108]=6;
a[109]=10;
a[110]=2;
a[111]=2;
a[112]=8;
a[113]=10;
a[114]=10;
a[115]=9;
a[116]=4;
a[117]=5;
a[118]=9;
a[119]=3;
a[120]=3;
a[121]=4;
a[122]=6;
a[123]=8;
a[124]=8;
a[125]=9;
a[126]=9;
a[127]=3;
a[128]=1;
a[129]=4;
a[130]=10;
a[131]=7;
a[132]=7;
a[133]=0;
a[134]=4;
a[135]=4;
a[136]=7;
a[137]=7;
a[138]=0;
a[139]=1;
a[140]=5;
a[141]=4;
a[142]=4;
a[143]=8;
a[144]=9;
a[145]=10;
a[146]=10;
a[147]=10;
a[148]=3;
a[149]=4;
a[150]=10;
a[151]=6;
a[152]=9;
a[153]=7;
a[154]=10;
a[155]=10;
a[156]=2;
a[157]=8;
a[158]=5;
a[159]=5;
a[160]=7;
a[161]=9;
a[162]=1;
a[163]=3;
a[164]=6;
a[165]=6;
a[166]=5;
a[167]=3;
a[168]=9;
a[169]=6;
a[170]=6;
a[171]=7;
a[172]=1;
a[173]=4;
a[174]=8;
a[175]=8;
a[176]=6;
a[177]=2;
a[178]=9;
a[179]=8;
a[180]=5;
a[181]=5;
a[182]=5;
a[183]=3;
a[184]=0;
a[185]=8;
a[186]=0;
a[187]=4;
a[188]=8;
a[189]=7;
a[190]=9;
a[191]=10;
a[192]=0;
a[193]=5;
a[194]=10;
a[195]=8;
a[196]=3;
a[197]=1;
a[198]=8;
a[199]=3;
a[200]=1;
a[201]=10;
a[202]=5;
a[203]=8;
a[204]=2;
a[205]=6;
a[206]=1;
a[207]=7;
a[208]=10;
a[209]=7;
a[210]=9;
a[211]=5;
a[212]=9;
a[213]=3;
a[214]=1;
a[215]=5;
a[216]=0;
a[217]=9;
a[218]=3;
a[219]=6;
a[220]=5;
a[221]=10;
a[222]=7;
a[223]=5;
a[224]=10;
a[225]=8;
a[226]=7;
a[227]=3;
a[228]=1;
a[229]=9;
a[230]=5;
a[231]=8;
a[232]=7;
a[233]=8;
a[234]=5;
a[235]=3;
a[236]=3;
a[237]=0;
a[238]=3;
a[239]=1;
a[240]=10;
a[241]=6;
a[242]=8;
a[243]=8;
a[244]=7;
a[245]=7;
a[246]=1;
a[247]=5;
a[248]=10;
a[249]=1;
a[250]=8;
a[251]=10;
a[252]=1;
a[253]=9;
a[254]=1;
a[255]=1;
}
void dispRes(float arr)
{
printf("result = ");
printf("%f ",arr);
system("pause");
}
int main(int argc,char*argv[])
{
int ii;
float * hostInput; // The input 1D list
float * hostOutput; // The output list
float * deviceInput;
float * deviceOutput;
int numInputElements = 256; // number of elements in the input list
int numOutputElements = 0; // number of elements in the output list
numOutputElements = numInputElements / (BLOCK_SIZE<<1);
if (numInputElements % (BLOCK_SIZE<<1)) {
numOutputElements++;
}
hostInput = (float*)malloc(numInputElements*sizeof(float));
hostOutput = (float*)malloc(numOutputElements*sizeof(float));
//cuda memory allocation on the device
cudaMalloc((void**)&deviceInput,numInputElements*sizeof(float));
cudaMalloc((void**)&deviceOutput,numOutputElements*sizeof(float));
printf("Loading values to the array...\n");
loadVal(hostInput,hostOutput,numInputElements);
//cuda memory copy from host to device
cudaMemcpy(deviceInput,hostInput,numInputElements*sizeof(float),cudaMemcpyHostToDevice);
//CPU equivalent
//totalCPU(hostInput,hostOutput,numInputElements);
//dispRes(hostOutput[0]);
printf("Calling CUDA kernel...\n");
dim3 DimGrid((numInputElements-1)/(BLOCK_SIZE*2)+1,1,1); // one block per 2*BLOCK_SIZE inputs, matching numOutputElements
dim3 DimBlock(BLOCK_SIZE,1,1);
totalKernel<<<DimGrid,DimBlock>>>(deviceInput,deviceOutput,numInputElements);
//cuda memory copy from device to host
cudaMemcpy(hostOutput,deviceOutput,numOutputElements*sizeof(float),cudaMemcpyDeviceToHost);
for (ii = 1; ii < numOutputElements; ii++) {
hostOutput[0] += hostOutput[ii];
}
dispRes(hostOutput[0]);
free(hostInput);
free(hostOutput);
cudaFree(deviceInput);
cudaFree(deviceOutput);
return 0;
} |
d8d9353e866ef5c0d43fa3282e6478bae6cd1b80.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "eval.cu"
#include "mgs.cu"
__global__ void update_x_kernel(GT* x, GT* sol, int dim)
{
int BS = blockDim.x;
int bidx = blockIdx.x*BS;
int tidx = threadIdx.x;
int idx = bidx + tidx;
/*int path_idx = blockIdx.z;
x_predictor += path_idx*np_predictor*dim;
t_predictor += path_idx*np_predictor;
x_new += path_idx*dim;*/
if(idx < dim) {
x[idx] = x[idx] - sol[idx];
}
}
__global__ void update_x_kernel(GT* x, GT* sol, int dim, int workspace_size, int* x_t_idx)
{
//int BS = blockDim.x;
//int bidx = blockIdx.x*BS;
int idx = threadIdx.x;
//int idx = bidx + tidx;
int path_idx = blockIdx.x;
sol += path_idx*dim;
x += path_idx*workspace_size + x_t_idx[path_idx]*dim;
/*int path_idx = blockIdx.z;
x_predictor += path_idx*np_predictor*dim;
t_predictor += path_idx*np_predictor;
x_new += path_idx*dim;*/
if(idx < dim) {
x[idx] = x[idx] - sol[idx];
}
}
__global__ void update_x_kernel_mult(GT* x, GT* sol, int dim, int n_path)
{
//int BS = blockDim.x;
//int bidx = blockIdx.x*BS;
//int idx = threadIdx.x;
//int idx = bidx + tidx;
//int path_idx = blockIdx.x;
int path_idx = (gridDim.x*blockIdx.y+blockIdx.x)*blockDim.x + threadIdx.x;
int var_idx = blockIdx.z;
if(path_idx<n_path){
sol += path_idx;
x += path_idx;
x[var_idx*n_path] = x[var_idx*n_path] - sol[var_idx*n_path];
}
}
__global__ void update_x_kernel(GT* x, GT* sol, int dim, int workspace_size, int* x_t_idx, int* path_idx_mult)
{
//int BS = blockDim.x;
//int bidx = blockIdx.x*BS;
int idx = threadIdx.x;
//int idx = bidx + tidx;
int path_idx = path_idx_mult[blockIdx.x];
sol += blockIdx.x*dim;
x += path_idx*workspace_size + x_t_idx[path_idx]*dim;
/*int path_idx = blockIdx.z;
x_predictor += path_idx*np_predictor*dim;
t_predictor += path_idx*np_predictor;
x_new += path_idx*dim;*/
if(idx < dim) {
x[idx] = x[idx] - sol[idx];
}
}
__global__ void update_x_kernel(GT* x_mult, GT* sol, int dim, int workspace_size,\
int* x_t_idx, int* path_idx_mult, int n_predictor)
{
//int BS = blockDim.x;
//int bidx = blockIdx.x*BS;
int idx = threadIdx.x;
//int idx = bidx + tidx;
int path_idx = path_idx_mult[blockIdx.x];
sol += blockIdx.x*dim;
//x += path_idx*workspace_size + x_t_idx[path_idx]*dim;
x_mult += path_idx*dim*(n_predictor+1) + x_t_idx[path_idx]*dim;
/*int path_idx = blockIdx.z;
x_predictor += path_idx*np_predictor*dim;
t_predictor += path_idx*np_predictor;
x_new += path_idx*dim;*/
if(idx < dim) {
//x[idx] = x[idx] - sol[idx];
x_mult[idx] = x_mult[idx] - sol[idx];
}
}
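// The update_x_kernel variants above all apply the same Newton correction
// x := x - sol; they differ only in how each path's slice of x is addressed
// (flat array, per-path workspace offset, an index list of active paths, or
// the transposed "mult" layout used by the multi-path tracker).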
__global__ void mult_x_init(GT* x_array, GT* t_array, GT* alpha, \
GT* x_mult, GT* t_mult, GT* one_minor_t_mult, \
int* path_idx_mult, int* x_t_idx_mult, int n_path, int dim, int n_predictor){
int t_idx = threadIdx.x;
int BS = blockDim.x;
int eval_idx = (gridDim.x*blockIdx.y+blockIdx.x)*BS+t_idx;
if(eval_idx < n_path){
int path_idx = path_idx_mult[eval_idx];
//GT* t = t_array + workspace_size*path_idx + x_t_idx_mult[path_idx];
GT* tmp_t = t_array + path_idx*(n_predictor+1) + x_t_idx_mult[path_idx];
//one_minor_t += workspace_size*path_idx;
t_mult[eval_idx] = *tmp_t;
one_minor_t_mult[eval_idx] = (*alpha)*(GT(1.0,0) - t_mult[eval_idx]);
x_mult += eval_idx;
//GT* x = x_array + workspace_size*path_idx + x_t_idx_mult[path_idx]*dim;
GT* x_tmp = x_array + path_idx*dim*(n_predictor+1) + x_t_idx_mult[path_idx]*dim;
for(int var_idx=0; var_idx<dim; var_idx++){
x_mult[var_idx*n_path] = x_tmp[var_idx];
}
}
}
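// Layout note: x_mult is transposed relative to x_array -- variable var_idx of
// evaluation path eval_idx lives at x_mult[var_idx*n_path + eval_idx], so that
// consecutive paths read and write consecutive addresses (coalesced).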
__global__ void array_max_double_kernel(GT* sol, int dim, int dimLog2, double* max_array, \
double* r_max_array, double* max_x, int workspace_size) {
__shared__ double x_norm[max_array_size];
int j = threadIdx.x;
// max for the norm
int path_idx = blockIdx.x;
sol += path_idx*workspace_size;
// sol += path_idx + path_idx*workspace_size;
x_norm[j] = sol[j].norm1_double();
dimLog2 -= 1;
int half_size = 1 << (dimLog2);// sum for the norm
if(half_size > 16) {
__syncthreads();
}
if(j + half_size < dim) {
if(x_norm[j] < x_norm[j+half_size]) {
x_norm[j] = x_norm[j+half_size];
}
}
for(int k=0; k < dimLog2; k++) {
if(half_size > 16) {
__syncthreads();
}
half_size /= 2;
if(j < half_size) {
if(x_norm[j] < x_norm[j+half_size]) {
x_norm[j] = x_norm[j+half_size];
}
}
}
if(j == 0) {
max_array[path_idx] = x_norm[0];
r_max_array[path_idx] = x_norm[0]/max_x[path_idx];
}
}
__global__ void max_relative_double_kernel(GT* sol, int dim, int n_path_continuous, double* max_array, \
double* r_max_array, double* max_x, int workspace_size, int* path_idx_mult) {
//__shared__ double x_norm[max_array_size];
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if(idx < n_path_continuous){
int path_idx = path_idx_mult[idx];
sol += idx*dim;
double x_norm = sol[0].norm1_double();
for(int var_idx=1; var_idx<dim; var_idx++){
double tmp_x_norm = sol[var_idx].norm1_double();
if(x_norm < tmp_x_norm) {
x_norm = tmp_x_norm;
}
}
max_array[path_idx] = x_norm;
r_max_array[path_idx] = x_norm/max_x[path_idx];
}
}
__global__ void max_relative_double_kernel2(GT* sol, int dim, int n_path_continuous, double* max_array, double* max_array_last, \
double* r_max_array, double* max_x, int workspace_size, int* path_idx_mult) {
//__shared__ double x_norm[max_array_size];
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if(idx < n_path_continuous){
int path_idx = path_idx_mult[idx];
sol += path_idx*workspace_size;
double x_norm = sol[0].norm1_double();
for(int var_idx=1; var_idx<dim; var_idx++){
double tmp_norm_x = sol[var_idx].norm1_double();
if(x_norm < tmp_norm_x) {
x_norm = tmp_norm_x;
}
}
max_array[path_idx] = x_norm;
max_array_last[path_idx] = x_norm;
r_max_array[path_idx] = x_norm/max_x[path_idx];
}
}
__global__ void max_relative_double_kernel3(GT* sol, int dim, int n_path_continuous, int n_path, int* path_idx_mult, \
double* max_array, double* max_array_last, double* r_max_array, double* max_x) {
//__shared__ double x_norm[max_array_size];
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if(idx < n_path_continuous){
int path_idx = path_idx_mult[idx];
sol += idx;
double x_norm = sol[0].norm1_double();
for(int var_idx=1; var_idx<dim; var_idx++){
double tmp_norm_x = sol[var_idx*n_path].norm1_double();
if(x_norm < tmp_norm_x) {
x_norm = tmp_norm_x;
}
}
max_array[path_idx] = x_norm;
max_array_last[path_idx] = x_norm;
r_max_array[path_idx] = x_norm/max_x[path_idx];
}
}
// Not good for a lot path
__global__ void max_relative_double_kernel_tree(GT* sol, int dim, int dimLog2, double* max_array, \
double* r_max_array, double* max_x, int workspace_size, int* path_idx_mult) {
__shared__ double x_norm[max_array_size];
int j = threadIdx.x;
// max for the norm
int path_idx = path_idx_mult[blockIdx.x];
sol += path_idx*workspace_size;
// sol += path_idx + path_idx*workspace_size;
x_norm[j] = sol[j].norm1_double();
dimLog2 -= 1;
int half_size = 1 << (dimLog2);// sum for the norm
if(half_size > 16) {
__syncthreads();
}
if(j + half_size < dim) {
if(x_norm[j] < x_norm[j+half_size]) {
x_norm[j] = x_norm[j+half_size];
}
}
for(int k=0; k < dimLog2; k++) {
if(half_size > 16) {
__syncthreads();
}
half_size /= 2;
if(j < half_size) {
if(x_norm[j] < x_norm[j+half_size]) {
x_norm[j] = x_norm[j+half_size];
}
}
}
if(j == 0) {
max_array[path_idx] = x_norm[0];
r_max_array[path_idx] = x_norm[0]/max_x[path_idx];
}
}
__global__ void array_max_double_kernel(GT* sol, int dim, int dimLog2, double* max_array, int workspace_size, int* x_t_idx) {
__shared__ double x_norm[max_array_size];
int j = threadIdx.x;
// max for the norm
int path_idx = blockIdx.x;
sol += path_idx*workspace_size + dim*x_t_idx[path_idx];
x_norm[j] = sol[j].norm1_double();
dimLog2 -= 1;
int half_size = 1 << (dimLog2);// sum for the norm
if(half_size > 16) {
__syncthreads();
}
if(j + half_size < dim) {
if(x_norm[j] < x_norm[j+half_size]) {
x_norm[j] = x_norm[j+half_size];
}
}
for(int k=0; k < dimLog2; k++) {
if(half_size > 16) {
__syncthreads();
}
half_size /= 2;
if(j < half_size) {
if(x_norm[j] < x_norm[j+half_size]) {
x_norm[j] = x_norm[j+half_size];
}
}
}
if(j == 0) {
max_array[path_idx] = x_norm[0];
}
}
__global__ void max_x_double_kernel_tree(GT* sol, int dim, int dimLog2, double* max_array, \
int workspace_size, int* x_t_idx, int* path_idx_mult) {
__shared__ double x_norm[max_array_size];
int j = threadIdx.x;
// max for the norm
int path_idx = path_idx_mult[blockIdx.x];
sol += path_idx*workspace_size + dim*x_t_idx[path_idx];
x_norm[j] = sol[j].norm1_double();
dimLog2 -= 1;
int half_size = 1 << (dimLog2);// sum for the norm
if(half_size > 16) {
__syncthreads();
}
if(j + half_size < dim) {
if(x_norm[j] < x_norm[j+half_size]) {
x_norm[j] = x_norm[j+half_size];
}
}
for(int k=0; k < dimLog2; k++) {
if(half_size > 16) {
__syncthreads();
}
half_size /= 2;
if(j < half_size) {
if(x_norm[j] < x_norm[j+half_size]) {
x_norm[j] = x_norm[j+half_size];
}
}
}
if(j == 0) {
max_array[path_idx] = x_norm[0];
}
}
__global__ void max_x_double_kernel(GT* sol, int dim, int n_path_continuous, double* max_array, \
int workspace_size, int* x_t_idx, int* path_idx_mult) {
//__shared__ double x_norm[max_array_size];
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if(idx < n_path_continuous){
int path_idx = path_idx_mult[idx];
sol += path_idx*workspace_size + dim*x_t_idx[path_idx];
double x_norm = sol[0].norm1_double();
for(int var_idx=1; var_idx<dim; var_idx++){
double tmp_norm_x = sol[var_idx].norm1_double();
if(x_norm < tmp_norm_x) {
x_norm = tmp_norm_x;
}
}
max_array[path_idx] = x_norm;
}
}
__global__ void max_x_double_kernel_align(GT* sol, int dim, int n_path_continuous, double* max_array, \
int workspace_size, int* x_t_idx, int* path_idx_mult) {
//__shared__ double x_norm[max_array_size];
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if(idx < n_path_continuous){
int path_idx = path_idx_mult[idx];
sol += path_idx;
double x_norm = sol[0].norm1_double();
for(int var_idx=1; var_idx<dim; var_idx++){
double tmp_norm_x = sol[var_idx*n_path_continuous].norm1_double();
if(x_norm < tmp_norm_x) {
x_norm = tmp_norm_x;
}
}
max_array[path_idx] = x_norm;
}
}
__global__ void zip_kernel(GT* sol, int dim, int n_path_continuous, double* max_array, \
int workspace_size, int* x_t_idx, int* path_idx_mult, GT* newton_sol_mult, int* newton_sol_mult_idx,
GT* one_minor_t, GT* one_minor_t_mult) {
//__shared__ double x_norm[max_array_size];
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if(idx < n_path_continuous){
int path_idx = path_idx_mult[idx];
sol += path_idx*workspace_size + dim*x_t_idx[path_idx];
newton_sol_mult += idx;
newton_sol_mult[0] = sol[0];
for(int var_idx=1; var_idx<dim; var_idx++){
newton_sol_mult[var_idx*n_path_continuous] = sol[var_idx];
}
newton_sol_mult_idx[idx] = path_idx;
path_idx_mult[idx] = idx;
one_minor_t_mult[idx] = one_minor_t[path_idx*workspace_size];
}
}
__global__ void max_x_double_zip_kernel(GT* sol, int dim, int n_path_continuous, double* max_array) {
//__shared__ double x_norm[max_array_size];
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if(idx < n_path_continuous){
sol += idx;
double x_norm = sol[0].norm1_double();
for(int var_idx=1; var_idx<dim; var_idx++){
double tmp_norm_x = sol[var_idx*n_path_continuous].norm1_double();
if(x_norm < tmp_norm_x) {
x_norm = tmp_norm_x;
}
}
max_array[idx] = x_norm;
}
}
__global__ void max_relative_double_zip_kernel(GT* sol, int dim, int n_path_continuous, double* max_array, double* r_max_array, double* max_x) {
//__shared__ double x_norm[max_array_size];
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if(idx < n_path_continuous){
sol += idx;
double x_norm = sol[0].norm1_double();
for(int var_idx=1; var_idx<dim; var_idx++){
double tmp_norm_x = sol[var_idx*n_path_continuous].norm1_double();
if(x_norm < tmp_norm_x) {
x_norm = tmp_norm_x;
}
}
max_array[idx] = x_norm;
r_max_array[idx] = x_norm/max_x[idx];
}
}
__global__ void check_kernel(double* max_f_val_gpu, double* r_max_f_val_gpu, \
double* max_delta_x_gpu, double* r_max_delta_x_gpu, int* path_success, int* success, \
int* n_point_mult, int* x_t_idx_mult, int n_array, int workspace_size, int n_path, int* end_range, \
double err_min_round_off, double err_min_round_off_end){
int path_idx = threadIdx.x+blockIdx.x*blockDim.x;
if(path_idx<n_path){
double err_roundoff;
if(end_range[path_idx]==1){
err_roundoff = err_min_round_off_end;
}
else{
err_roundoff = err_min_round_off;
}
if( path_success[path_idx] == 0 && ( max_f_val_gpu[path_idx] < err_roundoff \
|| r_max_f_val_gpu[path_idx] < err_roundoff || max_delta_x_gpu[path_idx] < err_roundoff \
|| r_max_delta_x_gpu[path_idx] < err_roundoff) ){
success[path_idx] = 1;
n_point_mult[path_idx]++;
// remove %
x_t_idx_mult[path_idx] = (x_t_idx_mult[path_idx]+1)%n_array;
}
}
}
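// Example of the stopping test above (illustrative threshold): with
// err_min_round_off = 1e-9, a still-running path is flagged successful as soon
// as any of the four criteria -- residual max_f_val, relative residual
// r_max_f_val, correction max_delta_x, or relative correction r_max_delta_x --
// drops below 1e-9; paths with end_range set are tested against
// err_min_round_off_end instead.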
__global__ void check_kernel(double* max_delta_x_gpu, double* r_max_delta_x_gpu, \
int* success, int n_path, int* end_range, double err_min_round_off, double err_min_round_off_end){
int path_idx = threadIdx.x+blockIdx.x*blockDim.x;
if(path_idx<n_path){
double err_roundoff;
if(end_range[path_idx]==1){
err_roundoff = err_min_round_off_end;
}
else{
err_roundoff = err_min_round_off;
}
if( max_delta_x_gpu[path_idx] < err_roundoff || r_max_delta_x_gpu[path_idx] < err_roundoff ){
success[path_idx] = 1;
}
}
}
__global__ void check_kernel(double* max_f_val_gpu, double* r_max_f_val_gpu, double* max_f_val_last_gpu, \
int* success, int n_path, int* end_range, double err_min_round_off, double err_min_round_off_end){
int path_idx = threadIdx.x+blockIdx.x*blockDim.x;
if(path_idx<n_path){
if(max_f_val_gpu[path_idx]>max_f_val_last_gpu[path_idx]){
success[path_idx] = -1;
}
double err_roundoff;
if(end_range[path_idx]==1){
err_roundoff = err_min_round_off_end;
}
else{
err_roundoff = err_min_round_off;
}
if( max_f_val_gpu[path_idx] < err_roundoff || r_max_f_val_gpu[path_idx] < err_roundoff ){
success[path_idx] = 1;
}
else{
max_f_val_last_gpu[path_idx] = max_f_val_gpu[path_idx];
}
}
}
__global__ void newton_init(int* success, int n_path){
int newton_path_idx = threadIdx.x+blockIdx.x*blockDim.x;
if(newton_path_idx<n_path){
success[newton_path_idx] = 0;
}
}
__global__ void check_kernel_mult(double* max_delta_x_gpu, double* r_max_delta_x_gpu, \
int* success, int n_path, int* end_range, int* path_idx_mult, double err_min_round_off, double err_min_round_off_end){
int newton_path_idx = threadIdx.x+blockIdx.x*blockDim.x;
int path_idx = path_idx_mult[newton_path_idx];
if(newton_path_idx<n_path){
double err_roundoff;
if(end_range[path_idx]==1){
err_roundoff = err_min_round_off_end;
}
else{
err_roundoff = err_min_round_off;
}
if( max_delta_x_gpu[path_idx] < err_roundoff || r_max_delta_x_gpu[path_idx] < err_roundoff ){
success[path_idx] = 1;
}
}
}
__global__ void array_max_double_kernel(GT* sol, int dim, int dimLog2, double* max_delta_x ) {
__shared__ double delta_x[max_array_size];
int j = threadIdx.x;
// max for the norm
delta_x[j] = sol[j].norm1_double();
dimLog2 -= 1;
int half_size = 1 << (dimLog2);// sum for the norm
if(half_size > 16) {
__syncthreads();
}
if(j + half_size < dim) {
if(delta_x[j] < delta_x[j+half_size]) {
delta_x[j] = delta_x[j+half_size];
}
}
for(int k=0; k < dimLog2; k++) {
if(half_size > 16) {
__syncthreads();
}
half_size /= 2;
if(j < half_size) {
if(delta_x[j] < delta_x[j+half_size]) {
delta_x[j] = delta_x[j+half_size];
}
}
}
if(j == 0) {
*max_delta_x = delta_x[0];
}
}
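// Worked example (illustration only): for dim = 5, dimLog2 = log2ceil(5) = 3.
// The pre-step uses half_size = 4 with the guard j + half_size < dim, so only
// j = 0 folds in delta_x[4]; the loop then halves 2 -> 1:
//     half_size 2: delta_x[0] <- max(delta_x[0], delta_x[2]),
//                  delta_x[1] <- max(delta_x[1], delta_x[3])
//     half_size 1: delta_x[0] <- max(delta_x[0], delta_x[1])
// leaving the maximum of all five norms in delta_x[0].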
bool newton_single(GPUWorkspace& workspace, GPUInst& inst, Parameter path_parameter, bool end_range=false) {
bool debug = false;
//debug = true;
bool success = false;
int rowsLog2 = log2ceil(inst.n_eq); // ceil for sum reduction
int dimLog2 = log2ceil(inst.dim); // ceil for sum reduction
double max_x;
double max_f_val;
double r_max_f_val;
double max_delta_x;
double r_max_delta_x;
double* max_x_gpu;
hipMalloc((void **) &max_x_gpu, sizeof(double));
double* max_f_val_gpu;
hipMalloc((void **) &max_f_val_gpu, sizeof(double));
double* max_delta_x_gpu;
hipMalloc((void **) &max_delta_x_gpu, sizeof(double));
double err_round_off;
if(end_range==true){
err_round_off = path_parameter.err_min_round_off_refine;
}
else{
err_round_off = path_parameter.err_min_round_off;
}
hipLaunchKernelGGL(( array_max_double_kernel), dim3(1), dim3(inst.dim), 0, 0, workspace.x, inst.dim, \
dimLog2, max_x_gpu);
hipMemcpy(&max_x, max_x_gpu, sizeof(double),
hipMemcpyDeviceToHost);
if(debug){
std::cout << " max_x : " << max_x << std::endl;
}
eval(workspace, inst);
inst.n_eval_GPU++;
hipLaunchKernelGGL(( array_max_double_kernel), dim3(1), dim3(inst.n_eq), 0, 0, workspace.f_val, inst.n_eq, \
rowsLog2, max_f_val_gpu);
hipMemcpy(&max_f_val, max_f_val_gpu, sizeof(double),
hipMemcpyDeviceToHost);
r_max_f_val = max_f_val/max_x;
if(debug){
std::cout << " residual(a&r): " << max_f_val \
<< " " << r_max_f_val << std::endl;
}
if(max_f_val < err_round_off || r_max_f_val < err_round_off){
success = 1;
return success;
}
double last_max_f_val = max_f_val;
for (int i = 0; i < path_parameter.max_it; i++) {
if(debug){
cout << " Iteration " << i << endl;
}
if (inst.n_eq <= BS_QR) {
mgs_small_with_delta(workspace.matrix, workspace.R, workspace.sol,
inst.n_eq, inst.dim + 1, max_delta_x_gpu);
/*CT* tmp_sol = workspace.get_sol();
for(int var_idx=0; var_idx<inst.dim; var_idx++){
std::cout << var_idx << " " << tmp_sol[var_idx];
}*/
} else {
mgs_large_block(workspace.matrix, workspace.R, workspace.P, workspace.sol, inst.n_eq,\
inst.dim + 1);
//mgs_large(workspace.V, workspace.R, workspace.sol, inst.n_eq, inst.dim+1);
hipLaunchKernelGGL(( array_max_double_kernel), dim3(1),dim3(inst.dim), 0, 0, workspace.sol, inst.dim, dimLog2, max_delta_x_gpu);
}
inst.n_mgs_GPU++;
hipMemcpy(&max_delta_x, max_delta_x_gpu, sizeof(double),
hipMemcpyDeviceToHost);
r_max_delta_x = max_delta_x/max_x;
if(debug){
std::cout << " correction(a&r): " << max_delta_x \
<< " " << r_max_delta_x << std::endl;
}
hipLaunchKernelGGL(( update_x_kernel), dim3(inst.dim_grid), dim3(inst.dim_BS), 0, 0, workspace.x, workspace.sol,
inst.dim);
if(max_delta_x < err_round_off || r_max_delta_x < err_round_off){
success = 1;
break;
}
hipLaunchKernelGGL(( array_max_double_kernel), dim3(1), dim3(inst.dim), 0, 0, workspace.x, inst.dim, \
dimLog2, max_x_gpu);
hipMemcpy(&max_x, max_x_gpu, sizeof(double),
hipMemcpyDeviceToHost);
if(debug){
std::cout << " max_x : " << max_x << std::endl;
}
eval(workspace, inst);
inst.n_eval_GPU++;
hipLaunchKernelGGL(( array_max_double_kernel), dim3(1), dim3(inst.n_eq), 0, 0, workspace.f_val, inst.n_eq,
rowsLog2, max_f_val_gpu);
hipMemcpy(&max_f_val, max_f_val_gpu, sizeof(double),
hipMemcpyDeviceToHost);
r_max_f_val = max_f_val/max_x;
if(debug){
std::cout << " residual(a&r): " << max_f_val \
<< " " << r_max_f_val << std::endl;
}
if (max_f_val > last_max_f_val) {
success = 0;
break;
}
if(max_f_val < err_round_off || r_max_f_val < err_round_off){
success = 1;
break;
}
last_max_f_val = max_f_val;
}
return success;
}
bool newton(GPUWorkspace& workspace, GPUInst& inst, Parameter path_parameter, bool debug=false) {
//debug = true;
int path_idx_test = 0;
int rowsLog2 = log2ceil(inst.n_eq); // ceil for sum reduction
int dimLog2 = log2ceil(inst.dim); // ceil for sum reduction
int n_path = workspace.n_path;
for(int path_idx=0; path_idx<n_path; path_idx++){
workspace.newton_success_host[path_idx] = workspace.path_success_host[path_idx];
//std::cout << "workspace.success_host[path_idx] = " \
<< workspace.success_host[path_idx] << std::endl;
}
hipMemcpy(workspace.newton_success, workspace.newton_success_host, n_path*sizeof(int),
hipMemcpyHostToDevice);
int n_path_continuous = 0;
//std::cout << "newton_success" << std::endl;
for(int path_idx=0; path_idx<n_path; path_idx++){
//std::cout << path_idx << " " << workspace.newton_success_host[path_idx] << std::endl;
if(workspace.newton_success_host[path_idx] == 0){
workspace.path_idx_host[n_path_continuous] = path_idx;
n_path_continuous += 1;
}
}
workspace.n_path_continuous = n_path_continuous;
/*if(debug){
std::cout << "n_path_continuous" << std::endl;
for(int path_idx=0; path_idx<n_path_continuous; path_idx++){
std::cout << path_idx << " " << workspace.path_idx_host[path_idx] << std::endl;
}
}*/
hipMemcpy(workspace.path_idx, workspace.path_idx_host, n_path*sizeof(int), \
hipMemcpyHostToDevice);
dim3 max_grid = get_grid(n_path_continuous,inst.predict_BS,1);
hipLaunchKernelGGL(( max_x_double_kernel), dim3(max_grid), dim3(inst.predict_BS), 0, 0, workspace.x_array, inst.dim, \
n_path_continuous, workspace.max_x_gpu, workspace.workspace_size, \
workspace.x_t_idx_mult, workspace.path_idx);
if(debug){
hipMemcpy(workspace.max_x_host, workspace.max_x_gpu, n_path*sizeof(double),
hipMemcpyDeviceToHost);
for(int path_idx=0; path_idx<n_path; path_idx++){
//if(path_idx == path_idx_test){
std::cout << " max_x_gpu " << path_idx << "= " << workspace.max_x_host[path_idx] << std::endl;
//}
}
}
eval(workspace, inst);
inst.n_eval_GPU++;
hipLaunchKernelGGL(( max_relative_double_kernel), dim3(max_grid), dim3(inst.predict_BS), 0, 0, workspace.f_val, inst.n_eq,\
n_path_continuous, workspace.max_f_val_gpu, workspace.r_max_f_val_gpu, workspace.max_x_gpu, \
workspace.workspace_size, workspace.path_idx);
if(debug){
hipMemcpy(workspace.max_f_val_host, workspace.max_f_val_gpu, n_path*sizeof(double),
hipMemcpyDeviceToHost);
hipMemcpy(workspace.r_max_f_val_host, workspace.r_max_f_val_gpu, n_path*sizeof(double),
hipMemcpyDeviceToHost);
for(int path_idx=0; path_idx<n_path; path_idx++){
std::cout << " max_f_value " << path_idx << " = " << workspace.max_f_val_host[path_idx] \
<< " " << workspace.r_max_f_val_host[path_idx] << std::endl;
}
}
dim3 check_grid = get_grid(n_path,inst.predict_BS,1);
for (int it_idx = 0; it_idx < path_parameter.max_it; it_idx++) {
if(debug){
std::cout << " Iteration " << it_idx << std::endl;
}
if(inst.dim <= BS_QR){
mgs_small_idx(workspace.V, workspace.R, workspace.sol, inst.n_eq, inst.dim+1,\
workspace.workspace_size, n_path_continuous, workspace.path_idx);
//array_max_double_kernel<<<1,inst.dim>>>(workspace.sol, inst.dim, dimLog2, max_delta_x_gpu);
/*CT** sol_gpu = new CT*[n_path];
CT** matrix_gpu_q = new CT*[n_path];
CT** matrix_gpu_r = new CT*[n_path];
for(int path_idx=0; path_idx<n_path; path_idx++){
sol_gpu[path_idx] = workspace.get_sol(path_idx);
for(int var_idx=0; var_idx<inst.dim; var_idx++){
std::cout << path_idx << " " << var_idx << " " << sol_gpu[path_idx][var_idx];
}
matrix_gpu_q[path_idx] = workspace.get_matrix(path_idx);
matrix_gpu_r[path_idx] = workspace.get_matrix_r(path_idx);
}*/
hipLaunchKernelGGL(( max_relative_double_kernel), dim3(max_grid), dim3(inst.predict_BS), 0, 0, workspace.sol, inst.dim, n_path_continuous, \
workspace.max_delta_x_gpu, workspace.r_max_delta_x_gpu, workspace.max_x_gpu, \
workspace.workspace_size, workspace.path_idx);
}
else{
mgs_large_block(workspace.V, workspace.R, workspace.P, workspace.sol, inst.n_eq, inst.dim+1);
//array_max_double_kernel<<<1,inst.dim>>>(workspace.sol, inst.dim, dimLog2, max_delta_x_gpu);
}
if(debug){
hipMemcpy(workspace.max_delta_x_host, workspace.max_delta_x_gpu, n_path*sizeof(double),
hipMemcpyDeviceToHost);
hipMemcpy(workspace.r_max_delta_x_host, workspace.r_max_delta_x_gpu, n_path*sizeof(double),
hipMemcpyDeviceToHost);
for(int path_idx=0; path_idx<n_path; path_idx++){
//if(path_idx == path_idx_test){
std::cout << " max_delta_x " << path_idx << " = " << workspace.max_delta_x_host[path_idx] \
<< " " << workspace.r_max_delta_x_host[path_idx] << std::endl;
//}
}
}
hipLaunchKernelGGL(( update_x_kernel), dim3(n_path_continuous), dim3(inst.dim), 0, 0, workspace.x_array, workspace.sol, inst.dim, \
workspace.workspace_size, workspace.x_t_idx_mult, workspace.path_idx);
//workspace.print_x_mult();
hipLaunchKernelGGL(( check_kernel), dim3(check_grid), dim3(inst.predict_BS), 0, 0, workspace.max_delta_x_gpu, workspace.r_max_delta_x_gpu, \
workspace.newton_success, n_path, workspace.end_range, path_parameter.err_min_round_off, path_parameter.err_min_round_off_refine);
hipMemcpy(workspace.newton_success_host, workspace.newton_success, n_path*sizeof(int), \
hipMemcpyDeviceToHost);
n_path_continuous = 0;
//std::cout << "newton_success" << std::endl;
for(int path_idx=0; path_idx<n_path; path_idx++){
//std::cout << path_idx << " " << workspace.newton_success_host[path_idx] << std::endl;
if(workspace.newton_success_host[path_idx] == 0){
workspace.path_idx_host[n_path_continuous] = path_idx;
n_path_continuous += 1;
}
}
workspace.n_path_continuous = n_path_continuous;
max_grid = get_grid(n_path_continuous,inst.predict_BS,1);
/*std::cout << "n_path_continuous" << std::endl;
for(int path_idx=0; path_idx<n_path_continuous; path_idx++){
std::cout << path_idx << " " << workspace.path_idx_host[path_idx] << std::endl;
}*/
hipMemcpy(workspace.path_idx, workspace.path_idx_host, n_path*sizeof(int), \
hipMemcpyHostToDevice);
//std::cout << "Correct X:" << std::endl;
//workspace.print_x_mult();
hipLaunchKernelGGL(( max_x_double_kernel), dim3(max_grid), dim3(inst.predict_BS), 0, 0, workspace.x_array, inst.dim, \
n_path_continuous, workspace.max_x_gpu, workspace.workspace_size, \
workspace.x_t_idx_mult, workspace.path_idx);
if(debug){
hipMemcpy(workspace.max_x_host, workspace.max_x_gpu, n_path*sizeof(double),
hipMemcpyDeviceToHost);
for(int path_idx=0; path_idx<n_path; path_idx++){
//if(path_idx == path_idx_test){
std::cout << " max_x_gpu " << path_idx << "= " << workspace.max_x_host[path_idx] << std::endl;
//}
}
}
/*hipMemcpy(max_x_host, max_x_gpu, n_path*sizeof(double),
hipMemcpyDeviceToHost);
for(int path_idx=0; path_idx<n_path; path_idx++){
std::cout << " max_x_gpu " << path_idx << "= " << max_x_host[path_idx] << std::endl;
}*/
eval(workspace, inst);
//inst.n_eval_GPU++;
hipLaunchKernelGGL(( max_relative_double_kernel), dim3(max_grid), dim3(inst.predict_BS), 0, 0, workspace.f_val, inst.n_eq, \
n_path_continuous, workspace.max_f_val_gpu, workspace.r_max_f_val_gpu, workspace.max_x_gpu, \
workspace.workspace_size, workspace.path_idx);
hipLaunchKernelGGL(( check_kernel), dim3(check_grid), dim3(inst.predict_BS), 0, 0, workspace.max_f_val_gpu, workspace.r_max_f_val_gpu, \
workspace.newton_success, n_path, workspace.end_range, path_parameter.err_min_round_off, path_parameter.err_min_round_off_refine);
hipMemcpy(workspace.newton_success_host, workspace.newton_success, n_path*sizeof(int),
hipMemcpyDeviceToHost);
n_path_continuous = 0;
//std::cout << "newton_success" << std::endl;
for(int path_idx=0; path_idx<n_path; path_idx++){
//std::cout << path_idx << " " << workspace.newton_success_host[path_idx] << std::endl;
if(workspace.newton_success_host[path_idx] == 0){
workspace.path_idx_host[n_path_continuous] = path_idx;
n_path_continuous += 1;
}
}
		if(n_path_continuous==0){
			break; // no active paths left; avoid launching zero-sized grids below
		}
		workspace.n_path_continuous = n_path_continuous;
max_grid = get_grid(n_path_continuous,inst.predict_BS,1);
std::cout << "n_path_continuous" << std::endl;
for(int path_idx=0; path_idx<n_path_continuous; path_idx++){
std::cout << path_idx << " " << workspace.path_idx_host[path_idx] << std::endl;
}
hipMemcpy(workspace.path_idx, workspace.path_idx_host, n_path*sizeof(int),
hipMemcpyHostToDevice);
if(debug){
hipMemcpy(workspace.max_f_val_host, workspace.max_f_val_gpu, n_path*sizeof(double),
hipMemcpyDeviceToHost);
hipMemcpy(workspace.r_max_f_val_host, workspace.r_max_f_val_gpu, n_path*sizeof(double),
hipMemcpyDeviceToHost);
for(int path_idx=0; path_idx<n_path; path_idx++){
//if(path_idx == path_idx_test){
std::cout << " max_f_value " << path_idx << " = " << workspace.max_f_val_host[path_idx] \
<< " " << workspace.r_max_f_val_host[path_idx] << std::endl;
//}
}
}
}
	hipLaunchKernelGGL(( check_kernel), dim3(check_grid), dim3(inst.predict_BS), 0, 0, workspace.max_f_val_gpu, workspace.r_max_f_val_gpu, workspace.max_delta_x_gpu, \
workspace.r_max_delta_x_gpu, workspace.path_success, workspace.newton_success, workspace.n_point_mult, workspace.x_t_idx_mult, workspace.n_array, \
workspace.workspace_size, n_path, workspace.end_range, path_parameter.err_min_round_off, path_parameter.err_min_round_off_refine);
hipMemcpy(workspace.newton_success_host, workspace.newton_success, n_path*sizeof(int),
hipMemcpyDeviceToHost);
if(debug){
std::cout << workspace.newton_success_host[path_idx_test] << std::endl;
}
return true;
}
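/* Batch Newton corrector on the aligned (structure-of-arrays) layout: mult_x_init
   gathers the still-active paths so that evaluation, MGS and the norm kernels read
   coalesced memory; the active set is re-compacted after the correction test and
   again after the residual test, and the iteration loop exits as soon as no path
   remains active. */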
bool newton_align(GPUWorkspace& workspace, GPUInst& inst, Parameter path_parameter, bool debug=false) {
//std::cout << "Newton Align" << std::endl;
//debug = true;
int path_idx_test = 0;
	bool debug_all = false;
//debug_all = true;
	int rowsLog2 = log2ceil(inst.n_eq); // ceil for sum reduction (unused here; kept for the tree-reduction debug kernels)
	int dimLog2 = log2ceil(inst.dim); // ceil for sum reduction (unused here; kept for the tree-reduction debug kernels)
int n_path = workspace.n_path;
for(int path_idx=0; path_idx<n_path; path_idx++){
workspace.newton_success_host[path_idx] = workspace.path_success_host[path_idx];
//std::cout << "workspace.success_host[path_idx] = " \
<< workspace.success_host[path_idx] << std::endl;
}
hipMemcpy(workspace.newton_success, workspace.newton_success_host, n_path*sizeof(int),
hipMemcpyHostToDevice);
//std::cout << "newton_success" << std::endl;
int n_path_continuous = 0;
for(int path_idx=0; path_idx<n_path; path_idx++){
//std::cout << path_idx << " " << workspace.newton_success_host[path_idx] << std::endl;
if(workspace.newton_success_host[path_idx] == 0){
workspace.path_idx_host[n_path_continuous] = path_idx;
n_path_continuous += 1;
}
}
hipMemcpy(workspace.path_idx, workspace.path_idx_host, n_path*sizeof(int), \
hipMemcpyHostToDevice);
dim3 init_grid = get_grid(n_path_continuous, inst.coef_BS, 1);
hipLaunchKernelGGL(( mult_x_init), dim3(init_grid), dim3(inst.coef_BS), 0, 0, workspace.x_array, workspace.t_array, workspace.alpha_gpu, \
workspace.x_mult, workspace.newton_t_mult, workspace.one_minor_t, \
workspace.path_idx, workspace.x_t_idx_mult, n_path_continuous, inst.dim, workspace.n_predictor);
dim3 max_grid = get_grid(n_path_continuous,inst.predict_BS,1);
hipLaunchKernelGGL(( max_x_double_kernel_align), dim3(max_grid), dim3(inst.predict_BS), 0, 0, workspace.x_mult, inst.dim, \
n_path_continuous, workspace.max_x_gpu, workspace.workspace_size, \
workspace.x_t_idx_mult, workspace.path_idx);
if(debug){
hipMemcpy(workspace.max_x_host, workspace.max_x_gpu, n_path*sizeof(double),
hipMemcpyDeviceToHost);
for(int path_idx=0; path_idx<n_path; path_idx++){
if(path_idx == path_idx_test || debug_all){
std::cout << " max_x_gpu " << path_idx << "= " << workspace.max_x_host[path_idx] << std::endl;
}
}
}
eval_mult(workspace, inst);
inst.n_eval_GPU++;
/*max_relative_double_kernel2<<<max_grid, inst.predict_BS>>>(workspace.f_val, inst.n_eq,\
n_path_continuous, workspace.max_f_val_gpu, workspace.max_f_val_last_gpu, workspace.r_max_f_val_gpu, workspace.max_x_gpu, \
workspace.workspace_size, workspace.path_idx);*/
hipLaunchKernelGGL(( max_relative_double_kernel3), dim3(max_grid), dim3(inst.predict_BS), 0, 0, workspace.f_val, inst.n_eq,\
n_path_continuous, workspace.n_path, workspace.path_idx, \
workspace.max_f_val_gpu, workspace.max_f_val_last_gpu, workspace.r_max_f_val_gpu, workspace.max_x_gpu);
if(debug){
hipMemcpy(workspace.max_f_val_host, workspace.max_f_val_gpu, n_path*sizeof(double),
hipMemcpyDeviceToHost);
hipMemcpy(workspace.r_max_f_val_host, workspace.r_max_f_val_gpu, n_path*sizeof(double),
hipMemcpyDeviceToHost);
for(int path_idx=0; path_idx<n_path; path_idx++){
if(path_idx == path_idx_test || debug_all){
std::cout << " max_f_value " << path_idx << " = " << workspace.max_f_val_host[path_idx] \
<< " " << workspace.r_max_f_val_host[path_idx] << std::endl;
}
}
}
dim3 check_grid = get_grid(n_path,inst.predict_BS,1);
for (int it_idx = 0; it_idx < path_parameter.max_it; it_idx++) {
if(debug){
std::cout << " Iteration " << it_idx << std::endl;
}
if(inst.dim <= BS_QR){
/*std::cout << "matrix" << std::endl;
CT** matrix_gpu_q = new CT*[n_path];
//CT** matrix_gpu_r = new CT*[n_path];
for(int path_idx=0; path_idx<n_path; path_idx++){
matrix_gpu_q[path_idx] = workspace.get_matrix(path_idx);
//matrix_gpu_r[path_idx] = workspace.get_matrix_r(path_idx);
if(path_idx==path_idx_test){
for(int var_idx=1; var_idx<=1; var_idx++){
CT tmp(0.0,0.0);
for(int eq_idx=0; eq_idx<inst.n_eq; eq_idx++){
std::cout << var_idx << " " << eq_idx << " " \
<< matrix_gpu_q[path_idx][eq_idx+var_idx*inst.n_eq];
tmp += matrix_gpu_q[path_idx][eq_idx+var_idx*inst.n_eq];
}
std::cout << tmp;
}
std::cout << std::endl;
}
}*/
//std::cout << "n_matrix = " << workspace.n_matrix << std::endl;
mgs_small1_idx(workspace.matrix_horizontal_mult, workspace.R, workspace.sol, inst.n_eq, inst.dim+1,\
workspace.workspace_size, workspace.n_matrix, workspace.n_matrix_R, n_path_continuous, workspace.path_idx);
//array_max_double_kernel<<<1,inst.dim>>>(workspace.sol, inst.dim, dimLog2, max_delta_x_gpu);
/*CT** sol_gpu = new CT*[n_path];
//CT** matrix_gpu_q = new CT*[n_path];
//CT** matrix_gpu_r = new CT*[n_path];
for(int path_idx=0; path_idx<n_path; path_idx++){
sol_gpu[path_idx] = workspace.get_sol(path_idx);
//for(int var_idx=0; var_idx<inst.dim; var_idx++){
if(path_idx==path_idx_test){
for(int var_idx=0; var_idx<inst.dim; var_idx++){
std::cout << path_idx << " " << var_idx << " " << sol_gpu[path_idx][var_idx];
}
}
//matrix_gpu_q[path_idx] = workspace.get_matrix(path_idx);
//matrix_gpu_r[path_idx] = workspace.get_matrix_r(path_idx);
}*/
hipLaunchKernelGGL(( max_relative_double_kernel), dim3(max_grid), dim3(inst.predict_BS), 0, 0, workspace.sol, inst.dim, n_path_continuous, \
workspace.max_delta_x_gpu, workspace.r_max_delta_x_gpu, workspace.max_x_gpu, \
workspace.workspace_size, workspace.path_idx);
}
else{
mgs_large_block(workspace.V, workspace.R, workspace.P, workspace.sol, inst.n_eq, inst.dim+1);
//array_max_double_kernel<<<1,inst.dim>>>(workspace.sol, inst.dim, dimLog2, max_delta_x_gpu);
}
if(debug){
hipMemcpy(workspace.max_delta_x_host, workspace.max_delta_x_gpu, n_path*sizeof(double),
hipMemcpyDeviceToHost);
hipMemcpy(workspace.r_max_delta_x_host, workspace.r_max_delta_x_gpu, n_path*sizeof(double),
hipMemcpyDeviceToHost);
for(int path_idx=0; path_idx<n_path; path_idx++){
if(path_idx == path_idx_test|| debug_all){
std::cout << " max_delta_x " << path_idx << " = " << workspace.max_delta_x_host[path_idx] \
<< " " << workspace.r_max_delta_x_host[path_idx] << std::endl;
}
}
}
hipLaunchKernelGGL(( update_x_kernel), dim3(n_path_continuous), dim3(inst.dim), 0, 0, workspace.x_array, workspace.sol, inst.dim, \
workspace.workspace_size, workspace.x_t_idx_mult, workspace.path_idx, workspace.n_predictor);
//workspace.print_x_mult();
hipLaunchKernelGGL(( check_kernel), dim3(check_grid), dim3(inst.predict_BS), 0, 0, workspace.max_delta_x_gpu, workspace.r_max_delta_x_gpu, \
workspace.newton_success, n_path, workspace.end_range, path_parameter.err_min_round_off, path_parameter.err_min_round_off_refine);
hipMemcpy(workspace.newton_success_host, workspace.newton_success, n_path*sizeof(int), \
hipMemcpyDeviceToHost);
n_path_continuous = 0;
//std::cout << "newton_success" << std::endl;
for(int path_idx=0; path_idx<n_path; path_idx++){
//std::cout << path_idx << " " << workspace.newton_success_host[path_idx] << std::endl;
if(workspace.newton_success_host[path_idx] == 0){
workspace.path_idx_host[n_path_continuous] = path_idx;
n_path_continuous += 1;
}
}
if(n_path_continuous==0){
break;
}
workspace.n_path_continuous = n_path_continuous;
max_grid = get_grid(n_path_continuous,inst.predict_BS,1);
/*std::cout << "n_path_continuous = " << n_path_continuous << " : ";
for(int path_idx=0; path_idx<n_path_continuous; path_idx++){
std::cout << workspace.path_idx_host[path_idx] << ", ";
}
std::cout << std::endl;*/
hipMemcpy(workspace.path_idx, workspace.path_idx_host, n_path*sizeof(int), \
hipMemcpyHostToDevice);
//std::cout << "Correct X:" << std::endl;
//workspace.print_x_mult();
init_grid = get_grid(n_path_continuous, inst.coef_BS, 1);
hipLaunchKernelGGL(( mult_x_init), dim3(init_grid), dim3(inst.coef_BS), 0, 0, workspace.x_array, workspace.t_array, workspace.alpha_gpu, \
workspace.x_mult, workspace.newton_t_mult, workspace.one_minor_t, \
workspace.path_idx, workspace.x_t_idx_mult, n_path_continuous, inst.dim, workspace.n_predictor);
hipLaunchKernelGGL(( max_x_double_kernel_align), dim3(max_grid), dim3(inst.predict_BS), 0, 0, workspace.x_mult, inst.dim, \
n_path_continuous, workspace.max_x_gpu, workspace.workspace_size, \
workspace.x_t_idx_mult, workspace.path_idx);
if(debug){
hipMemcpy(workspace.max_x_host, workspace.max_x_gpu, n_path*sizeof(double),
hipMemcpyDeviceToHost);
for(int path_idx=0; path_idx<n_path; path_idx++){
if(path_idx == path_idx_test || debug_all){
std::cout << " max_x_gpu " << path_idx << "= " << workspace.max_x_host[path_idx] << std::endl;
}
}
}
eval_mult(workspace, inst);
//inst.n_eval_GPU++;
/*GT* tmp_f_val = workspace.matrix_horizontal_mult + inst.n_eq*inst.dim;
hipLaunchKernelGGL(( max_relative_double_kernel), dim3(max_grid), dim3(inst.predict_BS), 0, 0, tmp_f_val, inst.n_eq, \
n_path_continuous, workspace.max_f_val_gpu, workspace.r_max_f_val_gpu, workspace.max_x_gpu, \
workspace.n_matrix, workspace.path_idx);*/
hipLaunchKernelGGL(( max_relative_double_kernel3), dim3(max_grid), dim3(inst.predict_BS), 0, 0, workspace.f_val, inst.n_eq,\
n_path_continuous, workspace.n_path, workspace.path_idx, \
workspace.max_f_val_gpu, workspace.max_f_val_last_gpu, workspace.r_max_f_val_gpu, workspace.max_x_gpu);
if(debug){
hipMemcpy(workspace.max_f_val_host, workspace.max_f_val_gpu, n_path*sizeof(double),
hipMemcpyDeviceToHost);
hipMemcpy(workspace.r_max_f_val_host, workspace.r_max_f_val_gpu, n_path*sizeof(double),
hipMemcpyDeviceToHost);
for(int path_idx=0; path_idx<n_path; path_idx++){
if(path_idx == path_idx_test || debug_all){
std::cout << " max_f_value " << path_idx << " = " << workspace.max_f_val_host[path_idx] \
<< " " << workspace.r_max_f_val_host[path_idx] << std::endl;
}
}
}
hipLaunchKernelGGL(( check_kernel), dim3(check_grid), dim3(inst.predict_BS), 0, 0, workspace.max_f_val_gpu, workspace.r_max_f_val_gpu, workspace.max_f_val_last_gpu, \
workspace.newton_success, n_path, workspace.end_range, path_parameter.err_min_round_off, path_parameter.err_min_round_off_refine);
hipMemcpy(workspace.newton_success_host, workspace.newton_success, n_path*sizeof(int),
hipMemcpyDeviceToHost);
n_path_continuous = 0;
//std::cout << "newton_success" << std::endl;
for(int path_idx=0; path_idx<n_path; path_idx++){
//std::cout << path_idx << " " << workspace.newton_success_host[path_idx] << std::endl;
if(workspace.newton_success_host[path_idx] == 0){
workspace.path_idx_host[n_path_continuous] = path_idx;
n_path_continuous += 1;
}
}
if(n_path_continuous==0){
break;
}
workspace.n_path_continuous = n_path_continuous;
max_grid = get_grid(n_path_continuous,inst.predict_BS,1);
/*std::cout << "n_path_continuous = " << n_path_continuous << " : ";
for(int path_idx=0; path_idx<n_path_continuous; path_idx++){
std::cout << workspace.path_idx_host[path_idx] << ", ";
}
std::cout << std::endl;*/
hipMemcpy(workspace.path_idx, workspace.path_idx_host, n_path*sizeof(int),
hipMemcpyHostToDevice);
}
	hipLaunchKernelGGL(( check_kernel), dim3(check_grid), dim3(inst.predict_BS), 0, 0, workspace.max_f_val_gpu, workspace.r_max_f_val_gpu, workspace.max_delta_x_gpu, \
workspace.r_max_delta_x_gpu, workspace.path_success, workspace.newton_success, workspace.n_point_mult, workspace.x_t_idx_mult, workspace.n_array, \
workspace.workspace_size, n_path, workspace.end_range, path_parameter.err_min_round_off, path_parameter.err_min_round_off_refine);
hipMemcpy(workspace.newton_success_host, workspace.newton_success, n_path*sizeof(int),
hipMemcpyDeviceToHost);
if(debug){
std::cout << workspace.newton_success_host[path_idx_test] << std::endl;
}
return true;
}
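/* Standalone entry point: builds the GPU instance and workspace from the host
   homotopy, uploads the start solution (cpu_sol0 at parameter value cpu_t),
   runs one batch Newton stage, reports the elapsed time, and returns the
   corrected coordinates through x_new. */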
bool GPU_Newton(CPUInstHom& hom, Parameter path_parameter, CT* cpu_sol0, CT cpu_t, CT*& x_new, int n_path) {
cout << "Newton ";
cout << "max_it = " << path_parameter.max_it << endl;
cout << "eps = " << path_parameter.err_max_delta_x << endl;
//clock_t begin = clock();
cuda_set();
GPUInst inst(hom, n_path);
int mon_pos_size = hom.CPU_inst_hom_mon.mon_pos_size;
if(MON_EVAL_METHOD == 1){
mon_pos_size = hom.CPU_inst_hom_block.mon_pos_block_size + inst.n_mon_level[0]*2;
}
GPUWorkspace workspace(mon_pos_size, inst.n_coef, inst.n_constant, inst.n_eq, inst.dim, path_parameter.n_predictor, inst.alpha);
workspace.update_x_t_value(cpu_sol0, cpu_t);
clock_t begin = clock();
bool success = newton(workspace, inst, path_parameter);
clock_t end = clock();
double timeSec_Newton = (end - begin) / static_cast<double>( CLOCKS_PER_SEC );
cout << "Path GPU Newton Time: "<< timeSec_Newton << endl;
x_new = workspace.get_x();
/*clock_t end = clock();
double timeSec = (end - begin) / static_cast<double>( CLOCKS_PER_SEC );
cout << "done: "<< timeSec << endl;*/
return success;
}
| d8d9353e866ef5c0d43fa3282e6478bae6cd1b80.cu | #include "eval.cu"
#include "mgs.cu"
__global__ void update_x_kernel(GT* x, GT* sol, int dim)
{
int BS = blockDim.x;
int bidx = blockIdx.x*BS;
int tidx = threadIdx.x;
int idx = bidx + tidx;
/*int path_idx = blockIdx.z;
x_predictor += path_idx*np_predictor*dim;
t_predictor += path_idx*np_predictor;
x_new += path_idx*dim;*/
if(idx < dim) {
x[idx] = x[idx] - sol[idx];
}
}
__global__ void update_x_kernel(GT* x, GT* sol, int dim, int workspace_size, int* x_t_idx)
{
//int BS = blockDim.x;
//int bidx = blockIdx.x*BS;
int idx = threadIdx.x;
//int idx = bidx + tidx;
int path_idx = blockIdx.x;
sol += path_idx*dim;
x += path_idx*workspace_size + x_t_idx[path_idx]*dim;
/*int path_idx = blockIdx.z;
x_predictor += path_idx*np_predictor*dim;
t_predictor += path_idx*np_predictor;
x_new += path_idx*dim;*/
if(idx < dim) {
x[idx] = x[idx] - sol[idx];
}
}
__global__ void update_x_kernel_mult(GT* x, GT* sol, int dim, int n_path)
{
//int BS = blockDim.x;
//int bidx = blockIdx.x*BS;
//int idx = threadIdx.x;
//int idx = bidx + tidx;
//int path_idx = blockIdx.x;
int path_idx = (gridDim.x*blockIdx.y+blockIdx.x)*blockDim.x + threadIdx.x;
int var_idx = blockIdx.z;
if(path_idx<n_path){
sol += path_idx;
x += path_idx;
x[var_idx*n_path] = x[var_idx*n_path] - sol[var_idx*n_path];
}
}
__global__ void update_x_kernel(GT* x, GT* sol, int dim, int workspace_size, int* x_t_idx, int* path_idx_mult)
{
//int BS = blockDim.x;
//int bidx = blockIdx.x*BS;
int idx = threadIdx.x;
//int idx = bidx + tidx;
int path_idx = path_idx_mult[blockIdx.x];
sol += blockIdx.x*dim;
x += path_idx*workspace_size + x_t_idx[path_idx]*dim;
/*int path_idx = blockIdx.z;
x_predictor += path_idx*np_predictor*dim;
t_predictor += path_idx*np_predictor;
x_new += path_idx*dim;*/
if(idx < dim) {
x[idx] = x[idx] - sol[idx];
}
}
__global__ void update_x_kernel(GT* x_mult, GT* sol, int dim, int workspace_size,\
int* x_t_idx, int* path_idx_mult, int n_predictor)
{
//int BS = blockDim.x;
//int bidx = blockIdx.x*BS;
int idx = threadIdx.x;
//int idx = bidx + tidx;
int path_idx = path_idx_mult[blockIdx.x];
sol += blockIdx.x*dim;
//x += path_idx*workspace_size + x_t_idx[path_idx]*dim;
x_mult += path_idx*dim*(n_predictor+1) + x_t_idx[path_idx]*dim;
/*int path_idx = blockIdx.z;
x_predictor += path_idx*np_predictor*dim;
t_predictor += path_idx*np_predictor;
x_new += path_idx*dim;*/
if(idx < dim) {
//x[idx] = x[idx] - sol[idx];
x_mult[idx] = x_mult[idx] - sol[idx];
}
}
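/* Gathers the current point and t of every still-active path into the aligned
   (structure-of-arrays) buffers used by the "align" code path: one thread per
   path copies x into x_mult with stride n_path, records t, and precomputes
   alpha*(1 - t) for the homotopy evaluation. */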
__global__ void mult_x_init(GT* x_array, GT* t_array, GT* alpha, \
GT* x_mult, GT* t_mult, GT* one_minor_t_mult, \
int* path_idx_mult, int* x_t_idx_mult, int n_path, int dim, int n_predictor){
int t_idx = threadIdx.x;
int BS = blockDim.x;
int eval_idx = (gridDim.x*blockIdx.y+blockIdx.x)*BS+t_idx;
if(eval_idx < n_path){
int path_idx = path_idx_mult[eval_idx];
//GT* t = t_array + workspace_size*path_idx + x_t_idx_mult[path_idx];
GT* tmp_t = t_array + path_idx*(n_predictor+1) + x_t_idx_mult[path_idx];
//one_minor_t += workspace_size*path_idx;
t_mult[eval_idx] = *tmp_t;
		one_minor_t_mult[eval_idx] = (*alpha)*(GT(1.0,0) - t_mult[eval_idx]);
x_mult += eval_idx;
//GT* x = x_array + workspace_size*path_idx + x_t_idx_mult[path_idx]*dim;
GT* x_tmp = x_array + path_idx*dim*(n_predictor+1)+ + x_t_idx_mult[path_idx]*dim;
for(int var_idx=0; var_idx<dim; var_idx++){
x_mult[var_idx*n_path] = x_tmp[var_idx];
}
}
}
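/* Shared-memory tree reduction: one block per path takes the maximum 1-norm
   over the dim entries of its Newton step, then stores both the absolute
   maximum and the value relative to max|x| for that path. dimLog2 gives the
   reduction depth; __syncthreads() is skipped once the active half fits in a
   single warp (half_size <= 16), relying on implicit warp-synchronous
   execution (a legacy CUDA assumption). */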
__global__ void array_max_double_kernel(GT* sol, int dim, int dimLog2, double* max_array, \
double* r_max_array, double* max_x, int workspace_size) {
__shared__ double x_norm[max_array_size];
int j = threadIdx.x;
// max for the norm
int path_idx = blockIdx.x;
sol += path_idx*workspace_size;
// sol += path_idx + path_idx*workspace_size;
x_norm[j] = sol[j].norm1_double();
dimLog2 -= 1;
int half_size = 1 << (dimLog2);// sum for the norm
if(half_size > 16) {
__syncthreads();
}
if(j + half_size < dim) {
if(x_norm[j] < x_norm[j+half_size]) {
x_norm[j] = x_norm[j+half_size];
}
}
for(int k=0; k < dimLog2; k++) {
if(half_size > 16) {
__syncthreads();
}
half_size /= 2;
if(j < half_size) {
if(x_norm[j] < x_norm[j+half_size]) {
x_norm[j] = x_norm[j+half_size];
}
}
}
if(j == 0) {
max_array[path_idx] = x_norm[0];
r_max_array[path_idx] = x_norm[0]/max_x[path_idx];
}
}
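/* Flat variant: one thread per active path scans its dim solution entries
   sequentially. Cheaper to launch than the tree reduction when many paths are
   tracked at once, since a whole path costs only a single thread. */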
__global__ void max_relative_double_kernel(GT* sol, int dim, int n_path_continuous, double* max_array, \
double* r_max_array, double* max_x, int workspace_size, int* path_idx_mult) {
//__shared__ double x_norm[max_array_size];
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if(idx < n_path_continuous){
int path_idx = path_idx_mult[idx];
sol += idx*dim;
double x_norm = sol[0].norm1_double();
for(int var_idx=1; var_idx<dim; var_idx++){
double tmp_x_norm = sol[var_idx].norm1_double();
if(x_norm < tmp_x_norm) {
x_norm = tmp_x_norm;
}
}
max_array[path_idx] = x_norm;
r_max_array[path_idx] = x_norm/max_x[path_idx];
}
}
__global__ void max_relative_double_kernel2(GT* sol, int dim, int n_path_continuous, double* max_array, double* max_array_last, \
double* r_max_array, double* max_x, int workspace_size, int* path_idx_mult) {
//__shared__ double x_norm[max_array_size];
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if(idx < n_path_continuous){
int path_idx = path_idx_mult[idx];
sol += path_idx*workspace_size;
double x_norm = sol[0].norm1_double();
for(int var_idx=1; var_idx<dim; var_idx++){
double tmp_norm_x = sol[var_idx].norm1_double();
if(x_norm < tmp_norm_x) {
x_norm = tmp_norm_x;
}
}
max_array[path_idx] = x_norm;
max_array_last[path_idx] = x_norm;
r_max_array[path_idx] = x_norm/max_x[path_idx];
}
}
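/* Structure-of-arrays variant: the entries of one path are strided by n_path,
   so neighbouring threads read neighbouring addresses. It also records the
   maximum in max_array_last so a later check_kernel can detect a growing
   residual. */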
__global__ void max_relative_double_kernel3(GT* sol, int dim, int n_path_continuous, int n_path, int* path_idx_mult, \
double* max_array, double* max_array_last, double* r_max_array, double* max_x) {
//__shared__ double x_norm[max_array_size];
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if(idx < n_path_continuous){
int path_idx = path_idx_mult[idx];
sol += idx;
double x_norm = sol[0].norm1_double();
for(int var_idx=1; var_idx<dim; var_idx++){
double tmp_norm_x = sol[var_idx*n_path].norm1_double();
if(x_norm < tmp_norm_x) {
x_norm = tmp_norm_x;
}
}
max_array[path_idx] = x_norm;
max_array_last[path_idx] = x_norm;
r_max_array[path_idx] = x_norm/max_x[path_idx];
}
}
// Tree-reduction variant: not efficient when many paths are tracked (one block per path)
__global__ void max_relative_double_kernel_tree(GT* sol, int dim, int dimLog2, double* max_array, \
double* r_max_array, double* max_x, int workspace_size, int* path_idx_mult) {
__shared__ double x_norm[max_array_size];
int j = threadIdx.x;
// max for the norm
int path_idx = path_idx_mult[blockIdx.x];
sol += path_idx*workspace_size;
// sol += path_idx + path_idx*workspace_size;
x_norm[j] = sol[j].norm1_double();
dimLog2 -= 1;
int half_size = 1 << (dimLog2);// sum for the norm
if(half_size > 16) {
__syncthreads();
}
if(j + half_size < dim) {
if(x_norm[j] < x_norm[j+half_size]) {
x_norm[j] = x_norm[j+half_size];
}
}
for(int k=0; k < dimLog2; k++) {
if(half_size > 16) {
__syncthreads();
}
half_size /= 2;
if(j < half_size) {
if(x_norm[j] < x_norm[j+half_size]) {
x_norm[j] = x_norm[j+half_size];
}
}
}
if(j == 0) {
max_array[path_idx] = x_norm[0];
r_max_array[path_idx] = x_norm[0]/max_x[path_idx];
}
}
__global__ void array_max_double_kernel(GT* sol, int dim, int dimLog2, double* max_array, int workspace_size, int* x_t_idx) {
__shared__ double x_norm[max_array_size];
int j = threadIdx.x;
// max for the norm
int path_idx = blockIdx.x;
sol += path_idx*workspace_size + dim*x_t_idx[path_idx];
x_norm[j] = sol[j].norm1_double();
dimLog2 -= 1;
int half_size = 1 << (dimLog2);// sum for the norm
if(half_size > 16) {
__syncthreads();
}
if(j + half_size < dim) {
if(x_norm[j] < x_norm[j+half_size]) {
x_norm[j] = x_norm[j+half_size];
}
}
for(int k=0; k < dimLog2; k++) {
if(half_size > 16) {
__syncthreads();
}
half_size /= 2;
if(j < half_size) {
if(x_norm[j] < x_norm[j+half_size]) {
x_norm[j] = x_norm[j+half_size];
}
}
}
if(j == 0) {
max_array[path_idx] = x_norm[0];
}
}
__global__ void max_x_double_kernel_tree(GT* sol, int dim, int dimLog2, double* max_array, \
int workspace_size, int* x_t_idx, int* path_idx_mult) {
__shared__ double x_norm[max_array_size];
int j = threadIdx.x;
// max for the norm
int path_idx = path_idx_mult[blockIdx.x];
sol += path_idx*workspace_size + dim*x_t_idx[path_idx];
x_norm[j] = sol[j].norm1_double();
dimLog2 -= 1;
int half_size = 1 << (dimLog2);// sum for the norm
if(half_size > 16) {
__syncthreads();
}
if(j + half_size < dim) {
if(x_norm[j] < x_norm[j+half_size]) {
x_norm[j] = x_norm[j+half_size];
}
}
for(int k=0; k < dimLog2; k++) {
if(half_size > 16) {
__syncthreads();
}
half_size /= 2;
if(j < half_size) {
if(x_norm[j] < x_norm[j+half_size]) {
x_norm[j] = x_norm[j+half_size];
}
}
}
if(j == 0) {
max_array[path_idx] = x_norm[0];
}
}
__global__ void max_x_double_kernel(GT* sol, int dim, int n_path_continuous, double* max_array, \
int workspace_size, int* x_t_idx, int* path_idx_mult) {
//__shared__ double x_norm[max_array_size];
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if(idx < n_path_continuous){
int path_idx = path_idx_mult[idx];
sol += path_idx*workspace_size + dim*x_t_idx[path_idx];
double x_norm = sol[0].norm1_double();
for(int var_idx=1; var_idx<dim; var_idx++){
double tmp_norm_x = sol[var_idx].norm1_double();
if(x_norm < tmp_norm_x) {
x_norm = tmp_norm_x;
}
}
max_array[path_idx] = x_norm;
}
}
__global__ void max_x_double_kernel_align(GT* sol, int dim, int n_path_continuous, double* max_array, \
int workspace_size, int* x_t_idx, int* path_idx_mult) {
//__shared__ double x_norm[max_array_size];
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if(idx < n_path_continuous){
int path_idx = path_idx_mult[idx];
sol += path_idx;
double x_norm = sol[0].norm1_double();
for(int var_idx=1; var_idx<dim; var_idx++){
double tmp_norm_x = sol[var_idx*n_path_continuous].norm1_double();
if(x_norm < tmp_norm_x) {
x_norm = tmp_norm_x;
}
}
max_array[path_idx] = x_norm;
}
}
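/* Compacts the scattered state of the still-active paths into contiguous
   buffers: the current point of each active path is copied into
   newton_sol_mult (stride n_path_continuous), the original path id is saved in
   newton_sol_mult_idx, path_idx_mult is renumbered 0..n_path_continuous-1, and
   the path's alpha*(1-t) value is carried along. */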
__global__ void zip_kernel(GT* sol, int dim, int n_path_continuous, double* max_array, \
int workspace_size, int* x_t_idx, int* path_idx_mult, GT* newton_sol_mult, int* newton_sol_mult_idx,
GT* one_minor_t, GT* one_minor_t_mult) {
//__shared__ double x_norm[max_array_size];
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if(idx < n_path_continuous){
int path_idx = path_idx_mult[idx];
sol += path_idx*workspace_size + dim*x_t_idx[path_idx];
newton_sol_mult += idx;
newton_sol_mult[0] = sol[0];
for(int var_idx=1; var_idx<dim; var_idx++){
newton_sol_mult[var_idx*n_path_continuous] = sol[var_idx];
}
newton_sol_mult_idx[idx] = path_idx;
path_idx_mult[idx] = idx;
one_minor_t_mult[idx] = one_minor_t[path_idx*workspace_size];
}
}
__global__ void max_x_double_zip_kernel(GT* sol, int dim, int n_path_continuous, double* max_array) {
//__shared__ double x_norm[max_array_size];
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if(idx < n_path_continuous){
sol += idx;
double x_norm = sol[0].norm1_double();
for(int var_idx=1; var_idx<dim; var_idx++){
double tmp_norm_x = sol[var_idx*n_path_continuous].norm1_double();
if(x_norm < tmp_norm_x) {
x_norm = tmp_norm_x;
}
}
max_array[idx] = x_norm;
}
}
__global__ void max_relative_double_zip_kernel(GT* sol, int dim, int n_path_continuous, double* max_array, double* r_max_array, double* max_x) {
//__shared__ double x_norm[max_array_size];
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if(idx < n_path_continuous){
sol += idx;
double x_norm = sol[0].norm1_double();
for(int var_idx=1; var_idx<dim; var_idx++){
double tmp_norm_x = sol[var_idx*n_path_continuous].norm1_double();
if(x_norm < tmp_norm_x) {
x_norm = tmp_norm_x;
}
}
max_array[idx] = x_norm;
r_max_array[idx] = x_norm/max_x[idx];
}
}
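/* Convergence tests. The overloads below mark a path as converged when its
   residual or correction drops under the round-off threshold, switching to the
   refinement threshold once a path is in the end range of t. The full overload
   additionally bumps n_point_mult and advances the ring-buffer slot x_t_idx of
   a freshly converged path; the residual overload flags divergence (-1) when
   the residual grows between iterations. */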
__global__ void check_kernel(double* max_f_val_gpu, double* r_max_f_val_gpu, \
double* max_delta_x_gpu, double* r_max_delta_x_gpu, int* path_success, int* success, \
int* n_point_mult, int* x_t_idx_mult, int n_array, int workspace_size, int n_path, int* end_range, \
double err_min_round_off, double err_min_round_off_end){
int path_idx = threadIdx.x+blockIdx.x*blockDim.x;
if(path_idx<n_path){
double err_roundoff;
if(end_range[path_idx]==1){
err_roundoff = err_min_round_off_end;
}
else{
err_roundoff = err_min_round_off;
}
if( path_success[path_idx] == 0 && ( max_f_val_gpu[path_idx] < err_roundoff \
|| r_max_f_val_gpu[path_idx] < err_roundoff || max_delta_x_gpu[path_idx] < err_roundoff \
|| r_max_delta_x_gpu[path_idx] < err_roundoff) ){
success[path_idx] = 1;
n_point_mult[path_idx]++;
// remove %
x_t_idx_mult[path_idx] = (x_t_idx_mult[path_idx]+1)%n_array;
}
}
}
__global__ void check_kernel(double* max_delta_x_gpu, double* r_max_delta_x_gpu, \
int* success, int n_path, int* end_range, double err_min_round_off, double err_min_round_off_end){
int path_idx = threadIdx.x+blockIdx.x*blockDim.x;
if(path_idx<n_path){
double err_roundoff;
if(end_range[path_idx]==1){
err_roundoff = err_min_round_off_end;
}
else{
err_roundoff = err_min_round_off;
}
if( max_delta_x_gpu[path_idx] < err_roundoff || r_max_delta_x_gpu[path_idx] < err_roundoff ){
success[path_idx] = 1;
}
}
}
__global__ void check_kernel(double* max_f_val_gpu, double* r_max_f_val_gpu, double* max_f_val_last_gpu, \
int* success, int n_path, int* end_range, double err_min_round_off, double err_min_round_off_end){
int path_idx = threadIdx.x+blockIdx.x*blockDim.x;
if(path_idx<n_path){
if(max_f_val_gpu[path_idx]>max_f_val_last_gpu[path_idx]){
success[path_idx] = -1;
}
double err_roundoff;
if(end_range[path_idx]==1){
err_roundoff = err_min_round_off_end;
}
else{
err_roundoff = err_min_round_off;
}
if( max_f_val_gpu[path_idx] < err_roundoff || r_max_f_val_gpu[path_idx] < err_roundoff ){
success[path_idx] = 1;
}
else{
max_f_val_last_gpu[path_idx] = max_f_val_gpu[path_idx];
}
}
}
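/* Resets the per-path success flags before a fresh Newton stage. */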
__global__ void newton_init(int* success, int n_path){
int newton_path_idx = threadIdx.x+blockIdx.x*blockDim.x;
if(newton_path_idx<n_path){
success[newton_path_idx] = 0;
}
}
__global__ void check_kernel_mult(double* max_delta_x_gpu, double* r_max_delta_x_gpu, \
int* success, int n_path, int* end_range, int* path_idx_mult, double err_min_round_off, double err_min_round_off_end){
	int newton_path_idx = threadIdx.x+blockIdx.x*blockDim.x;
	if(newton_path_idx<n_path){
		int path_idx = path_idx_mult[newton_path_idx];
double err_roundoff;
if(end_range[path_idx]==1){
err_roundoff = err_min_round_off_end;
}
else{
err_roundoff = err_min_round_off;
}
if( max_delta_x_gpu[path_idx] < err_roundoff || r_max_delta_x_gpu[path_idx] < err_roundoff ){
success[path_idx] = 1;
}
}
}
__global__ void array_max_double_kernel(GT* sol, int dim, int dimLog2, double* max_delta_x ) {
__shared__ double delta_x[max_array_size];
int j = threadIdx.x;
// max for the norm
delta_x[j] = sol[j].norm1_double();
dimLog2 -= 1;
int half_size = 1 << (dimLog2);// sum for the norm
if(half_size > 16) {
__syncthreads();
}
if(j + half_size < dim) {
if(delta_x[j] < delta_x[j+half_size]) {
delta_x[j] = delta_x[j+half_size];
}
}
for(int k=0; k < dimLog2; k++) {
if(half_size > 16) {
__syncthreads();
}
half_size /= 2;
if(j < half_size) {
if(delta_x[j] < delta_x[j+half_size]) {
delta_x[j] = delta_x[j+half_size];
}
}
}
if(j == 0) {
*max_delta_x = delta_x[0];
}
}
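/* Host driver: Newton's method for a single path. Each iteration solves the
   linearized system J*delta_x = f by modified Gram-Schmidt QR (mgs_small /
   mgs_large), applies the correction, and stops when the absolute or relative
   correction or residual falls under the round-off threshold, when max_it is
   reached, or with failure when the residual increases. */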
bool newton_single(GPUWorkspace& workspace, GPUInst& inst, Parameter path_parameter, bool end_range=false) {
bool debug = false;
//debug = true;
bool success = false;
int rowsLog2 = log2ceil(inst.n_eq); // ceil for sum reduction
int dimLog2 = log2ceil(inst.dim); // ceil for sum reduction
double max_x;
double max_f_val;
double r_max_f_val;
double max_delta_x;
double r_max_delta_x;
double* max_x_gpu;
cudaMalloc((void **) &max_x_gpu, sizeof(double));
double* max_f_val_gpu;
cudaMalloc((void **) &max_f_val_gpu, sizeof(double));
double* max_delta_x_gpu;
cudaMalloc((void **) &max_delta_x_gpu, sizeof(double));
double err_round_off;
if(end_range==true){
err_round_off = path_parameter.err_min_round_off_refine;
}
else{
err_round_off = path_parameter.err_min_round_off;
}
array_max_double_kernel<<<1, inst.dim>>>(workspace.x, inst.dim, \
dimLog2, max_x_gpu);
cudaMemcpy(&max_x, max_x_gpu, sizeof(double),
cudaMemcpyDeviceToHost);
if(debug){
std::cout << " max_x : " << max_x << std::endl;
}
eval(workspace, inst);
inst.n_eval_GPU++;
array_max_double_kernel<<<1, inst.n_eq>>>(workspace.f_val, inst.n_eq, \
rowsLog2, max_f_val_gpu);
cudaMemcpy(&max_f_val, max_f_val_gpu, sizeof(double),
cudaMemcpyDeviceToHost);
r_max_f_val = max_f_val/max_x;
if(debug){
std::cout << " residual(a&r): " << max_f_val \
<< " " << r_max_f_val << std::endl;
}
	if(max_f_val < err_round_off || r_max_f_val < err_round_off){
		// already converged: release the temporary reduction buffers before returning
		cudaFree(max_x_gpu);
		cudaFree(max_f_val_gpu);
		cudaFree(max_delta_x_gpu);
		return true;
	}
double last_max_f_val = max_f_val;
for (int i = 0; i < path_parameter.max_it; i++) {
if(debug){
cout << " Iteration " << i << endl;
}
if (inst.n_eq <= BS_QR) {
mgs_small_with_delta(workspace.matrix, workspace.R, workspace.sol,
inst.n_eq, inst.dim + 1, max_delta_x_gpu);
/*CT* tmp_sol = workspace.get_sol();
for(int var_idx=0; var_idx<inst.dim; var_idx++){
std::cout << var_idx << " " << tmp_sol[var_idx];
}*/
} else {
mgs_large_block(workspace.matrix, workspace.R, workspace.P, workspace.sol, inst.n_eq,\
inst.dim + 1);
//mgs_large(workspace.V, workspace.R, workspace.sol, inst.n_eq, inst.dim+1);
array_max_double_kernel<<<1,inst.dim>>>(workspace.sol, inst.dim, dimLog2, max_delta_x_gpu);
}
inst.n_mgs_GPU++;
cudaMemcpy(&max_delta_x, max_delta_x_gpu, sizeof(double),
cudaMemcpyDeviceToHost);
r_max_delta_x = max_delta_x/max_x;
if(debug){
std::cout << " correction(a&r): " << max_delta_x \
<< " " << r_max_delta_x << std::endl;
}
update_x_kernel<<<inst.dim_grid, inst.dim_BS>>>(workspace.x, workspace.sol,
inst.dim);
if(max_delta_x < err_round_off || r_max_delta_x < err_round_off){
success = 1;
break;
}
array_max_double_kernel<<<1, inst.dim>>>(workspace.x, inst.dim, \
dimLog2, max_x_gpu);
cudaMemcpy(&max_x, max_x_gpu, sizeof(double),
cudaMemcpyDeviceToHost);
if(debug){
std::cout << " max_x : " << max_x << std::endl;
}
eval(workspace, inst);
inst.n_eval_GPU++;
array_max_double_kernel<<<1, inst.n_eq>>>(workspace.f_val, inst.n_eq,
rowsLog2, max_f_val_gpu);
cudaMemcpy(&max_f_val, max_f_val_gpu, sizeof(double),
cudaMemcpyDeviceToHost);
r_max_f_val = max_f_val/max_x;
if(debug){
std::cout << " residual(a&r): " << max_f_val \
<< " " << r_max_f_val << std::endl;
}
if (max_f_val > last_max_f_val) {
success = 0;
break;
}
if(max_f_val < err_round_off || r_max_f_val < err_round_off){
success = 1;
break;
}
last_max_f_val = max_f_val;
}
	cudaFree(max_x_gpu); // release the reduction buffers allocated with cudaMalloc above
	cudaFree(max_f_val_gpu);
	cudaFree(max_delta_x_gpu);
	return success;
}
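/* Host driver: one Newton stage for a whole batch of paths. Paths with
   path_success == 0 are compacted into path_idx_host and copied to the device;
   each iteration solves their linearized systems with the indexed MGS kernels,
   applies the corrections, and shrinks the active set as paths pass the
   correction and residual tests. A final check_kernel records convergence in
   newton_success and advances the ring-buffer slot of each converged path. */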
bool newton(GPUWorkspace& workspace, GPUInst& inst, Parameter path_parameter, bool debug=false) {
	debug = true; // verbose output is forced on here, overriding the caller's flag
int path_idx_test = 0;
	int rowsLog2 = log2ceil(inst.n_eq); // ceil for sum reduction (unused here; kept for the tree-reduction debug kernels)
	int dimLog2 = log2ceil(inst.dim); // ceil for sum reduction (unused here; kept for the tree-reduction debug kernels)
int n_path = workspace.n_path;
for(int path_idx=0; path_idx<n_path; path_idx++){
workspace.newton_success_host[path_idx] = workspace.path_success_host[path_idx];
//std::cout << "workspace.success_host[path_idx] = " \
<< workspace.success_host[path_idx] << std::endl;
}
cudaMemcpy(workspace.newton_success, workspace.newton_success_host, n_path*sizeof(int),
cudaMemcpyHostToDevice);
int n_path_continuous = 0;
//std::cout << "newton_success" << std::endl;
for(int path_idx=0; path_idx<n_path; path_idx++){
//std::cout << path_idx << " " << workspace.newton_success_host[path_idx] << std::endl;
if(workspace.newton_success_host[path_idx] == 0){
workspace.path_idx_host[n_path_continuous] = path_idx;
n_path_continuous += 1;
}
}
workspace.n_path_continuous = n_path_continuous;
/*if(debug){
std::cout << "n_path_continuous" << std::endl;
for(int path_idx=0; path_idx<n_path_continuous; path_idx++){
std::cout << path_idx << " " << workspace.path_idx_host[path_idx] << std::endl;
}
}*/
cudaMemcpy(workspace.path_idx, workspace.path_idx_host, n_path*sizeof(int), \
cudaMemcpyHostToDevice);
dim3 max_grid = get_grid(n_path_continuous,inst.predict_BS,1);
max_x_double_kernel<<<max_grid, inst.predict_BS>>>(workspace.x_array, inst.dim, \
n_path_continuous, workspace.max_x_gpu, workspace.workspace_size, \
workspace.x_t_idx_mult, workspace.path_idx);
if(debug){
cudaMemcpy(workspace.max_x_host, workspace.max_x_gpu, n_path*sizeof(double),
cudaMemcpyDeviceToHost);
for(int path_idx=0; path_idx<n_path; path_idx++){
//if(path_idx == path_idx_test){
std::cout << " max_x_gpu " << path_idx << "= " << workspace.max_x_host[path_idx] << std::endl;
//}
}
}
eval(workspace, inst);
inst.n_eval_GPU++;
max_relative_double_kernel<<<max_grid, inst.predict_BS>>>(workspace.f_val, inst.n_eq,\
n_path_continuous, workspace.max_f_val_gpu, workspace.r_max_f_val_gpu, workspace.max_x_gpu, \
workspace.workspace_size, workspace.path_idx);
if(debug){
cudaMemcpy(workspace.max_f_val_host, workspace.max_f_val_gpu, n_path*sizeof(double),
cudaMemcpyDeviceToHost);
cudaMemcpy(workspace.r_max_f_val_host, workspace.r_max_f_val_gpu, n_path*sizeof(double),
cudaMemcpyDeviceToHost);
for(int path_idx=0; path_idx<n_path; path_idx++){
std::cout << " max_f_value " << path_idx << " = " << workspace.max_f_val_host[path_idx] \
<< " " << workspace.r_max_f_val_host[path_idx] << std::endl;
}
}
dim3 check_grid = get_grid(n_path,inst.predict_BS,1);
for (int it_idx = 0; it_idx < path_parameter.max_it; it_idx++) {
if(debug){
std::cout << " Iteration " << it_idx << std::endl;
}
if(inst.dim <= BS_QR){
mgs_small_idx(workspace.V, workspace.R, workspace.sol, inst.n_eq, inst.dim+1,\
workspace.workspace_size, n_path_continuous, workspace.path_idx);
//array_max_double_kernel<<<1,inst.dim>>>(workspace.sol, inst.dim, dimLog2, max_delta_x_gpu);
/*CT** sol_gpu = new CT*[n_path];
CT** matrix_gpu_q = new CT*[n_path];
CT** matrix_gpu_r = new CT*[n_path];
for(int path_idx=0; path_idx<n_path; path_idx++){
sol_gpu[path_idx] = workspace.get_sol(path_idx);
for(int var_idx=0; var_idx<inst.dim; var_idx++){
std::cout << path_idx << " " << var_idx << " " << sol_gpu[path_idx][var_idx];
}
matrix_gpu_q[path_idx] = workspace.get_matrix(path_idx);
matrix_gpu_r[path_idx] = workspace.get_matrix_r(path_idx);
}*/
max_relative_double_kernel<<<max_grid, inst.predict_BS>>>(workspace.sol, inst.dim, n_path_continuous, \
workspace.max_delta_x_gpu, workspace.r_max_delta_x_gpu, workspace.max_x_gpu, \
workspace.workspace_size, workspace.path_idx);
}
else{
mgs_large_block(workspace.V, workspace.R, workspace.P, workspace.sol, inst.n_eq, inst.dim+1);
//array_max_double_kernel<<<1,inst.dim>>>(workspace.sol, inst.dim, dimLog2, max_delta_x_gpu);
}
if(debug){
cudaMemcpy(workspace.max_delta_x_host, workspace.max_delta_x_gpu, n_path*sizeof(double),
cudaMemcpyDeviceToHost);
cudaMemcpy(workspace.r_max_delta_x_host, workspace.r_max_delta_x_gpu, n_path*sizeof(double),
cudaMemcpyDeviceToHost);
for(int path_idx=0; path_idx<n_path; path_idx++){
//if(path_idx == path_idx_test){
std::cout << " max_delta_x " << path_idx << " = " << workspace.max_delta_x_host[path_idx] \
<< " " << workspace.r_max_delta_x_host[path_idx] << std::endl;
//}
}
}
update_x_kernel<<<n_path_continuous, inst.dim>>>(workspace.x_array, workspace.sol, inst.dim, \
workspace.workspace_size, workspace.x_t_idx_mult, workspace.path_idx);
//workspace.print_x_mult();
check_kernel<<<check_grid, inst.predict_BS>>>(workspace.max_delta_x_gpu, workspace.r_max_delta_x_gpu, \
workspace.newton_success, n_path, workspace.end_range, path_parameter.err_min_round_off, path_parameter.err_min_round_off_refine);
cudaMemcpy(workspace.newton_success_host, workspace.newton_success, n_path*sizeof(int), \
cudaMemcpyDeviceToHost);
n_path_continuous = 0;
//std::cout << "newton_success" << std::endl;
for(int path_idx=0; path_idx<n_path; path_idx++){
//std::cout << path_idx << " " << workspace.newton_success_host[path_idx] << std::endl;
if(workspace.newton_success_host[path_idx] == 0){
workspace.path_idx_host[n_path_continuous] = path_idx;
n_path_continuous += 1;
}
}
		if(n_path_continuous==0){
			break; // no active paths left; avoid launching zero-sized grids below
		}
		workspace.n_path_continuous = n_path_continuous;
max_grid = get_grid(n_path_continuous,inst.predict_BS,1);
/*std::cout << "n_path_continuous" << std::endl;
for(int path_idx=0; path_idx<n_path_continuous; path_idx++){
std::cout << path_idx << " " << workspace.path_idx_host[path_idx] << std::endl;
}*/
cudaMemcpy(workspace.path_idx, workspace.path_idx_host, n_path*sizeof(int), \
cudaMemcpyHostToDevice);
//std::cout << "Correct X:" << std::endl;
//workspace.print_x_mult();
max_x_double_kernel<<<max_grid, inst.predict_BS>>>(workspace.x_array, inst.dim, \
n_path_continuous, workspace.max_x_gpu, workspace.workspace_size, \
workspace.x_t_idx_mult, workspace.path_idx);
if(debug){
cudaMemcpy(workspace.max_x_host, workspace.max_x_gpu, n_path*sizeof(double),
cudaMemcpyDeviceToHost);
for(int path_idx=0; path_idx<n_path; path_idx++){
//if(path_idx == path_idx_test){
std::cout << " max_x_gpu " << path_idx << "= " << workspace.max_x_host[path_idx] << std::endl;
//}
}
}
/*cudaMemcpy(max_x_host, max_x_gpu, n_path*sizeof(double),
cudaMemcpyDeviceToHost);
for(int path_idx=0; path_idx<n_path; path_idx++){
std::cout << " max_x_gpu " << path_idx << "= " << max_x_host[path_idx] << std::endl;
}*/
eval(workspace, inst);
//inst.n_eval_GPU++;
max_relative_double_kernel<<<max_grid, inst.predict_BS>>>(workspace.f_val, inst.n_eq, \
n_path_continuous, workspace.max_f_val_gpu, workspace.r_max_f_val_gpu, workspace.max_x_gpu, \
workspace.workspace_size, workspace.path_idx);
check_kernel<<<check_grid, inst.predict_BS>>>(workspace.max_f_val_gpu, workspace.r_max_f_val_gpu, \
workspace.newton_success, n_path, workspace.end_range, path_parameter.err_min_round_off, path_parameter.err_min_round_off_refine);
cudaMemcpy(workspace.newton_success_host, workspace.newton_success, n_path*sizeof(int),
cudaMemcpyDeviceToHost);
n_path_continuous = 0;
//std::cout << "newton_success" << std::endl;
for(int path_idx=0; path_idx<n_path; path_idx++){
//std::cout << path_idx << " " << workspace.newton_success_host[path_idx] << std::endl;
if(workspace.newton_success_host[path_idx] == 0){
workspace.path_idx_host[n_path_continuous] = path_idx;
n_path_continuous += 1;
}
}
		if(n_path_continuous==0){
			break; // no active paths left; avoid launching zero-sized grids below
		}
		workspace.n_path_continuous = n_path_continuous;
max_grid = get_grid(n_path_continuous,inst.predict_BS,1);
std::cout << "n_path_continuous" << std::endl;
for(int path_idx=0; path_idx<n_path_continuous; path_idx++){
std::cout << path_idx << " " << workspace.path_idx_host[path_idx] << std::endl;
}
cudaMemcpy(workspace.path_idx, workspace.path_idx_host, n_path*sizeof(int),
cudaMemcpyHostToDevice);
if(debug){
cudaMemcpy(workspace.max_f_val_host, workspace.max_f_val_gpu, n_path*sizeof(double),
cudaMemcpyDeviceToHost);
cudaMemcpy(workspace.r_max_f_val_host, workspace.r_max_f_val_gpu, n_path*sizeof(double),
cudaMemcpyDeviceToHost);
for(int path_idx=0; path_idx<n_path; path_idx++){
//if(path_idx == path_idx_test){
std::cout << " max_f_value " << path_idx << " = " << workspace.max_f_val_host[path_idx] \
<< " " << workspace.r_max_f_val_host[path_idx] << std::endl;
//}
}
}
}
	check_kernel<<<check_grid, inst.predict_BS>>>(workspace.max_f_val_gpu, workspace.r_max_f_val_gpu, workspace.max_delta_x_gpu, \
workspace.r_max_delta_x_gpu, workspace.path_success, workspace.newton_success, workspace.n_point_mult, workspace.x_t_idx_mult, workspace.n_array, \
workspace.workspace_size, n_path, workspace.end_range, path_parameter.err_min_round_off, path_parameter.err_min_round_off_refine);
cudaMemcpy(workspace.newton_success_host, workspace.newton_success, n_path*sizeof(int),
cudaMemcpyDeviceToHost);
if(debug){
std::cout << workspace.newton_success_host[path_idx_test] << std::endl;
}
return true;
}
bool newton_align(GPUWorkspace& workspace, GPUInst& inst, Parameter path_parameter, bool debug=false) {
//std::cout << "Newton Align" << std::endl;
//debug = true;
int path_idx_test = 0;
	bool debug_all = false;
//debug_all = true;
	int rowsLog2 = log2ceil(inst.n_eq); // ceil for sum reduction (unused here; kept for the tree-reduction debug kernels)
	int dimLog2 = log2ceil(inst.dim); // ceil for sum reduction (unused here; kept for the tree-reduction debug kernels)
int n_path = workspace.n_path;
for(int path_idx=0; path_idx<n_path; path_idx++){
workspace.newton_success_host[path_idx] = workspace.path_success_host[path_idx];
//std::cout << "workspace.success_host[path_idx] = " \
<< workspace.success_host[path_idx] << std::endl;
}
cudaMemcpy(workspace.newton_success, workspace.newton_success_host, n_path*sizeof(int),
cudaMemcpyHostToDevice);
//std::cout << "newton_success" << std::endl;
int n_path_continuous = 0;
for(int path_idx=0; path_idx<n_path; path_idx++){
//std::cout << path_idx << " " << workspace.newton_success_host[path_idx] << std::endl;
if(workspace.newton_success_host[path_idx] == 0){
workspace.path_idx_host[n_path_continuous] = path_idx;
n_path_continuous += 1;
}
}
cudaMemcpy(workspace.path_idx, workspace.path_idx_host, n_path*sizeof(int), \
cudaMemcpyHostToDevice);
dim3 init_grid = get_grid(n_path_continuous, inst.coef_BS, 1);
mult_x_init<<<init_grid, inst.coef_BS>>>(workspace.x_array, workspace.t_array, workspace.alpha_gpu, \
workspace.x_mult, workspace.newton_t_mult, workspace.one_minor_t, \
workspace.path_idx, workspace.x_t_idx_mult, n_path_continuous, inst.dim, workspace.n_predictor);
dim3 max_grid = get_grid(n_path_continuous,inst.predict_BS,1);
max_x_double_kernel_align<<<max_grid, inst.predict_BS>>>(workspace.x_mult, inst.dim, \
n_path_continuous, workspace.max_x_gpu, workspace.workspace_size, \
workspace.x_t_idx_mult, workspace.path_idx);
if(debug){
cudaMemcpy(workspace.max_x_host, workspace.max_x_gpu, n_path*sizeof(double),
cudaMemcpyDeviceToHost);
for(int path_idx=0; path_idx<n_path; path_idx++){
if(path_idx == path_idx_test || debug_all){
std::cout << " max_x_gpu " << path_idx << "= " << workspace.max_x_host[path_idx] << std::endl;
}
}
}
eval_mult(workspace, inst);
inst.n_eval_GPU++;
/*max_relative_double_kernel2<<<max_grid, inst.predict_BS>>>(workspace.f_val, inst.n_eq,\
n_path_continuous, workspace.max_f_val_gpu, workspace.max_f_val_last_gpu, workspace.r_max_f_val_gpu, workspace.max_x_gpu, \
workspace.workspace_size, workspace.path_idx);*/
max_relative_double_kernel3<<<max_grid, inst.predict_BS>>>(workspace.f_val, inst.n_eq,\
n_path_continuous, workspace.n_path, workspace.path_idx, \
workspace.max_f_val_gpu, workspace.max_f_val_last_gpu, workspace.r_max_f_val_gpu, workspace.max_x_gpu);
if(debug){
cudaMemcpy(workspace.max_f_val_host, workspace.max_f_val_gpu, n_path*sizeof(double),
cudaMemcpyDeviceToHost);
cudaMemcpy(workspace.r_max_f_val_host, workspace.r_max_f_val_gpu, n_path*sizeof(double),
cudaMemcpyDeviceToHost);
for(int path_idx=0; path_idx<n_path; path_idx++){
if(path_idx == path_idx_test || debug_all){
std::cout << " max_f_value " << path_idx << " = " << workspace.max_f_val_host[path_idx] \
<< " " << workspace.r_max_f_val_host[path_idx] << std::endl;
}
}
}
dim3 check_grid = get_grid(n_path,inst.predict_BS,1);
for (int it_idx = 0; it_idx < path_parameter.max_it; it_idx++) {
if(debug){
std::cout << " Iteration " << it_idx << std::endl;
}
if(inst.dim <= BS_QR){
/*std::cout << "matrix" << std::endl;
CT** matrix_gpu_q = new CT*[n_path];
//CT** matrix_gpu_r = new CT*[n_path];
for(int path_idx=0; path_idx<n_path; path_idx++){
matrix_gpu_q[path_idx] = workspace.get_matrix(path_idx);
//matrix_gpu_r[path_idx] = workspace.get_matrix_r(path_idx);
if(path_idx==path_idx_test){
for(int var_idx=1; var_idx<=1; var_idx++){
CT tmp(0.0,0.0);
for(int eq_idx=0; eq_idx<inst.n_eq; eq_idx++){
std::cout << var_idx << " " << eq_idx << " " \
<< matrix_gpu_q[path_idx][eq_idx+var_idx*inst.n_eq];
tmp += matrix_gpu_q[path_idx][eq_idx+var_idx*inst.n_eq];
}
std::cout << tmp;
}
std::cout << std::endl;
}
}*/
//std::cout << "n_matrix = " << workspace.n_matrix << std::endl;
mgs_small1_idx(workspace.matrix_horizontal_mult, workspace.R, workspace.sol, inst.n_eq, inst.dim+1,\
workspace.workspace_size, workspace.n_matrix, workspace.n_matrix_R, n_path_continuous, workspace.path_idx);
//array_max_double_kernel<<<1,inst.dim>>>(workspace.sol, inst.dim, dimLog2, max_delta_x_gpu);
/*CT** sol_gpu = new CT*[n_path];
//CT** matrix_gpu_q = new CT*[n_path];
//CT** matrix_gpu_r = new CT*[n_path];
for(int path_idx=0; path_idx<n_path; path_idx++){
sol_gpu[path_idx] = workspace.get_sol(path_idx);
//for(int var_idx=0; var_idx<inst.dim; var_idx++){
if(path_idx==path_idx_test){
for(int var_idx=0; var_idx<inst.dim; var_idx++){
std::cout << path_idx << " " << var_idx << " " << sol_gpu[path_idx][var_idx];
}
}
//matrix_gpu_q[path_idx] = workspace.get_matrix(path_idx);
//matrix_gpu_r[path_idx] = workspace.get_matrix_r(path_idx);
}*/
max_relative_double_kernel<<<max_grid, inst.predict_BS>>>(workspace.sol, inst.dim, n_path_continuous, \
workspace.max_delta_x_gpu, workspace.r_max_delta_x_gpu, workspace.max_x_gpu, \
workspace.workspace_size, workspace.path_idx);
}
else{
mgs_large_block(workspace.V, workspace.R, workspace.P, workspace.sol, inst.n_eq, inst.dim+1);
//array_max_double_kernel<<<1,inst.dim>>>(workspace.sol, inst.dim, dimLog2, max_delta_x_gpu);
}
if(debug){
cudaMemcpy(workspace.max_delta_x_host, workspace.max_delta_x_gpu, n_path*sizeof(double),
cudaMemcpyDeviceToHost);
cudaMemcpy(workspace.r_max_delta_x_host, workspace.r_max_delta_x_gpu, n_path*sizeof(double),
cudaMemcpyDeviceToHost);
for(int path_idx=0; path_idx<n_path; path_idx++){
if(path_idx == path_idx_test|| debug_all){
std::cout << " max_delta_x " << path_idx << " = " << workspace.max_delta_x_host[path_idx] \
<< " " << workspace.r_max_delta_x_host[path_idx] << std::endl;
}
}
}
update_x_kernel<<<n_path_continuous, inst.dim>>>(workspace.x_array, workspace.sol, inst.dim, \
workspace.workspace_size, workspace.x_t_idx_mult, workspace.path_idx, workspace.n_predictor);
//workspace.print_x_mult();
check_kernel<<<check_grid, inst.predict_BS>>>(workspace.max_delta_x_gpu, workspace.r_max_delta_x_gpu, \
workspace.newton_success, n_path, workspace.end_range, path_parameter.err_min_round_off, path_parameter.err_min_round_off_refine);
cudaMemcpy(workspace.newton_success_host, workspace.newton_success, n_path*sizeof(int), \
cudaMemcpyDeviceToHost);
n_path_continuous = 0;
//std::cout << "newton_success" << std::endl;
for(int path_idx=0; path_idx<n_path; path_idx++){
//std::cout << path_idx << " " << workspace.newton_success_host[path_idx] << std::endl;
if(workspace.newton_success_host[path_idx] == 0){
workspace.path_idx_host[n_path_continuous] = path_idx;
n_path_continuous += 1;
}
}
if(n_path_continuous==0){
break;
}
workspace.n_path_continuous = n_path_continuous;
max_grid = get_grid(n_path_continuous,inst.predict_BS,1);
/*std::cout << "n_path_continuous = " << n_path_continuous << " : ";
for(int path_idx=0; path_idx<n_path_continuous; path_idx++){
std::cout << workspace.path_idx_host[path_idx] << ", ";
}
std::cout << std::endl;*/
cudaMemcpy(workspace.path_idx, workspace.path_idx_host, n_path*sizeof(int), \
cudaMemcpyHostToDevice);
//std::cout << "Correct X:" << std::endl;
//workspace.print_x_mult();
init_grid = get_grid(n_path_continuous, inst.coef_BS, 1);
mult_x_init<<<init_grid, inst.coef_BS>>>(workspace.x_array, workspace.t_array, workspace.alpha_gpu, \
workspace.x_mult, workspace.newton_t_mult, workspace.one_minor_t, \
workspace.path_idx, workspace.x_t_idx_mult, n_path_continuous, inst.dim, workspace.n_predictor);
max_x_double_kernel_align<<<max_grid, inst.predict_BS>>>(workspace.x_mult, inst.dim, \
n_path_continuous, workspace.max_x_gpu, workspace.workspace_size, \
workspace.x_t_idx_mult, workspace.path_idx);
if(debug){
cudaMemcpy(workspace.max_x_host, workspace.max_x_gpu, n_path*sizeof(double),
cudaMemcpyDeviceToHost);
for(int path_idx=0; path_idx<n_path; path_idx++){
if(path_idx == path_idx_test || debug_all){
std::cout << " max_x_gpu " << path_idx << "= " << workspace.max_x_host[path_idx] << std::endl;
}
}
}
eval_mult(workspace, inst);
//inst.n_eval_GPU++;
/*GT* tmp_f_val = workspace.matrix_horizontal_mult + inst.n_eq*inst.dim;
max_relative_double_kernel<<<max_grid, inst.predict_BS>>>(tmp_f_val, inst.n_eq, \
n_path_continuous, workspace.max_f_val_gpu, workspace.r_max_f_val_gpu, workspace.max_x_gpu, \
workspace.n_matrix, workspace.path_idx);*/
max_relative_double_kernel3<<<max_grid, inst.predict_BS>>>(workspace.f_val, inst.n_eq,\
n_path_continuous, workspace.n_path, workspace.path_idx, \
workspace.max_f_val_gpu, workspace.max_f_val_last_gpu, workspace.r_max_f_val_gpu, workspace.max_x_gpu);
if(debug){
cudaMemcpy(workspace.max_f_val_host, workspace.max_f_val_gpu, n_path*sizeof(double),
cudaMemcpyDeviceToHost);
cudaMemcpy(workspace.r_max_f_val_host, workspace.r_max_f_val_gpu, n_path*sizeof(double),
cudaMemcpyDeviceToHost);
for(int path_idx=0; path_idx<n_path; path_idx++){
if(path_idx == path_idx_test || debug_all){
std::cout << " max_f_value " << path_idx << " = " << workspace.max_f_val_host[path_idx] \
<< " " << workspace.r_max_f_val_host[path_idx] << std::endl;
}
}
}
check_kernel<<<check_grid, inst.predict_BS>>>(workspace.max_f_val_gpu, workspace.r_max_f_val_gpu, workspace.max_f_val_last_gpu, \
workspace.newton_success, n_path, workspace.end_range, path_parameter.err_min_round_off, path_parameter.err_min_round_off_refine);
cudaMemcpy(workspace.newton_success_host, workspace.newton_success, n_path*sizeof(int),
cudaMemcpyDeviceToHost);
n_path_continuous = 0;
//std::cout << "newton_success" << std::endl;
for(int path_idx=0; path_idx<n_path; path_idx++){
//std::cout << path_idx << " " << workspace.newton_success_host[path_idx] << std::endl;
if(workspace.newton_success_host[path_idx] == 0){
workspace.path_idx_host[n_path_continuous] = path_idx;
n_path_continuous += 1;
}
}
if(n_path_continuous==0){
break;
}
workspace.n_path_continuous = n_path_continuous;
max_grid = get_grid(n_path_continuous,inst.predict_BS,1);
/*std::cout << "n_path_continuous = " << n_path_continuous << " : ";
for(int path_idx=0; path_idx<n_path_continuous; path_idx++){
std::cout << workspace.path_idx_host[path_idx] << ", ";
}
std::cout << std::endl;*/
cudaMemcpy(workspace.path_idx, workspace.path_idx_host, n_path*sizeof(int),
cudaMemcpyHostToDevice);
}
	check_kernel<<<check_grid, inst.predict_BS>>>(workspace.max_f_val_gpu, workspace.r_max_f_val_gpu, workspace.max_delta_x_gpu, \
workspace.r_max_delta_x_gpu, workspace.path_success, workspace.newton_success, workspace.n_point_mult, workspace.x_t_idx_mult, workspace.n_array, \
workspace.workspace_size, n_path, workspace.end_range, path_parameter.err_min_round_off, path_parameter.err_min_round_off_refine);
cudaMemcpy(workspace.newton_success_host, workspace.newton_success, n_path*sizeof(int),
cudaMemcpyDeviceToHost);
if(debug){
std::cout << workspace.newton_success_host[path_idx_test] << std::endl;
}
return true;
}
bool GPU_Newton(CPUInstHom& hom, Parameter path_parameter, CT* cpu_sol0, CT cpu_t, CT*& x_new, int n_path) {
cout << "Newton ";
cout << "max_it = " << path_parameter.max_it << endl;
cout << "eps = " << path_parameter.err_max_delta_x << endl;
//clock_t begin = clock();
cuda_set();
GPUInst inst(hom, n_path);
int mon_pos_size = hom.CPU_inst_hom_mon.mon_pos_size;
if(MON_EVAL_METHOD == 1){
mon_pos_size = hom.CPU_inst_hom_block.mon_pos_block_size + inst.n_mon_level[0]*2;
}
GPUWorkspace workspace(mon_pos_size, inst.n_coef, inst.n_constant, inst.n_eq, inst.dim, path_parameter.n_predictor, inst.alpha);
workspace.update_x_t_value(cpu_sol0, cpu_t);
clock_t begin = clock();
bool success = newton(workspace, inst, path_parameter);
clock_t end = clock();
double timeSec_Newton = (end - begin) / static_cast<double>( CLOCKS_PER_SEC );
cout << "Path GPU Newton Time: "<< timeSec_Newton << endl;
x_new = workspace.get_x();
/*clock_t end = clock();
double timeSec = (end - begin) / static_cast<double>( CLOCKS_PER_SEC );
cout << "done: "<< timeSec << endl;*/
return success;
}
|
ca609895d66c635fda18120f800f9c0376cc0adc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "book.h"
#define UF 4
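/* Orio-generated AXPY kernel, unrolled by UF = 4: each thread updates four
   consecutive elements of y += a1 * x1. The guard tid <= n - UF keeps every
   unrolled access in bounds. */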
__global__ void orcu_kernel6(int n, int orcu_var3, double a1, double* y, double* x1) {
int tid=UF*(blockIdx.x*blockDim.x+threadIdx.x)+orcu_var3;
if (tid<=n-UF) {
{
y[tid]=y[tid]+a1*x1[tid];
int index = tid+1;
y[index]=y[index]+a1*x1[index];
index = tid+2;
y[index]=y[index]+a1*x1[index];
index = tid+3;
y[index]=y[index]+a1*x1[index];
}
}
}
//__global__ void orcu_kernel11(int n, int orcu_var8, double a1, double* y, double* x1) {
//int tid=blockIdx.x*blockDim.x+threadIdx.x+orcu_var8;
//if (tid<=n-1) {
//y[tid]=y[tid]+a1*x1[tid];
//}
//}
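/* Note: with the remainder kernel above commented out, elements past the last
   full group of UF are never updated, so n is effectively assumed to be a
   multiple of UF. */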
void axpy1(int n, double *y, double a1, double *x1)
{
register int i;
/*@ begin Loop(
transform Composite(
cuda = (16,False, False, 1)
,scalarreplace = (False, 'int')
, unrolljam = (['i'], [2])
)
{
for (i=0; i<=n-1; i++) {
y[i]=y[i]+a1*x1[i];
}
}
) @*/
hipEvent_t start, stop;
HANDLE_ERROR(hipEventCreate(&start));
HANDLE_ERROR(hipEventCreate(&stop));
{
{
int orio_lbound1=0;
//{
/*declare variables*/
double *dev_y, *dev_x1;
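      /* TC (threads per block) -- like NN and AA in main() -- is assumed to be
         supplied as a compile-time macro, e.g. -DTC=256 -DNN=1000000 -DAA=2.0 */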
int nthreads=TC;
/*calculate device dimensions*/
dim3 dimGrid, dimBlock;
dimBlock.x=nthreads;
dimGrid.x=(n+nthreads-1)/nthreads;
dimGrid.x=(dimGrid.x+UF-1)/UF;
printf("num of blocks: %d\n", dimGrid.x);
/*allocate device memory*/
int nbytes=n*sizeof(double);
hipMalloc((void**)&dev_y,nbytes);
hipMalloc((void**)&dev_x1,nbytes);
/*copy data from host to device*/
hipMemcpy(dev_y,y,nbytes,hipMemcpyHostToDevice);
hipMemcpy(dev_x1,x1,nbytes,hipMemcpyHostToDevice);
/*invoke device kernel*/
int orcu_var3=orio_lbound1;
HANDLE_ERROR(hipEventRecord(start, 0));
hipLaunchKernelGGL(( orcu_kernel6), dim3(dimGrid),dim3(dimBlock), 0, 0, n,orcu_var3,a1,dev_y,dev_x1);
HANDLE_ERROR(hipEventRecord(stop, 0));
/*copy data from device to host*/
hipMemcpy(y,dev_y,nbytes,hipMemcpyDeviceToHost);
/*free allocated memory*/
hipFree(dev_y);
hipFree(dev_x1);
//}
//int orio_lbound2=n-((n-(0))%2);
{
/*declare variables*/
//double *dev_y, *dev_x1;
//int nthreads=TC;
/*calculate device dimensions*/
//dim3 dimGrid, dimBlock;
//dimBlock.x=nthreads;
//dimGrid.x=(n+nthreads-1)/nthreads;
/*allocate device memory*/
//int nbytes=n*sizeof(double);
//hipMalloc((void**)&dev_y,nbytes);
//hipMalloc((void**)&dev_x1,nbytes);
/*copy data from host to device*/
//hipMemcpy(dev_y,y,nbytes,hipMemcpyHostToDevice);
//hipMemcpy(dev_x1,x1,nbytes,hipMemcpyHostToDevice);
/*invoke device kernel*/
//int orcu_var8=orio_lbound2;
//orcu_kernel11<<<dimGrid,dimBlock>>>(n,orcu_var8,a1,dev_y,dev_x1);
/*copy data from device to host*/
//hipMemcpy(y,dev_y,nbytes,hipMemcpyDeviceToHost);
/*free allocated memory*/
//hipFree(dev_y);
//hipFree(dev_x1);
}
}
}
/*@ end @*/
HANDLE_ERROR(hipEventSynchronize(stop));
float passedTime;
HANDLE_ERROR(hipEventElapsedTime(&passedTime, start, stop));
HANDLE_ERROR(hipEventDestroy(start));
HANDLE_ERROR(hipEventDestroy(stop));
printf("timePassed: %f ms\n", passedTime);
}
int main(){
double* y = (double*) malloc(sizeof(double)*NN);
double* x1 = (double*) malloc(sizeof(double)*NN);
double a1 = AA;
int i;
for(i=0; i<NN; i++){
y[i] = i;
x1[i] = i;
}
axpy1(NN, y, a1, x1);
for(i=0; i<13; i++)
printf("%f\n", y[i]);
for(i=NN-9; i<NN; i++)
printf("%f\n", y[i]);
return 0;
}
| ca609895d66c635fda18120f800f9c0376cc0adc.cu | #include "book.h"
#define UF 4
__global__ void orcu_kernel6(int n, int orcu_var3, double a1, double* y, double* x1) {
int tid=UF*(blockIdx.x*blockDim.x+threadIdx.x)+orcu_var3;
if (tid<=n-UF) {
{
y[tid]=y[tid]+a1*x1[tid];
int index = tid+1;
y[index]=y[index]+a1*x1[index];
index = tid+2;
y[index]=y[index]+a1*x1[index];
index = tid+3;
y[index]=y[index]+a1*x1[index];
}
}
}
//__global__ void orcu_kernel11(int n, int orcu_var8, double a1, double* y, double* x1) {
//int tid=blockIdx.x*blockDim.x+threadIdx.x+orcu_var8;
//if (tid<=n-1) {
//y[tid]=y[tid]+a1*x1[tid];
//}
//}
void axpy1(int n, double *y, double a1, double *x1)
{
register int i;
/*@ begin Loop(
transform Composite(
cuda = (16,False, False, 1)
,scalarreplace = (False, 'int')
, unrolljam = (['i'], [2])
)
{
for (i=0; i<=n-1; i++) {
y[i]=y[i]+a1*x1[i];
}
}
) @*/
cudaEvent_t start, stop;
HANDLE_ERROR(cudaEventCreate(&start));
HANDLE_ERROR(cudaEventCreate(&stop));
{
{
int orio_lbound1=0;
//{
/*declare variables*/
double *dev_y, *dev_x1;
int nthreads=TC;
/*calculate device dimensions*/
dim3 dimGrid, dimBlock;
dimBlock.x=nthreads;
dimGrid.x=(n+nthreads-1)/nthreads;
dimGrid.x=(dimGrid.x+UF-1)/UF;
printf("num of blocks: %d\n", dimGrid.x);
/*allocate device memory*/
int nbytes=n*sizeof(double);
cudaMalloc((void**)&dev_y,nbytes);
cudaMalloc((void**)&dev_x1,nbytes);
/*copy data from host to device*/
cudaMemcpy(dev_y,y,nbytes,cudaMemcpyHostToDevice);
cudaMemcpy(dev_x1,x1,nbytes,cudaMemcpyHostToDevice);
/*invoke device kernel*/
int orcu_var3=orio_lbound1;
HANDLE_ERROR(cudaEventRecord(start, 0));
orcu_kernel6<<<dimGrid,dimBlock>>>(n,orcu_var3,a1,dev_y,dev_x1);
HANDLE_ERROR(cudaEventRecord(stop, 0));
/*copy data from device to host*/
cudaMemcpy(y,dev_y,nbytes,cudaMemcpyDeviceToHost);
/*free allocated memory*/
cudaFree(dev_y);
cudaFree(dev_x1);
//}
//int orio_lbound2=n-((n-(0))%2);
{
/*declare variables*/
//double *dev_y, *dev_x1;
//int nthreads=TC;
/*calculate device dimensions*/
//dim3 dimGrid, dimBlock;
//dimBlock.x=nthreads;
//dimGrid.x=(n+nthreads-1)/nthreads;
/*allocate device memory*/
//int nbytes=n*sizeof(double);
//cudaMalloc((void**)&dev_y,nbytes);
//cudaMalloc((void**)&dev_x1,nbytes);
/*copy data from host to device*/
//cudaMemcpy(dev_y,y,nbytes,cudaMemcpyHostToDevice);
//cudaMemcpy(dev_x1,x1,nbytes,cudaMemcpyHostToDevice);
/*invoke device kernel*/
//int orcu_var8=orio_lbound2;
//orcu_kernel11<<<dimGrid,dimBlock>>>(n,orcu_var8,a1,dev_y,dev_x1);
/*copy data from device to host*/
//cudaMemcpy(y,dev_y,nbytes,cudaMemcpyDeviceToHost);
/*free allocated memory*/
//cudaFree(dev_y);
//cudaFree(dev_x1);
}
}
}
/*@ end @*/
HANDLE_ERROR(cudaEventSynchronize(stop));
float passedTime;
HANDLE_ERROR(cudaEventElapsedTime(&passedTime, start, stop));
HANDLE_ERROR(cudaEventDestroy(start));
HANDLE_ERROR(cudaEventDestroy(stop));
printf("timePassed: %f ms\n", passedTime);
}
int main(){
double* y = (double*) malloc(sizeof(double)*NN);
double* x1 = (double*) malloc(sizeof(double)*NN);
double a1 = AA;
int i;
for(i=0; i<NN; i++){
y[i] = i;
x1[i] = i;
}
axpy1(NN, y, a1, x1);
for(i=0; i<13; i++)
printf("%f\n", y[i]);
for(i=NN-9; i<NN; i++)
printf("%f\n", y[i]);
return 0;
}
|
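orcu_kernel6 above updates four elements per thread but guards with tid <= n-UF, so when n is not a multiple of UF the trailing elements are never touched (the commented-out orcu_kernel11 was evidently the cleanup pass). A grid-stride sketch that covers any n without a tail kernel, offered as an alternative rather than as the generator's method:

__global__ void axpy_grid_stride(int n, double a, double* y, const double* x) {
    // each thread walks the array in strides of the whole grid, so no remainder is left over
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
        y[i] += a * x[i];
    }
}

A launch such as axpy_grid_stride<<<(n + 255) / 256, 256>>>(n, a1, dev_y, dev_x1); would replace both generated kernels for arbitrary n.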
301e2f2e25cb87eca728b92fba1c641c936ffbc9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "main.cuh"
__device__ int get_id() {
return (threadIdx.x + blockDim.x*blockIdx.x);
}
__global__ void gpu_astar_init(Problem hp, void *node_start, void *node_dest, void *nodes_unusual_raw) {
p = hp;
memory_init(p.memory, p.mem_size);
queues_init(p.queues, p.queues_size, p.k);
p.queues_items = 1;
map_init(p.map, p.map_size);
if (p.type == PROBLEM_TYPE_PATHFINDING) {
map_init(p.pathfinding.map_unusual_nodes, p.pathfinding.map_size);
NodePathfinding* nodes_unusual = (NodePathfinding*)nodes_unusual_raw;
Node *node;
for (int i = 0; i < p.pathfinding.unusual_nodes_count; i++) {
node = (Node*)memory_allocate(p.memory, sizeof(Node));
node->data = (void*)&nodes_unusual[i];
node->id = node_id(node, PROBLEM_TYPE_PATHFINDING, 0, p.pathfinding.dim_x);
node = map_set(p.pathfinding.map_unusual_nodes, node, node->id);
assert(node == NULL);
}
}
p.node_start = (Node*)memory_allocate(p.memory, sizeof(Node));
p.node_start->data = node_start;
p.node_start->id = node_id(p.node_start, p.type, p.sliding.n, p.pathfinding.dim_x);
p.node_destination = (Node*)memory_allocate(p.memory, sizeof(Node));
p.node_destination->data = node_dest;
p.node_destination->id = node_id(p.node_destination, p.type, p.sliding.n, p.pathfinding.dim_x);
p.solutions_min_g = INT_MAX;
p.queues_min_fg = INT_MAX - 16;
solution_found = false;
}
__global__ void gpu_astar_final(Node** solutions, void* path, int* path_count, int path_count_max) {
Node *m = NULL;
Node *node;
for (int q_id = 0; q_id < p.k; q_id++) {
node = solutions[q_id];
if (node == NULL) {
continue;
}
if (m == NULL || node->g < m->g) {
m = node;
}
}
size_t nsize = node_size(p.type, p.sliding.n);
int i = 0;
size_c path_node;
while (m != NULL) {
path_node = ((size_c)path + i*nsize);
node_copy(m->data, (void*)path_node, nsize);
m = m->previous_node;
i++;
}
*path_count = i;
}
__device__ void gpu_astar_update_queues_min_fg() {
bool empty = true;
Node *q_min;
Node *total_min = NULL;
for (int i = 0; i < p.k; i++) {
q_min = queue_min(&p.queues[i]);
if (q_min == NULL) {
continue;
}
empty = false;
if (total_min == NULL || q_min->fg < total_min->fg) {
total_min = q_min;
}
}
if (empty) {
return;
}
// if (p.queues_min_fg != total_min->fg) {
// printf("QUEUES MIN FG: %d -> %d\n", p.queues_min_fg, total_min->fg);
// }
p.queues_min_fg = total_min->fg;
}
__global__ void gpu_astar(Node** nodes, int nodes_count, Node** solutions) {
int expand_factor = 4*p.type;
int id = get_id();
int q_id = id / expand_factor;
assert(q_id < p.k);
int is_main = (id % expand_factor == 0) ? 1 : 0;
int id_main = q_id * (expand_factor+1);
int id_node = id_main + (id % expand_factor) + 1;
if (id < nodes_count) {
nodes[id] = NULL;
}
if (id < p.k) {
solutions[id] = NULL;
}
if (id == 0) {
queue_push(&p.queues[0], p.node_start);
}
assert(q_id < p.k);
Node *node;
int queue_items_diff;
bool queue_popped = false;
bool force_close;
int push_q_id;
while (!solution_found && p.queues_items > 0) {
// Pop the min element from queue.
if (is_main) {
queue_pop(&p.queues[q_id], &nodes[id_main], p.queues_min_fg + 5);
if (nodes[id_main] != NULL) {
queue_items_diff = -1;
queue_popped = true;
force_close = false;
}
}
if (queue_popped) {
if (nodes[id_main]->g >= p.solutions_min_g) {
nodes[id_main] = NULL;
force_close = true;
}
}
// Expand the elements using one thread per each expand direction.
if (nodes[id_main] != NULL) {
node = nodes[id_main];
if (is_main && node_compare(node->data, p.node_destination->data, p.type, p.sliding.n)) {
if (solutions[q_id] == NULL || node->fg < solutions[q_id]->fg) {
solutions[q_id] = node;
if (solutions[q_id]->g < p.solutions_min_g) {
// printf("SOLUTIONS MIN G: %d -> %d\n", p.solutions_min_g, solutions[q_id]->g);
atomicExch(&p.solutions_min_g, solutions[q_id]->g);
}
}
}
if (solutions[q_id] != NULL && solutions[q_id]->fg <= p.queues_min_fg) {
solution_found = true;
break;
}
if (nodes[id_node] == NULL) {
nodes[id_node] = (Node*)memory_allocate(p.memory, sizeof(Node));
nodes[id_node]->data = memory_allocate(p.memory, node_size(p.type, p.sliding.n));
}
nodes[id_node]->previous_node = nodes[id_main];
bool expanded = node_expand(nodes[id_main], nodes[id_node], p.type, p.sliding.n, p.pathfinding.dim_x, p.pathfinding.dim_y);
nodes[id_node]->id = node_id(nodes[id_node], p.type, p.sliding.n, p.pathfinding.dim_x);
if (p.type == PROBLEM_TYPE_PATHFINDING && expanded) {
Node *unusual = map_get(p.pathfinding.map_unusual_nodes, nodes[id_node]->id);
if (unusual != NULL) {
NodePathfinding* unusualp = (NodePathfinding*)unusual->data;
if (unusualp->weight == -1) {
expanded = false;
} else {
nodes[id_node]->g = nodes[id_main]->g + unusualp->weight;
}
} else {
nodes[id_node]->g = nodes[id_main]->g + 1;
}
nodes[id_node]->fg = nodes[id_node]->g + node_f(nodes[id_node], p.node_destination, PROBLEM_TYPE_PATHFINDING, 0);
} else if (p.type == PROBLEM_TYPE_SLIDING && expanded) {
nodes[id_node]->g = nodes[id_main]->g + 1;
nodes[id_node]->fg = nodes[id_node]->g + node_f(nodes[id_node], p.node_destination, PROBLEM_TYPE_SLIDING, p.sliding.n);
}
if (expanded && nodes[id_node] != NULL) {
if (!map_is_duplicate(p.map, nodes[id_node])) {
assert(nodes[id_node] != NULL);
push_q_id = (nodes[id_node]->id * (q_id+1));
if (push_q_id < 0)
push_q_id *= -1;
push_q_id = push_q_id % p.k;
queue_push(&p.queues[push_q_id], nodes[id_node]);
nodes[id_node] = NULL;
}
}
}
// Fix queues items structure (sort items).
__syncthreads();
if (is_main) {
queue_fix(&p.queues[q_id]);
if (queue_popped) {
if (!force_close) {
for (int i = 0; i < expand_factor; i++) {
if (nodes[id_node + i] == NULL) {
queue_items_diff += 1;
}
}
}
atomicAdd(&p.queues_items, queue_items_diff);
queue_popped = false;
}
nodes[id_main] = NULL;
}
if (id == 0) {
gpu_astar_update_queues_min_fg();
}
}
}
int parse_pathfinding(Problem *problem, char *filename, void **node_start, void **node_dest, void **unusual_nodes) {
FILE *f = fopen(filename, "r");
if (f == NULL) {
perror("couldn't open input-data file");
return 1;
}
if (fscanf(f, "%d,%d\n", &problem->pathfinding.dim_x, &problem->pathfinding.dim_y) < 2) {
perror("couldn't parse pathfinding dimensions");
fclose(f);
return 1;
}
NodePathfinding *start = (NodePathfinding*)malloc(sizeof(NodePathfinding));
if (fscanf(f, "%d,%d\n", &start->x, &start->y) < 2) {
perror("couldn't parse pathfinding start position");
fclose(f);
return 1;
}
*node_start = start;
NodePathfinding *dest = (NodePathfinding*)malloc(sizeof(NodePathfinding));
if (fscanf(f, "%d,%d\n", &dest->x, &dest->y) < 2) {
perror("couldn't parse pathfinding dest position");
fclose(f);
return 1;
}
*node_dest = dest;
int obstacles_count;
if (fscanf(f, "%d\n", &obstacles_count) < 1) {
perror("couldn't parse pathfinding obstacles_count number");
fclose(f);
return 1;
}
NodePathfinding* nodes_obstacles = (NodePathfinding*)malloc(obstacles_count*sizeof(NodePathfinding));
if (nodes_obstacles == NULL) {
perror("couldn't allocate memory for obstacles_count");
fclose(f);
return 1;
}
for (int i = 0; i < obstacles_count; i++) {
if (fscanf(f, "%d,%d\n", &nodes_obstacles[i].x, &nodes_obstacles[i].y) < 2) {
perror("couldn't read obstacle coordinates");
fclose(f);
free(nodes_obstacles);
return 1;
}
nodes_obstacles[i].weight = -1;
}
int nodes_worse_count;
if (fscanf(f, "%d\n", &nodes_worse_count) < 1) {
perror("couldn't parse pathfinding nodes_worse_count number");
fclose(f);
free(nodes_obstacles);
return 1;
}
NodePathfinding* nodes_worse = (NodePathfinding*)malloc(nodes_worse_count*sizeof(NodePathfinding));
if (nodes_worse == NULL) {
perror("couldn't allocate memory for nodes_worse_count");
fclose(f);
return 1;
}
for (int i = 0; i < nodes_worse_count; i++) {
if (fscanf(f, "%d,%d,%d\n", &nodes_worse[i].x, &nodes_worse[i].y, &nodes_worse[i].weight) < 3) {
perror("couldn't read worse node coordinates");
fclose(f);
free(nodes_worse);
free(nodes_obstacles);
return 1;
}
}
problem->pathfinding.unusual_nodes_count = obstacles_count + nodes_worse_count;
NodePathfinding* nodes = (NodePathfinding*)malloc(problem->pathfinding.unusual_nodes_count*sizeof(NodePathfinding));
if (nodes == NULL) {
perror("couldn't allocat ememory for unusual nodes");
free(nodes_obstacles);
free(nodes_worse);
fclose(f);
return 1;
}
for (int i = 0; i < obstacles_count; i++) {
nodes[i] = nodes_obstacles[i];
}
for (int i = 0; i < nodes_worse_count; i++) {
nodes[obstacles_count + i] = nodes_worse[i];
}
*unusual_nodes = (void*)nodes;
free(nodes_obstacles);
free(nodes_worse);
fclose(f);
return 0;
}
char* parse_sliding_read_file(char *filename, size_t max_len) {
char *buffer = (char*)malloc(sizeof(char)*max_len);
FILE *f;
f = fopen(filename, "r");
if (f == NULL) {
perror("couldn't open input-data file");
free(buffer);
return NULL;
}
size_t l = fread(buffer, sizeof(char), max_len, f);
if (ferror(f) != 0) {
fputs("couldn't read input-data file", stderr);
} else {
buffer[l++] = '\0';
}
if (fclose(f)) {
perror("couldn't close input-data file");
free(buffer);
return NULL;
}
return buffer;
}
int parse_sliding(Problem *problem, char *filename, void **node_start, void **node_dest) {
size_t max_len = 1024*1024;
char* buffer = parse_sliding_read_file(filename, max_len);
if (buffer == NULL) {
return 1;
}
int n = 0;
for (int i = 0; i < max_len && buffer[i] != '\0'; i++) {
if (buffer[i] == '_') {
buffer[i] = '0';
}
if (buffer[i] == ',') {
n++;
buffer[i] = ' ';
}
}
n = (n + 2)/2;
problem->sliding.numbers_count = n;
problem->sliding.n = sqrt(n);
int* ns = (int*)malloc(sizeof(int)*problem->sliding.numbers_count);
int* nd = (int*)malloc(sizeof(int)*problem->sliding.numbers_count);
char *buf = buffer;
int pos;
for (int i = 0; i < problem->sliding.numbers_count; i++) {
sscanf(buf, "%d%n", &ns[i], &pos);
buf += pos;
}
for (int i = 0; i < problem->sliding.numbers_count; i++) {
sscanf(buf, "%d%n", &nd[i], &pos);
buf += pos;
}
*node_start = ns;
*node_dest = nd;
free(buffer);
return 0;
}
int write_sliding(FILE *f, Problem problem, int path_count, void* path) {
NodeSliding *np;
size_t nsize = node_size(problem.type, problem.sliding.n);
int chars = 0;
for (int i = path_count - 1; i >= 0; i--) {
np = (NodeSliding*)((size_c)path + i*nsize);
for (int j = 0; j < problem.sliding.numbers_count-1; j++) {
if (np->numbers[j] != 0)
chars = fprintf(f, "%d,", np->numbers[j]);
else
chars = fprintf(f, "_,");
if (chars < 0) {
return 3;
}
}
if (fprintf(f, "%d\n",np->numbers[problem.sliding.numbers_count-1]) < 0) {
return 3;
}
}
return 0;
}
int write_pathfinding(FILE *f, int path_count, void* path) {
NodePathfinding *path_nodes = (NodePathfinding*)path;
for (int i = path_count - 1; i >= 0; i--) {
if (fprintf(f, "%d,%d\n", path_nodes[i].x, path_nodes[i].y) < 0) {
return 3;
}
}
return 0;
}
int write_file(char *filename, Problem problem, int path_count, void* path, float elapsedTime) {
// Open file descriptor.
FILE *f = fopen(filename, "w");
if (f == NULL) {
perror("couldn't open output file fd");
return 2;
}
// Print out solution to file.
fprintf(f, "%.0f\n", elapsedTime);
int status;
switch (problem.type) {
case PROBLEM_TYPE_SLIDING:
status = write_sliding(f, problem, path_count, path);
break;
case PROBLEM_TYPE_PATHFINDING:
status = write_pathfinding(f, path_count, path);
break;
default:
assert(false);
}
// Close file descriptor.
if (fclose(f)) {
perror("couldn't close output file fd");
return 1;
}
return status;
}
int parse_arguments(int argc, char **argv, int *problem_type,
char **input_file, char **output_file) {
char *version = NULL;
int c;
while (1) {
static struct option long_options[] = {
{"version", required_argument, 0, 'v'},
{"input-data", required_argument, 0, 'i'},
{"output-data", required_argument, 0, 'o'},
{0, 0, 0, 0}
};
int option_index = 0;
c = getopt_long (argc, argv, "v:i:o:", long_options, &option_index);
if (c == -1) {
break;
}
switch (c) {
case 'v':
version = optarg;
break;
case 'i':
*input_file = optarg;
break;
case 'o':
*output_file = optarg;
break;
default:
break;
}
}
if (version == NULL || *input_file == NULL || *output_file == NULL) {
printf("Invalid arguments. Required 'version', 'input-file', 'output-file'.\n");
return 1;
}
*problem_type = 0;
if (!strcmp("pathfinding", version)) {
*problem_type = PROBLEM_TYPE_PATHFINDING;
}
if (!strcmp("sliding", version)) {
*problem_type = PROBLEM_TYPE_SLIDING;
}
if (*problem_type == 0) {
printf("Invalid 'version' argument.\n");
return 1;
}
return 0;
}
int main(int argc, char **argv) {
Problem problem;
char *input_file = NULL;
char *output_file = NULL;
// Parse input data.
if (parse_arguments(argc, argv, &problem.type, &input_file, &output_file)) {
return 1;
}
void *host_node_start, *host_node_dest, *host_nodes_unusual;
void *dev_node_start, *dev_node_dest, *dev_nodes_unusual;
switch (problem.type) {
case PROBLEM_TYPE_PATHFINDING:
if (parse_pathfinding(&problem, input_file, &host_node_start, &host_node_dest, &host_nodes_unusual))
return 1;
handleError(hipMalloc((void**)&dev_node_start, sizeof(NodePathfinding)));
handleError(hipMemcpy(dev_node_start, host_node_start, sizeof(NodePathfinding), hipMemcpyHostToDevice));
handleError(hipMalloc((void**)&dev_node_dest, sizeof(NodePathfinding)));
handleError(hipMemcpy(dev_node_dest, host_node_dest, sizeof(NodePathfinding), hipMemcpyHostToDevice));
handleError(hipMalloc((void**)&dev_nodes_unusual, problem.pathfinding.unusual_nodes_count*sizeof(NodePathfinding)));
handleError(hipMemcpy(dev_nodes_unusual, host_nodes_unusual, problem.pathfinding.unusual_nodes_count*sizeof(NodePathfinding), hipMemcpyHostToDevice));
free(host_nodes_unusual);
problem.pathfinding.map_size = 1024*1024* 1024L;
handleError(hipMalloc((void**)&problem.pathfinding.map_unusual_nodes, problem.pathfinding.map_size));
break;
case PROBLEM_TYPE_SLIDING:
if (parse_sliding(&problem, input_file, &host_node_start, &host_node_dest))
return 1;
handleError(hipMalloc((void**)&dev_node_start, problem.sliding.numbers_count*sizeof(int)));
handleError(hipMemcpy(dev_node_start, host_node_start, sizeof(int)*problem.sliding.numbers_count, hipMemcpyHostToDevice));
handleError(hipMalloc((void**)&dev_node_dest, problem.sliding.numbers_count*sizeof(int)));
handleError(hipMemcpy(dev_node_dest, host_node_dest, sizeof(int)*problem.sliding.numbers_count, hipMemcpyHostToDevice));
break;
}
// Initialize problem, memory and GPU.
int block_num = 16;
int threads_per_block = 1024;
problem.mem_size = 1024*1024*1024 * 7L;
problem.queues_size = 1024*1024*1024 * 2L;
problem.map_size = 1024*1024*1024 * 2L;
problem.k = (block_num * threads_per_block) / (4*problem.type);
size_t nodes_count = ((problem.type*4)+1)*problem.k;
Node **nodes, **nodes_solutions;
handleError(hipMalloc((void**)&nodes, sizeof(Node*)*nodes_count));
handleError(hipMalloc((void**)&nodes_solutions, sizeof(Node*)*problem.k));
handleError(hipMalloc((void**)&problem.memory, problem.mem_size));
handleError(hipMalloc((void**)&problem.queues, problem.queues_size));
handleError(hipMalloc((void**)&problem.map, problem.map_size));
hipLaunchKernelGGL(( gpu_astar_init), dim3(1), dim3(1), 0, 0, problem, dev_node_start, dev_node_dest, dev_nodes_unusual);
hipDeviceSynchronize();
// Run algorithm.
hipEvent_t start, stop;
float elapsedTime;
handleError(hipEventCreate(&start));
handleError(hipEventCreate(&stop));
handleError(hipEventRecord(start, 0));
hipLaunchKernelGGL(( gpu_astar), dim3(block_num), dim3(threads_per_block), 0, 0, nodes, nodes_count, nodes_solutions);
handleError(hipEventRecord(stop, 0));
handleError(hipEventSynchronize(stop));
handleError(hipEventElapsedTime(&elapsedTime, start, stop));
handleError(hipEventDestroy(start));
handleError(hipEventDestroy(stop));
// Do some final stuff (prepare nodes data to copy).
handleError(hipFree(problem.map));
if (problem.type == PROBLEM_TYPE_PATHFINDING) {
handleError(hipFree(problem.pathfinding.map_unusual_nodes));
}
int path_count_max = problem.map_size / node_size(problem.type, problem.sliding.n);
void *path, *dev_path;
handleError(hipMalloc((void**)&dev_path, problem.map_size));
int *dev_path_count;
handleError(hipMalloc(&dev_path_count, sizeof(int)));
hipLaunchKernelGGL(( gpu_astar_final), dim3(1), dim3(1), 0, 0, nodes_solutions, dev_path, dev_path_count, path_count_max);
hipDeviceSynchronize();
// Copy results back to host memory.
int path_count;
hipMemcpy(&path_count, dev_path_count, sizeof(int), hipMemcpyDeviceToHost);
printf("Distance: %d\n", path_count-1);
size_c path_size = node_size(problem.type, problem.sliding.n)*path_count;
path = malloc(path_size);
handleError(hipMemcpy(path, dev_path, path_size, hipMemcpyDeviceToHost));
int status = write_file(output_file, problem, path_count, path, elapsedTime);
// Free memory.
handleError(hipFree(problem.memory));
handleError(hipFree(problem.queues));
handleError(hipFree(nodes));
handleError(hipFree(nodes_solutions));
handleError(hipFree(dev_node_start));
handleError(hipFree(dev_node_dest));
free(host_node_start);
free(host_node_dest);
return status;
}
| 301e2f2e25cb87eca728b92fba1c641c936ffbc9.cu | #include "main.cuh"
__device__ int get_id() {
return (threadIdx.x + blockDim.x*blockIdx.x);
}
__global__ void gpu_astar_init(Problem hp, void *node_start, void *node_dest, void *nodes_unusual_raw) {
p = hp;
memory_init(p.memory, p.mem_size);
queues_init(p.queues, p.queues_size, p.k);
p.queues_items = 1;
map_init(p.map, p.map_size);
if (p.type == PROBLEM_TYPE_PATHFINDING) {
map_init(p.pathfinding.map_unusual_nodes, p.pathfinding.map_size);
NodePathfinding* nodes_unusual = (NodePathfinding*)nodes_unusual_raw;
Node *node;
for (int i = 0; i < p.pathfinding.unusual_nodes_count; i++) {
node = (Node*)memory_allocate(p.memory, sizeof(Node));
node->data = (void*)&nodes_unusual[i];
node->id = node_id(node, PROBLEM_TYPE_PATHFINDING, 0, p.pathfinding.dim_x);
node = map_set(p.pathfinding.map_unusual_nodes, node, node->id);
assert(node == NULL);
}
}
p.node_start = (Node*)memory_allocate(p.memory, sizeof(Node));
p.node_start->data = node_start;
p.node_start->id = node_id(p.node_start, p.type, p.sliding.n, p.pathfinding.dim_x);
p.node_destination = (Node*)memory_allocate(p.memory, sizeof(Node));
p.node_destination->data = node_dest;
p.node_destination->id = node_id(p.node_destination, p.type, p.sliding.n, p.pathfinding.dim_x);
p.solutions_min_g = INT_MAX;
p.queues_min_fg = INT_MAX - 16;
solution_found = false;
}
__global__ void gpu_astar_final(Node** solutions, void* path, int* path_count, int path_count_max) {
Node *m = NULL;
Node *node;
for (int q_id = 0; q_id < p.k; q_id++) {
node = solutions[q_id];
if (node == NULL) {
continue;
}
if (m == NULL || node->g < m->g) {
m = node;
}
}
size_t nsize = node_size(p.type, p.sliding.n);
int i = 0;
size_c path_node;
while (m != NULL) {
path_node = ((size_c)path + i*nsize);
node_copy(m->data, (void*)path_node, nsize);
m = m->previous_node;
i++;
}
*path_count = i;
}
__device__ void gpu_astar_update_queues_min_fg() {
bool empty = true;
Node *q_min;
Node *total_min = NULL;
for (int i = 0; i < p.k; i++) {
q_min = queue_min(&p.queues[i]);
if (q_min == NULL) {
continue;
}
empty = false;
if (total_min == NULL || q_min->fg < total_min->fg) {
total_min = q_min;
}
}
if (empty) {
return;
}
// if (p.queues_min_fg != total_min->fg) {
// printf("QUEUES MIN FG: %d -> %d\n", p.queues_min_fg, total_min->fg);
// }
p.queues_min_fg = total_min->fg;
}
__global__ void gpu_astar(Node** nodes, int nodes_count, Node** solutions) {
int expand_factor = 4*p.type;
int id = get_id();
int q_id = id / expand_factor;
assert(q_id < p.k);
int is_main = (id % expand_factor == 0) ? 1 : 0;
int id_main = q_id * (expand_factor+1);
int id_node = id_main + (id % expand_factor) + 1;
if (id < nodes_count) {
nodes[id] = NULL;
}
if (id < p.k) {
solutions[id] = NULL;
}
if (id == 0) {
queue_push(&p.queues[0], p.node_start);
}
assert(q_id < p.k);
Node *node;
int queue_items_diff;
bool queue_popped = false;
bool force_close;
int push_q_id;
while (!solution_found && p.queues_items > 0) {
// Pop the min element from queue.
if (is_main) {
queue_pop(&p.queues[q_id], &nodes[id_main], p.queues_min_fg + 5);
if (nodes[id_main] != NULL) {
queue_items_diff = -1;
queue_popped = true;
force_close = false;
}
}
if (queue_popped) {
if (nodes[id_main]->g >= p.solutions_min_g) {
nodes[id_main] = NULL;
force_close = true;
}
}
// Expand the elements using one thread per each expand direction.
if (nodes[id_main] != NULL) {
node = nodes[id_main];
if (is_main && node_compare(node->data, p.node_destination->data, p.type, p.sliding.n)) {
if (solutions[q_id] == NULL || node->fg < solutions[q_id]->fg) {
solutions[q_id] = node;
if (solutions[q_id]->g < p.solutions_min_g) {
// printf("SOLUTIONS MIN G: %d -> %d\n", p.solutions_min_g, solutions[q_id]->g);
atomicExch(&p.solutions_min_g, solutions[q_id]->g);
}
}
}
if (solutions[q_id] != NULL && solutions[q_id]->fg <= p.queues_min_fg) {
solution_found = true;
break;
}
if (nodes[id_node] == NULL) {
nodes[id_node] = (Node*)memory_allocate(p.memory, sizeof(Node));
nodes[id_node]->data = memory_allocate(p.memory, node_size(p.type, p.sliding.n));
}
nodes[id_node]->previous_node = nodes[id_main];
bool expanded = node_expand(nodes[id_main], nodes[id_node], p.type, p.sliding.n, p.pathfinding.dim_x, p.pathfinding.dim_y);
nodes[id_node]->id = node_id(nodes[id_node], p.type, p.sliding.n, p.pathfinding.dim_x);
if (p.type == PROBLEM_TYPE_PATHFINDING && expanded) {
Node *unusual = map_get(p.pathfinding.map_unusual_nodes, nodes[id_node]->id);
if (unusual != NULL) {
NodePathfinding* unusualp = (NodePathfinding*)unusual->data;
if (unusualp->weight == -1) {
expanded = false;
} else {
nodes[id_node]->g = nodes[id_main]->g + unusualp->weight;
}
} else {
nodes[id_node]->g = nodes[id_main]->g + 1;
}
nodes[id_node]->fg = nodes[id_node]->g + node_f(nodes[id_node], p.node_destination, PROBLEM_TYPE_PATHFINDING, 0);
} else if (p.type == PROBLEM_TYPE_SLIDING && expanded) {
nodes[id_node]->g = nodes[id_main]->g + 1;
nodes[id_node]->fg = nodes[id_node]->g + node_f(nodes[id_node], p.node_destination, PROBLEM_TYPE_SLIDING, p.sliding.n);
}
if (expanded && nodes[id_node] != NULL) {
if (!map_is_duplicate(p.map, nodes[id_node])) {
assert(nodes[id_node] != NULL);
push_q_id = (nodes[id_node]->id * (q_id+1));
if (push_q_id < 0)
push_q_id *= -1;
push_q_id = push_q_id % p.k;
queue_push(&p.queues[push_q_id], nodes[id_node]);
nodes[id_node] = NULL;
}
}
}
// Fix queues items structure (sort items).
__syncthreads();
if (is_main) {
queue_fix(&p.queues[q_id]);
if (queue_popped) {
if (!force_close) {
for (int i = 0; i < expand_factor; i++) {
if (nodes[id_node + i] == NULL) {
queue_items_diff += 1;
}
}
}
atomicAdd(&p.queues_items, queue_items_diff);
queue_popped = false;
}
nodes[id_main] = NULL;
}
if (id == 0) {
gpu_astar_update_queues_min_fg();
}
}
}
int parse_pathfinding(Problem *problem, char *filename, void **node_start, void **node_dest, void **unusual_nodes) {
FILE *f = fopen(filename, "r");
if (f == NULL) {
perror("couldn't open input-data file");
return 1;
}
if (fscanf(f, "%d,%d\n", &problem->pathfinding.dim_x, &problem->pathfinding.dim_y) < 2) {
perror("couldn't parse pathfinding dimensions");
fclose(f);
return 1;
}
NodePathfinding *start = (NodePathfinding*)malloc(sizeof(NodePathfinding));
if (fscanf(f, "%d,%d\n", &start->x, &start->y) < 2) {
perror("couldn't parse pathfinding start position");
fclose(f);
return 1;
}
*node_start = start;
NodePathfinding *dest = (NodePathfinding*)malloc(sizeof(NodePathfinding));
if (fscanf(f, "%d,%d\n", &dest->x, &dest->y) < 2) {
perror("couldn't parse pathfinding dest position");
fclose(f);
return 1;
}
*node_dest = dest;
int obstacles_count;
if (fscanf(f, "%d\n", &obstacles_count) < 1) {
perror("couldn't parse pathfinding obstacles_count number");
fclose(f);
return 1;
}
NodePathfinding* nodes_obstacles = (NodePathfinding*)malloc(obstacles_count*sizeof(NodePathfinding));
if (nodes_obstacles == NULL) {
perror("couldn't allocate memory for obstacles_count");
fclose(f);
return 1;
}
for (int i = 0; i < obstacles_count; i++) {
if (fscanf(f, "%d,%d\n", &nodes_obstacles[i].x, &nodes_obstacles[i].y) < 2) {
perror("couldn't read obstacle coordinates");
fclose(f);
free(nodes_obstacles);
return 1;
}
nodes_obstacles[i].weight = -1;
}
int nodes_worse_count;
if (fscanf(f, "%d\n", &nodes_worse_count) < 1) {
perror("couldn't parse pathfinding nodes_worse_count number");
fclose(f);
free(nodes_obstacles);
return 1;
}
NodePathfinding* nodes_worse = (NodePathfinding*)malloc(nodes_worse_count*sizeof(NodePathfinding));
if (nodes_worse == NULL) {
perror("couldn't allocate memory for nodes_worse_count");
fclose(f);
return 1;
}
for (int i = 0; i < nodes_worse_count; i++) {
if (fscanf(f, "%d,%d,%d\n", &nodes_worse[i].x, &nodes_worse[i].y, &nodes_worse[i].weight) < 3) {
perror("couldn't read worse node coordinates");
fclose(f);
free(nodes_worse);
free(nodes_obstacles);
return 1;
}
}
problem->pathfinding.unusual_nodes_count = obstacles_count + nodes_worse_count;
NodePathfinding* nodes = (NodePathfinding*)malloc(problem->pathfinding.unusual_nodes_count*sizeof(NodePathfinding));
if (nodes == NULL) {
perror("couldn't allocat ememory for unusual nodes");
free(nodes_obstacles);
free(nodes_worse);
fclose(f);
return 1;
}
for (int i = 0; i < obstacles_count; i++) {
nodes[i] = nodes_obstacles[i];
}
for (int i = 0; i < nodes_worse_count; i++) {
nodes[obstacles_count + i] = nodes_worse[i];
}
*unusual_nodes = (void*)nodes;
free(nodes_obstacles);
free(nodes_worse);
fclose(f);
return 0;
}
char* parse_sliding_read_file(char *filename, size_t max_len) {
char *buffer = (char*)malloc(sizeof(char)*max_len);
FILE *f;
f = fopen(filename, "r");
if (f == NULL) {
perror("couldn't open input-data file");
free(buffer);
return NULL;
}
size_t l = fread(buffer, sizeof(char), max_len, f);
if (ferror(f) != 0) {
fputs("couldn't read input-data file", stderr);
} else {
buffer[l++] = '\0';
}
if (fclose(f)) {
perror("couldn't close input-data file");
free(buffer);
return NULL;
}
return buffer;
}
int parse_sliding(Problem *problem, char *filename, void **node_start, void **node_dest) {
size_t max_len = 1024*1024;
char* buffer = parse_sliding_read_file(filename, max_len);
if (buffer == NULL) {
return 1;
}
int n = 0;
for (int i = 0; i < max_len && buffer[i] != '\0'; i++) {
if (buffer[i] == '_') {
buffer[i] = '0';
}
if (buffer[i] == ',') {
n++;
buffer[i] = ' ';
}
}
n = (n + 2)/2;
problem->sliding.numbers_count = n;
problem->sliding.n = sqrt(n);
int* ns = (int*)malloc(sizeof(int)*problem->sliding.numbers_count);
int* nd = (int*)malloc(sizeof(int)*problem->sliding.numbers_count);
char *buf = buffer;
int pos;
for (int i = 0; i < problem->sliding.numbers_count; i++) {
sscanf(buf, "%d%n", &ns[i], &pos);
buf += pos;
}
for (int i = 0; i < problem->sliding.numbers_count; i++) {
sscanf(buf, "%d%n", &nd[i], &pos);
buf += pos;
}
*node_start = ns;
*node_dest = nd;
free(buffer);
return 0;
}
int write_sliding(FILE *f, Problem problem, int path_count, void* path) {
NodeSliding *np;
size_t nsize = node_size(problem.type, problem.sliding.n);
int chars = 0;
for (int i = path_count - 1; i >= 0; i--) {
np = (NodeSliding*)((size_c)path + i*nsize);
for (int j = 0; j < problem.sliding.numbers_count-1; j++) {
if (np->numbers[j] != 0)
chars = fprintf(f, "%d,", np->numbers[j]);
else
chars = fprintf(f, "_,");
if (chars < 0) {
return 3;
}
}
if (fprintf(f, "%d\n",np->numbers[problem.sliding.numbers_count-1]) < 0) {
return 3;
}
}
return 0;
}
int write_pathfinding(FILE *f, int path_count, void* path) {
NodePathfinding *path_nodes = (NodePathfinding*)path;
for (int i = path_count - 1; i >= 0; i--) {
if (fprintf(f, "%d,%d\n", path_nodes[i].x, path_nodes[i].y) < 0) {
return 3;
}
}
return 0;
}
int write_file(char *filename, Problem problem, int path_count, void* path, float elapsedTime) {
// Open file descriptor.
FILE *f = fopen(filename, "w");
if (f == NULL) {
perror("couldn't open output file fd");
return 2;
}
// Print out solution to file.
fprintf(f, "%.0f\n", elapsedTime);
int status;
switch (problem.type) {
case PROBLEM_TYPE_SLIDING:
status = write_sliding(f, problem, path_count, path);
break;
case PROBLEM_TYPE_PATHFINDING:
status = write_pathfinding(f, path_count, path);
break;
default:
assert(false);
}
// Close file descriptor.
if (fclose(f)) {
perror("couldn't close output file fd");
return 1;
}
return status;
}
int parse_arguments(int argc, char **argv, int *problem_type,
char **input_file, char **output_file) {
char *version = NULL;
int c;
while (1) {
static struct option long_options[] = {
{"version", required_argument, 0, 'v'},
{"input-data", required_argument, 0, 'i'},
{"output-data", required_argument, 0, 'o'},
{0, 0, 0, 0}
};
int option_index = 0;
c = getopt_long (argc, argv, "v:i:o:", long_options, &option_index);
if (c == -1) {
break;
}
switch (c) {
case 'v':
version = optarg;
break;
case 'i':
*input_file = optarg;
break;
case 'o':
*output_file = optarg;
break;
default:
break;
}
}
if (version == NULL || *input_file == NULL || *output_file == NULL) {
printf("Invalid arguments. Required 'version', 'input-file', 'output-file'.\n");
return 1;
}
*problem_type = 0;
if (!strcmp("pathfinding", version)) {
*problem_type = PROBLEM_TYPE_PATHFINDING;
}
if (!strcmp("sliding", version)) {
*problem_type = PROBLEM_TYPE_SLIDING;
}
if (*problem_type == 0) {
printf("Invalid 'version' argument.\n");
return 1;
}
return 0;
}
int main(int argc, char **argv) {
Problem problem;
char *input_file = NULL;
char *output_file = NULL;
// Parse input data.
if (parse_arguments(argc, argv, &problem.type, &input_file, &output_file)) {
return 1;
}
void *host_node_start, *host_node_dest, *host_nodes_unusual;
void *dev_node_start, *dev_node_dest, *dev_nodes_unusual;
switch (problem.type) {
case PROBLEM_TYPE_PATHFINDING:
if (parse_pathfinding(&problem, input_file, &host_node_start, &host_node_dest, &host_nodes_unusual))
return 1;
handleError(cudaMalloc((void**)&dev_node_start, sizeof(NodePathfinding)));
handleError(cudaMemcpy(dev_node_start, host_node_start, sizeof(NodePathfinding), cudaMemcpyHostToDevice));
handleError(cudaMalloc((void**)&dev_node_dest, sizeof(NodePathfinding)));
handleError(cudaMemcpy(dev_node_dest, host_node_dest, sizeof(NodePathfinding), cudaMemcpyHostToDevice));
handleError(cudaMalloc((void**)&dev_nodes_unusual, problem.pathfinding.unusual_nodes_count*sizeof(NodePathfinding)));
handleError(cudaMemcpy(dev_nodes_unusual, host_nodes_unusual, problem.pathfinding.unusual_nodes_count*sizeof(NodePathfinding), cudaMemcpyHostToDevice));
free(host_nodes_unusual);
problem.pathfinding.map_size = 1024*1024* 1024L;
handleError(cudaMalloc((void**)&problem.pathfinding.map_unusual_nodes, problem.pathfinding.map_size));
break;
case PROBLEM_TYPE_SLIDING:
if (parse_sliding(&problem, input_file, &host_node_start, &host_node_dest))
return 1;
handleError(cudaMalloc((void**)&dev_node_start, problem.sliding.numbers_count*sizeof(int)));
handleError(cudaMemcpy(dev_node_start, host_node_start, sizeof(int)*problem.sliding.numbers_count, cudaMemcpyHostToDevice));
handleError(cudaMalloc((void**)&dev_node_dest, problem.sliding.numbers_count*sizeof(int)));
handleError(cudaMemcpy(dev_node_dest, host_node_dest, sizeof(int)*problem.sliding.numbers_count, cudaMemcpyHostToDevice));
break;
}
// Initialize problem, memory and GPU.
int block_num = 16;
int threads_per_block = 1024;
problem.mem_size = 1024*1024*1024 * 7L;
problem.queues_size = 1024*1024*1024 * 2L;
problem.map_size = 1024*1024*1024 * 2L;
problem.k = (block_num * threads_per_block) / (4*problem.type);
size_t nodes_count = ((problem.type*4)+1)*problem.k;
Node **nodes, **nodes_solutions;
handleError(cudaMalloc((void**)&nodes, sizeof(Node*)*nodes_count));
handleError(cudaMalloc((void**)&nodes_solutions, sizeof(Node*)*problem.k));
handleError(cudaMalloc((void**)&problem.memory, problem.mem_size));
handleError(cudaMalloc((void**)&problem.queues, problem.queues_size));
handleError(cudaMalloc((void**)&problem.map, problem.map_size));
gpu_astar_init<<<1, 1>>>(problem, dev_node_start, dev_node_dest, dev_nodes_unusual);
cudaDeviceSynchronize();
// Run algorithm.
cudaEvent_t start, stop;
float elapsedTime;
handleError(cudaEventCreate(&start));
handleError(cudaEventCreate(&stop));
handleError(cudaEventRecord(start, 0));
gpu_astar<<<block_num, threads_per_block>>>(nodes, nodes_count, nodes_solutions);
handleError(cudaEventRecord(stop, 0));
handleError(cudaEventSynchronize(stop));
handleError(cudaEventElapsedTime(&elapsedTime, start, stop));
handleError(cudaEventDestroy(start));
handleError(cudaEventDestroy(stop));
// Do some final stuff (prepare nodes data to copy).
handleError(cudaFree(problem.map));
if (problem.type == PROBLEM_TYPE_PATHFINDING) {
handleError(cudaFree(problem.pathfinding.map_unusual_nodes));
}
int path_count_max = problem.map_size / node_size(problem.type, problem.sliding.n);
void *path, *dev_path;
handleError(cudaMalloc((void**)&dev_path, problem.map_size));
int *dev_path_count;
handleError(cudaMalloc(&dev_path_count, sizeof(int)));
gpu_astar_final<<<1, 1>>>(nodes_solutions, dev_path, dev_path_count, path_count_max);
cudaDeviceSynchronize();
// Copy results back to host memory.
int path_count;
cudaMemcpy(&path_count, dev_path_count, sizeof(int), cudaMemcpyDeviceToHost);
printf("Distance: %d\n", path_count-1);
size_c path_size = node_size(problem.type, problem.sliding.n)*path_count;
path = malloc(path_size);
handleError(cudaMemcpy(path, dev_path, path_size, cudaMemcpyDeviceToHost));
int status = write_file(output_file, problem, path_count, path, elapsedTime);
// Free memory.
handleError(cudaFree(problem.memory));
handleError(cudaFree(problem.queues));
handleError(cudaFree(nodes));
handleError(cudaFree(nodes_solutions));
handleError(cudaFree(dev_node_start));
handleError(cudaFree(dev_node_dest));
free(host_node_start);
free(host_node_dest);
return status;
}
|
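Both listings above route every runtime call through handleError, but main.cuh is not part of this dump, so the helper's definition is unknown. A conventional sketch of such a wrapper, assuming it simply aborts with a readable message on failure:

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// hypothetical reconstruction of the unseen helper from main.cuh:
// report the error string and exit when a CUDA runtime call fails
static void handleError(cudaError_t err) {
    if (err != cudaSuccess) {
        fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
}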
7efd18d6f65ae7a56827e96640e8761cfee0a44b.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdint.h>
#include <cudnn.h>
#include <opencv2/opencv.hpp>
#include <cassert>
#include <cstdlib>
#include <iostream>
// function to check for errors
#define checkCUDNN(expression) { \
cudnnStatus_t status = (expression); \
if(status != CUDNN_STATUS_SUCCESS) { \
std::cerr << "Error on line " << __LINE__ << ": " \
<< cudnnGetErrorString(status) << std::endl; \
std::exit(EXIT_FAILURE); \
} \
}
// use opencv to load/save an image
cv::Mat load_image(const char* image_path) {
cv::Mat image = cv::imread(image_path, CV_LOAD_IMAGE_GRAYSCALE);
image.convertTo(image, CV_32FC1);
cv::normalize(image, image, 0, 1, cv::NORM_MINMAX);
std::cerr << "Input image: " << image.rows << " x " << image.cols << " x "
<< image.channels() << std::endl;
return image;
}
void save_image(const char* output_filename, float* buffer, int height, int width) {
cv::Mat output_image(height, width, CV_32FC1, buffer);
// Make negative values zero
cv::threshold(output_image, output_image, /*threshold=*/0, /*maxval=*/0, cv::THRESH_TOZERO);
cv::normalize(output_image, output_image, 0.0, 255.0, cv::NORM_MINMAX);
output_image.convertTo(output_image, CV_8UC1);
cv::imwrite(output_filename, output_image);
std::cerr << "Wrote output to " << output_filename << std::endl;
}
#define BATCH 1
#define IN_CHANNELS 1
int main(int argc, char* argv[]) {
cv::Mat img = load_image("./gray_images/0.PNG");
std::cout << img << std::endl;
for(int i = 0; i < img.rows; i++) {
for(int j = 0; j < img.cols; j++) {
//std::cout << img.at(i,j) << "\t";
}
std::cout << std::endl;
}
FILE *f;
char buf[1000];
// read weight files into arrays
// conv1 weights
// 5x5x1x32
f = fopen("./weights/var0.txt", "r");
float h_conv1_kernel[32][1][5][5];
for(int kernel = 0; kernel < 32; kernel++) {
for(int channel = 0; channel < 1; channel++) {
for(int row = 0; row < 5; row++) {
for(int col = 0; col < 5; col++) {
if(fgets(buf,1000,f) != NULL)
h_conv1_kernel[kernel][channel][row][col] = atof(buf);
}
}
}
}
fclose(f);
for(int k = 0; k < 32; k++) {
std::cout << "Kernel " << k << ":" << std::endl;
for(int ch = 0; ch < 1; ch++) {
for(int r = 0; r < 5; r++) {
for(int c = 0; c < 5; c++) {
std::cout << h_conv1_kernel[k][ch][r][c] << " ";
}
std::cout << std::endl;
}
std::cout << std::endl;
}
}
// conv1 bias
// 32
float h_conv1_bias[32];
f = fopen("./weights/var1.txt", "r");
for(int i = 0; i < 32; i++) {
if(fgets(buf, 1000, f) != NULL)
h_conv1_bias[i] = atof(buf);
}
fclose(f);
// conv2 weights
// 5x5x32x64
f = fopen("./weights/var2.txt", "r");
float h_conv2_kernel[64][32][5][5];
for(int kernel = 0; kernel < 64; kernel++) {
for(int channel = 0; channel < 32; channel++) {
for(int row = 0; row < 5; row++) {
for(int col = 0; col < 5; col++) {
if(fgets(buf, 1000, f) != NULL)
h_conv2_kernel[kernel][channel][row][col] = atof(buf);
}
}
}
}
fclose(f);
//conv2 bias
// 64
f = fopen("./weights/var3.txt", "r");
float h_conv2_bias[64];
for(int i = 0; i < 64; i++) {
if(fgets(buf, 1000, f) != NULL)
h_conv2_bias[i] = atof(buf);
}
fclose(f);
// fully connected layer weights
// 3136x1024
f = fopen("./weights/var4.txt", "r");
float h_fc_weight[3136][1024];
for(int row = 0; row < 3136; row++) {
for(int col = 0; col < 1024; col++) {
if(fgets(buf, 1000, f) != NULL)
h_fc_weight[row][col] = atof(buf);
}
}
fclose(f);
// fully connected layer bias
// 1024
f = fopen("./weights/var5.txt", "r");
float h_fc_bias[1024];
for(int i = 0; i < 1024; i++) {
if(fgets(buf, 1000, f) != NULL)
h_fc_bias[i] = atof(buf);
}
fclose(f);
// output layer weights
// 1024x10
f = fopen("./weights/var6.txt", "r");
float h_out_weight[1024][10];
for(int row = 0; row < 1024; row++) {
for(int col = 0; col < 10; col++) {
if(fgets(buf,1000, f) != NULL)
h_out_weight[row][col] = atof(buf);
}
}
fclose(f);
// output layer bias
// 10
float h_out_bias[10];
f = fopen("./weights/var7.txt", "r");
for(int i = 0; i < 10; i++) {
if(fgets(buf, 1000, f) != NULL) {
h_out_bias[i] = atof(buf);
}
}
fclose(f);
// create handle for cudnn
cudnnHandle_t cudnn;
checkCUDNN(cudnnCreate(&cudnn));
// conv1 variables --------------------------------------------------------
// input tensor
cudnnTensorDescriptor_t in_desc;
checkCUDNN(cudnnCreateTensorDescriptor(&in_desc));
checkCUDNN(cudnnSetTensor4dDescriptor(in_desc,
/*format=*/CUDNN_TENSOR_NHWC,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/1,
/*image_height=*/img.rows,
/*image_width=*/img.cols));
// kernel descriptor
cudnnFilterDescriptor_t conv1_kernel_desc;
checkCUDNN(cudnnCreateFilterDescriptor(&conv1_kernel_desc));
checkCUDNN(cudnnSetFilter4dDescriptor(conv1_kernel_desc,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/32,
/*in_channels=*/1,
/*kernel_height=*/5,
/*kernel_width=*/5));
// convolution descriptor
cudnnConvolutionDescriptor_t conv1_desc;
checkCUDNN(cudnnCreateConvolutionDescriptor(&conv1_desc));
checkCUDNN(cudnnSetConvolution2dDescriptor(conv1_desc,
/*pad_height=*/2,
/*pad_width=*/2,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
/*computeType=*/CUDNN_DATA_FLOAT));
// conv1 output dimensions
int conv1_batch{0}, conv1_chan{0}, conv1_h{0}, conv1_w{0};
checkCUDNN(cudnnGetConvolution2dForwardOutputDim(conv1_desc,
in_desc,
conv1_kernel_desc,
&conv1_batch,
&conv1_chan,
&conv1_h,
&conv1_w));
std::cerr << "Output image: " << conv1_h << " x " << conv1_w << " x "
<< conv1_chan << std::endl;
// conv1 output tensor
cudnnTensorDescriptor_t conv1_out_desc;
checkCUDNN(cudnnCreateTensorDescriptor(&conv1_out_desc));
checkCUDNN(cudnnSetTensor4dDescriptor(conv1_out_desc,
/*format=*/CUDNN_TENSOR_NHWC,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/conv1_batch,
/*channels=*/conv1_chan,
/*image_height=*/conv1_h,
/*image_width=*/conv1_w));
// conv1 forward algorithm
cudnnConvolutionFwdAlgo_t conv1_alg;
checkCUDNN(cudnnGetConvolutionForwardAlgorithm(cudnn,
in_desc,
conv1_kernel_desc,
conv1_desc,
conv1_out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST,
/*memoryLimitInBytes=*/0,
&conv1_alg));
// conv1 workspace size
size_t conv1_workspace{0};
checkCUDNN(cudnnGetConvolutionForwardWorkspaceSize(cudnn,
in_desc,
conv1_kernel_desc,
conv1_desc,
conv1_out_desc,
conv1_alg,
&conv1_workspace));
std::cerr << "Workspace size: " << (conv1_workspace / 1048576.0)
<< "MB" << std::endl;
assert(conv1_workspace > 0);
// initialize device variables --------------------------------------------
int in_size = BATCH * IN_CHANNELS * img.rows * img.cols * sizeof(float);
float* d_in{nullptr};
hipMalloc(&d_in, in_size);
hipMemcpy(d_in, img.ptr<float>(0), in_size, hipMemcpyHostToDevice);
float* d_conv1_kernel{nullptr};
hipMalloc(&d_conv1_kernel, sizeof(h_conv1_kernel));
hipMemcpy(d_conv1_kernel, h_conv1_kernel, sizeof(h_conv1_kernel), hipMemcpyHostToDevice);
void* d_conv1_work{nullptr};
hipMalloc(&d_conv1_work, conv1_workspace);
int conv1_out_size = conv1_batch * conv1_chan * conv1_h * conv1_w * sizeof(float);
float* d_conv1_out{nullptr};
hipMalloc(&d_conv1_out, conv1_out_size);
hipMemset(d_conv1_out, 0, conv1_out_size);
const float alpha = 1.0f, beta = 0.0f;
// conv1 ------------------------------------------------------------------
checkCUDNN(cudnnConvolutionForward(cudnn,
&alpha,
in_desc,
d_in,
conv1_kernel_desc,
d_conv1_kernel,
conv1_desc,
conv1_alg,
d_conv1_work,
conv1_workspace,
&beta,
conv1_out_desc,
d_conv1_out));
// relu1 ------------------------------------------------------------------
// pool1 ------------------------------------------------------------------
// conv2 ------------------------------------------------------------------
// relu2 ------------------------------------------------------------------
// pool2 ------------------------------------------------------------------
// fc ---------------------------------------------------------------------
// relu3 ------------------------------------------------------------------
// dropout ----------------------------------------------------------------
// output -----------------------------------------------------------------
// copy output to host side
float* h_out = new float[conv1_out_size / sizeof(float)];  // conv1_out_size is in bytes
hipMemcpy(h_out, d_conv1_out, conv1_out_size, hipMemcpyDeviceToHost);
std::cout << "Output Data:" << std::endl;
//for(int i = 0; i < 10; i++) {
// std::cout << h_out[i] << std::endl;
//}
//save_image("./out_test.png", h_out, conv1_h, conv1_w);
// free variables
delete[] h_out;
hipFree(d_in);
hipFree(d_conv1_kernel);
hipFree(d_conv1_work);
hipFree(d_conv1_out);
cudnnDestroy(cudnn);
cudnnDestroyTensorDescriptor(in_desc);
cudnnDestroyFilterDescriptor(conv1_kernel_desc);
cudnnDestroyConvolutionDescriptor(conv1_desc);
cudnnDestroyTensorDescriptor(conv1_out_desc);
return 0;
}
| 7efd18d6f65ae7a56827e96640e8761cfee0a44b.cu | #include <stdio.h>
#include <stdint.h>
#include <cudnn.h>
#include <opencv2/opencv.hpp>
#include <cassert>
#include <cstdlib>
#include <iostream>
// function to check for errors
#define checkCUDNN(expression) { \
cudnnStatus_t status = (expression); \
if(status != CUDNN_STATUS_SUCCESS) { \
std::cerr << "Error on line " << __LINE__ << ": " \
<< cudnnGetErrorString(status) << std::endl; \
std::exit(EXIT_FAILURE); \
} \
}
// use opencv to load/save an image
cv::Mat load_image(const char* image_path) {
cv::Mat image = cv::imread(image_path, CV_LOAD_IMAGE_GRAYSCALE);
image.convertTo(image, CV_32FC1);
cv::normalize(image, image, 0, 1, cv::NORM_MINMAX);
std::cerr << "Input image: " << image.rows << " x " << image.cols << " x "
<< image.channels() << std::endl;
return image;
}
void save_image(const char* output_filename, float* buffer, int height, int width) {
cv::Mat output_image(height, width, CV_32FC1, buffer);
// Make negative values zero
cv::threshold(output_image, output_image, /*threshold=*/0, /*maxval=*/0, cv::THRESH_TOZERO);
cv::normalize(output_image, output_image, 0.0, 255.0, cv::NORM_MINMAX);
output_image.convertTo(output_image, CV_8UC1);
cv::imwrite(output_filename, output_image);
std::cerr << "Wrote output to " << output_filename << std::endl;
}
#define BATCH 1
#define IN_CHANNELS 1
int main(int argc, char* argv[]) {
cv::Mat img = load_image("./gray_images/0.PNG");
std::cout << img << std::endl;
for(int i = 0; i < img.rows; i++) {
for(int j = 0; j < img.cols; j++) {
//std::cout << img.at(i,j) << "\t";
}
std::cout << std::endl;
}
FILE *f;
char buf[1000];
// read weight files into arrays
// conv1 weights
// 5x5x1x32
f = fopen("./weights/var0.txt", "r");
float h_conv1_kernel[32][1][5][5];
for(int kernel = 0; kernel < 32; kernel++) {
for(int channel = 0; channel < 1; channel++) {
for(int row = 0; row < 5; row++) {
for(int col = 0; col < 5; col++) {
if(fgets(buf,1000,f) != NULL)
h_conv1_kernel[kernel][channel][row][col] = atof(buf);
}
}
}
}
fclose(f);
for(int k = 0; k < 32; k++) {
std::cout << "Kernel " << k << ":" << std::endl;
for(int ch = 0; ch < 1; ch++) {
for(int r = 0; r < 5; r++) {
for(int c = 0; c < 5; c++) {
std::cout << h_conv1_kernel[k][ch][r][c] << " ";
}
std::cout << std::endl;
}
std::cout << std::endl;
}
}
// conv1 bias
// 32
float h_conv1_bias[32];
f = fopen("./weights/var1.txt", "r");
for(int i = 0; i < 32; i++) {
if(fgets(buf, 1000, f) != NULL)
h_conv1_bias[i] = atof(buf);
}
fclose(f);
// conv2 weights
// 5x5x32x64
f = fopen("./weights/var2.txt", "r");
float h_conv2_kernel[64][32][5][5];
for(int kernel = 0; kernel < 64; kernel++) {
for(int channel = 0; channel < 32; channel++) {
for(int row = 0; row < 5; row++) {
for(int col = 0; col < 5; col++) {
if(fgets(buf, 1000, f) != NULL)
h_conv2_kernel[kernel][channel][row][col] = atof(buf);
}
}
}
}
fclose(f);
//conv2 bias
// 64
f = fopen("./weights/var3.txt", "r");
float h_conv2_bias[64];
for(int i = 0; i < 64; i++) {
if(fgets(buf, 1000, f) != NULL)
h_conv2_bias[i] = atof(buf);
}
fclose(f);
// fully connected layer weights
// 3136x1024
f = fopen("./weights/var4.txt", "r");
float h_fc_weight[3136][1024];
for(int row = 0; row < 3136; row++) {
for(int col = 0; col < 1024; col++) {
if(fgets(buf, 1000, f) != NULL)
h_fc_weight[row][col] = atof(buf);
}
}
fclose(f);
// fully connected layer bias
// 1024
f = fopen("./weights/var5.txt", "r");
float h_fc_bias[1024];
for(int i = 0; i < 1024; i++) {
if(fgets(buf, 1000, f) != NULL)
h_fc_bias[i] = atof(buf);
}
fclose(f);
// output layer weights
// 1024x10
f = fopen("./weights/var6.txt", "r");
float h_out_weight[1024][10];
for(int row = 0; row < 1024; row++) {
for(int col = 0; col < 10; col++) {
if(fgets(buf,1000, f) != NULL)
h_out_weight[row][col] = atof(buf);
}
}
fclose(f);
// output layer bias
// 10
float h_out_bias[10];
f = fopen("./weights/var7.txt", "r");
for(int i = 0; i < 10; i++) {
if(fgets(buf, 1000, f) != NULL) {
h_out_bias[i] = atof(buf);
}
}
fclose(f);
// create handle for cudnn
cudnnHandle_t cudnn;
checkCUDNN(cudnnCreate(&cudnn));
// conv1 variables --------------------------------------------------------
// input tensor
cudnnTensorDescriptor_t in_desc;
checkCUDNN(cudnnCreateTensorDescriptor(&in_desc));
checkCUDNN(cudnnSetTensor4dDescriptor(in_desc,
/*format=*/CUDNN_TENSOR_NHWC,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/1,
/*image_height=*/img.rows,
/*image_width=*/img.cols));
// kernel descriptor
cudnnFilterDescriptor_t conv1_kernel_desc;
checkCUDNN(cudnnCreateFilterDescriptor(&conv1_kernel_desc));
checkCUDNN(cudnnSetFilter4dDescriptor(conv1_kernel_desc,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/32,
/*in_channels=*/1,
/*kernel_height=*/5,
/*kernel_width=*/5));
// convolution descriptor
cudnnConvolutionDescriptor_t conv1_desc;
checkCUDNN(cudnnCreateConvolutionDescriptor(&conv1_desc));
checkCUDNN(cudnnSetConvolution2dDescriptor(conv1_desc,
/*pad_height=*/2,
/*pad_width=*/2,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
/*computeType=*/CUDNN_DATA_FLOAT));
// conv1 output dimensions
int conv1_batch{0}, conv1_chan{0}, conv1_h{0}, conv1_w{0};
checkCUDNN(cudnnGetConvolution2dForwardOutputDim(conv1_desc,
in_desc,
conv1_kernel_desc,
&conv1_batch,
&conv1_chan,
&conv1_h,
&conv1_w));
std::cerr << "Output image: " << conv1_h << " x " << conv1_w << " x "
<< conv1_chan << std::endl;
// conv1 output tensor
cudnnTensorDescriptor_t conv1_out_desc;
checkCUDNN(cudnnCreateTensorDescriptor(&conv1_out_desc));
checkCUDNN(cudnnSetTensor4dDescriptor(conv1_out_desc,
/*format=*/CUDNN_TENSOR_NHWC,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/conv1_batch,
/*channels=*/conv1_chan,
/*image_height=*/conv1_h,
/*image_width=*/conv1_w));
// conv1 forward algorithm
cudnnConvolutionFwdAlgo_t conv1_alg;
checkCUDNN(cudnnGetConvolutionForwardAlgorithm(cudnn,
in_desc,
conv1_kernel_desc,
conv1_desc,
conv1_out_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST,
/*memoryLimitInBytes=*/0,
&conv1_alg));
// conv1 workspace size
size_t conv1_workspace{0};
checkCUDNN(cudnnGetConvolutionForwardWorkspaceSize(cudnn,
in_desc,
conv1_kernel_desc,
conv1_desc,
conv1_out_desc,
conv1_alg,
&conv1_workspace));
std::cerr << "Workspace size: " << (conv1_workspace / 1048576.0)
<< "MB" << std::endl;
assert(conv1_workspace > 0);
// initialize device variables --------------------------------------------
int in_size = BATCH * IN_CHANNELS * img.rows * img.cols * sizeof(float);
float* d_in{nullptr};
cudaMalloc(&d_in, in_size);
cudaMemcpy(d_in, img.ptr<float>(0), in_size, cudaMemcpyHostToDevice);
float* d_conv1_kernel{nullptr};
cudaMalloc(&d_conv1_kernel, sizeof(h_conv1_kernel));
cudaMemcpy(d_conv1_kernel, h_conv1_kernel, sizeof(h_conv1_kernel), cudaMemcpyHostToDevice);
void* d_conv1_work{nullptr};
cudaMalloc(&d_conv1_work, conv1_workspace);
int conv1_out_size = conv1_batch * conv1_chan * conv1_h * conv1_w * sizeof(float);
float* d_conv1_out{nullptr};
cudaMalloc(&d_conv1_out, conv1_out_size);
cudaMemset(d_conv1_out, 0, conv1_out_size);
const float alpha = 1.0f, beta = 0.0f;
// conv1 ------------------------------------------------------------------
checkCUDNN(cudnnConvolutionForward(cudnn,
&alpha,
in_desc,
d_in,
conv1_kernel_desc,
d_conv1_kernel,
conv1_desc,
conv1_alg,
d_conv1_work,
conv1_workspace,
&beta,
conv1_out_desc,
d_conv1_out));
// relu1 ------------------------------------------------------------------
// pool1 ------------------------------------------------------------------
// conv2 ------------------------------------------------------------------
// relu2 ------------------------------------------------------------------
// pool2 ------------------------------------------------------------------
// fc ---------------------------------------------------------------------
// relu3 ------------------------------------------------------------------
// dropout ----------------------------------------------------------------
// output -----------------------------------------------------------------
// copy output to host side
float* h_out = new float[conv1_out_size];
cudaMemcpy(h_out, d_conv1_out, conv1_out_size, cudaMemcpyDeviceToHost);
std::cout << "Output Data:" << std::endl;
//for(int i = 0; i < 10; i++) {
// std::cout << h_out[i] << std::endl;
//}
//save_image("./out_test.png", h_out, conv1_h, conv1_w);
// free variables
delete[] h_out;
cudaFree(d_in);
cudaFree(d_conv1_kernel);
cudaFree(d_conv1_work);
cudaFree(d_conv1_out);
cudnnDestroy(cudnn);
cudnnDestroyTensorDescriptor(in_desc);
cudnnDestroyFilterDescriptor(conv1_kernel_desc);
cudnnDestroyConvolutionDescriptor(conv1_desc);
cudnnDestroyTensorDescriptor(conv1_out_desc);
return 0;
}
|
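The conv1 stage above ends at cudnnConvolutionForward; the relu1 through output stages are only placeholder comments, and the bias arrays that main() loads are never uploaded. A hedged sketch of the bias-add and ReLU that would typically come next, reusing cudnn, alpha, beta, conv1_chan, conv1_out_desc, and d_conv1_out from the listing; d_conv1_bias is an assumed device copy of h_conv1_bias, made the same way as the kernel weights:

const float one = 1.0f;
cudnnTensorDescriptor_t bias_desc;
checkCUDNN(cudnnCreateTensorDescriptor(&bias_desc));
checkCUDNN(cudnnSetTensor4dDescriptor(bias_desc, CUDNN_TENSOR_NHWC, CUDNN_DATA_FLOAT,
                                      1, conv1_chan, 1, 1));
// y = 1*bias + 1*y: broadcast the per-channel bias over N, H, W
checkCUDNN(cudnnAddTensor(cudnn, &one, bias_desc, d_conv1_bias,
                          &one, conv1_out_desc, d_conv1_out));
cudnnActivationDescriptor_t relu_desc;
checkCUDNN(cudnnCreateActivationDescriptor(&relu_desc));
checkCUDNN(cudnnSetActivationDescriptor(relu_desc, CUDNN_ACTIVATION_RELU,
                                        CUDNN_NOT_PROPAGATE_NAN, /*coef=*/0.0));
// in-place ReLU over the convolution output
checkCUDNN(cudnnActivationForward(cudnn, relu_desc, &alpha, conv1_out_desc, d_conv1_out,
                                  &beta, conv1_out_desc, d_conv1_out));
cudnnDestroyActivationDescriptor(relu_desc);
cudnnDestroyTensorDescriptor(bias_desc);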
cefc4504600f9a260de3971e95dbb6ba5e89644d.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "sieveOfEratosthenesHIP.hip"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
char *primes = NULL;
hipMalloc(&primes, XSIZE*YSIZE);
uint64_t max = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
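// round the problem size up to the next multiple of the block dims so
// the grid below fully covers XSIZE x YSIZE elements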
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0); // force lazy runtime initialization before timing
hipLaunchKernelGGL(sieveOfEratosthenesCUDA, dim3(gridBlock), dim3(threadBlock), 0, 0, primes, max);
hipDeviceSynchronize();
// ten untimed warm-up launches
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(sieveOfEratosthenesCUDA, dim3(gridBlock), dim3(threadBlock), 0, 0, primes, max);
}
// timed region: 1000 asynchronous launches (no sync before `end`, so this
// mostly measures launch overhead)
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(sieveOfEratosthenesCUDA, dim3(gridBlock), dim3(threadBlock), 0, 0, primes, max);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | cefc4504600f9a260de3971e95dbb6ba5e89644d.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "sieveOfEratosthenesCUDA.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
char *primes = NULL;
cudaMalloc(&primes, XSIZE*YSIZE);
uint64_t max = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
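// cudaFree(0) forces context creation; one launch plus a sync and ten
// warm-up launches precede the timed loop of 1000 launches (no sync
// before `end`, so mostly launch overhead is measured)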
cudaFree(0);
sieveOfEratosthenesCUDA<<<gridBlock,threadBlock>>>(primes,max);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
sieveOfEratosthenesCUDA<<<gridBlock,threadBlock>>>(primes,max);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
sieveOfEratosthenesCUDA<<<gridBlock,threadBlock>>>(primes,max);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
2cc72f53b46fcd24f725b7b5418fd4db359f4f44.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THHUNN/generic/SpatialCrossMapLRN.hip"
#else
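/* Cross-map LRN (AlexNet-style): for each element x_i,
 *   scale_i = k + (alpha / n) * sum over the n-wide channel window of x_j^2
 *   y_i     = x_i * scale_i^(-beta)
 * LRNFillScale computes scale (alpha is pre-divided by local_size below)
 * and LRNComputeOutput applies the -beta power. */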
void THNN_(LRNforward)(THCState* state, THCTensor* input, THCTensor* output,
THCTensor* scale, int local_size, accreal alpha_, accreal beta_, accreal k_)
{
scalar_t alpha = ScalarConvert<accreal, scalar_t>::to(alpha_);
scalar_t beta = ScalarConvert<accreal, scalar_t>::to(beta_);
scalar_t k = ScalarConvert<accreal, scalar_t>::to(k_);
THCTensor_(resizeAs)(state, output, input);
THCTensor_(resizeAs)(state, scale, input);
int batchSize;
int nInputPlane;
int imsize_h;
int imsize_w;
if (input->dim() == 3) {
batchSize = 1;
nInputPlane = input->size(0);
imsize_h = input->size(1);
imsize_w = input->size(2);
}
else
{
batchSize = input->size(0);
nInputPlane = input->size(1);
imsize_h = input->size(2);
imsize_w = input->size(3);
}
input = THCTensor_(newContiguous)(state, input);
int n_threads = batchSize * imsize_h * imsize_w;
hipLaunchKernelGGL(( LRNFillScale<scalar_t, accreal>) , dim3(GET_BLOCKS(n_threads)), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state),
n_threads, THCTensor_(data)(state, input), batchSize, nInputPlane, imsize_h, imsize_w, local_size,
alpha / local_size, k, THCTensor_(data)(state, scale));
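// LRNFillScale runs one thread per (batch, y, x) position and walks the
// channel window internally; the elementwise output kernel needs one
// thread per element, hence the extra factor of nInputPlane below.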
n_threads *= nInputPlane;
THCudaCheck(hipGetLastError());
hipLaunchKernelGGL(( LRNComputeOutput), dim3(GET_BLOCKS(n_threads)), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state),
n_threads, THCTensor_(data)(state, input), THCTensor_(data)(state, scale), -beta, THCTensor_(data)(state, output));
THCudaCheck(hipGetLastError());
THCTensor_(free)(state, input);
}
void THNN_(LRNbackward)(THCState* state, THCTensor* input, THCTensor* output,
THCTensor* gradOutput, THCTensor* gradInput, THCTensor* scale,
int local_size, accreal alpha_, accreal beta_, accreal k_)
{
scalar_t alpha = ScalarConvert<accreal, scalar_t>::to(alpha_);
scalar_t beta = ScalarConvert<accreal, scalar_t>::to(beta_);
scalar_t k = ScalarConvert<accreal, scalar_t>::to(k_);
(void) k;
THCTensor_(resizeAs)(state, gradInput, input);
int batchSize;
int nInputPlane;
int imsize_h;
int imsize_w;
if (input->dim() == 3) {
batchSize = 1;
nInputPlane = input->size(0);
imsize_h = input->size(1);
imsize_w = input->size(2);
}
else
{
batchSize = input->size(0);
nInputPlane = input->size(1);
imsize_h = input->size(2);
imsize_w = input->size(3);
}
input = THCTensor_(newContiguous)(state, input);
gradOutput = THCTensor_(newContiguous)(state, gradOutput);
int n_threads = batchSize * imsize_h * imsize_w;
hipLaunchKernelGGL(( LRNComputeDiff<scalar_t, accreal>) , dim3(GET_BLOCKS(n_threads)), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state),
n_threads, THCTensor_(data)(state, input), THCTensor_(data)(state, output),
THCTensor_(data)(state, scale), THCTensor_(data)(state, gradOutput), batchSize, nInputPlane, imsize_h, imsize_w,
local_size, -beta, ScalarConvert<int, scalar_t>::to(2) * alpha * beta / local_size,
THCTensor_(data)(state, gradInput));
THCudaCheck(hipGetLastError());
THCTensor_(free)(state, input);
THCTensor_(free)(state, gradOutput);
}
void THNN_(SpatialCrossMapLRN_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *output,
THCTensor *scale,
int size,
accreal alpha,
accreal beta,
accreal k)
{
THNN_(LRNforward)(state, input, output, scale, size, alpha, beta, k);
}
void THNN_(SpatialCrossMapLRN_updateGradInput)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCTensor *gradInput,
THCTensor *scale,
THCTensor *output,
int size,
accreal alpha,
accreal beta,
accreal k)
{
THNN_(LRNbackward)(state, input, output, gradOutput, gradInput, scale, size, alpha, beta, k);
}
#endif
| 2cc72f53b46fcd24f725b7b5418fd4db359f4f44.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THCUNN/generic/SpatialCrossMapLRN.cu"
#else
void THNN_(LRNforward)(THCState* state, THCTensor* input, THCTensor* output,
THCTensor* scale, int local_size, accreal alpha_, accreal beta_, accreal k_)
{
scalar_t alpha = ScalarConvert<accreal, scalar_t>::to(alpha_);
scalar_t beta = ScalarConvert<accreal, scalar_t>::to(beta_);
scalar_t k = ScalarConvert<accreal, scalar_t>::to(k_);
THCTensor_(resizeAs)(state, output, input);
THCTensor_(resizeAs)(state, scale, input);
int batchSize;
int nInputPlane;
int imsize_h;
int imsize_w;
if (input->dim() == 3) {
batchSize = 1;
nInputPlane = input->size(0);
imsize_h = input->size(1);
imsize_w = input->size(2);
}
else
{
batchSize = input->size(0);
nInputPlane = input->size(1);
imsize_h = input->size(2);
imsize_w = input->size(3);
}
input = THCTensor_(newContiguous)(state, input);
int n_threads = batchSize * imsize_h * imsize_w;
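// GET_BLOCKS/CUDA_NUM_THREADS are the stock Torch/THCUNN launch helpers:
// a fixed thread count per block and enough blocks to cover n_threads
// work items.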
LRNFillScale<scalar_t, accreal> <<<GET_BLOCKS(n_threads), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state)>>>(
n_threads, THCTensor_(data)(state, input), batchSize, nInputPlane, imsize_h, imsize_w, local_size,
alpha / local_size, k, THCTensor_(data)(state, scale));
n_threads *= nInputPlane;
THCudaCheck(cudaGetLastError());
LRNComputeOutput<<<GET_BLOCKS(n_threads), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state)>>>(
n_threads, THCTensor_(data)(state, input), THCTensor_(data)(state, scale), -beta, THCTensor_(data)(state, output));
THCudaCheck(cudaGetLastError());
THCTensor_(free)(state, input);
}
void THNN_(LRNbackward)(THCState* state, THCTensor* input, THCTensor* output,
THCTensor* gradOutput, THCTensor* gradInput, THCTensor* scale,
int local_size, accreal alpha_, accreal beta_, accreal k_)
{
scalar_t alpha = ScalarConvert<accreal, scalar_t>::to(alpha_);
scalar_t beta = ScalarConvert<accreal, scalar_t>::to(beta_);
scalar_t k = ScalarConvert<accreal, scalar_t>::to(k_);
(void) k;
THCTensor_(resizeAs)(state, gradInput, input);
int batchSize;
int nInputPlane;
int imsize_h;
int imsize_w;
if (input->dim() == 3) {
batchSize = 1;
nInputPlane = input->size(0);
imsize_h = input->size(1);
imsize_w = input->size(2);
}
else
{
batchSize = input->size(0);
nInputPlane = input->size(1);
imsize_h = input->size(2);
imsize_w = input->size(3);
}
input = THCTensor_(newContiguous)(state, input);
gradOutput = THCTensor_(newContiguous)(state, gradOutput);
int n_threads = batchSize * imsize_h * imsize_w;
LRNComputeDiff<scalar_t, accreal> <<<GET_BLOCKS(n_threads), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state)>>>(
n_threads, THCTensor_(data)(state, input), THCTensor_(data)(state, output),
THCTensor_(data)(state, scale), THCTensor_(data)(state, gradOutput), batchSize, nInputPlane, imsize_h, imsize_w,
local_size, -beta, ScalarConvert<int, scalar_t>::to(2) * alpha * beta / local_size,
THCTensor_(data)(state, gradInput));
THCudaCheck(cudaGetLastError());
THCTensor_(free)(state, input);
THCTensor_(free)(state, gradOutput);
}
void THNN_(SpatialCrossMapLRN_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *output,
THCTensor *scale,
int size,
accreal alpha,
accreal beta,
accreal k)
{
THNN_(LRNforward)(state, input, output, scale, size, alpha, beta, k);
}
void THNN_(SpatialCrossMapLRN_updateGradInput)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCTensor *gradInput,
THCTensor *scale,
THCTensor *output,
int size,
accreal alpha,
accreal beta,
accreal k)
{
THNN_(LRNbackward)(state, input, output, gradOutput, gradInput, scale, size, alpha, beta, k);
}
#endif
|
e8e9342d9cf9813c257c5a121ff425a9e2fe8052.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include "hist-equ.h"
#define BLOCKS 1
#define THREADS_PER_BLOCK 256
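// Note: every kernel in this file is launched as a single block of 256
// threads (BLOCKS == 1); each thread strides over the image with step
// THREADS_PER_BLOCK, so only one multiprocessor is ever occupied.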
PGM_IMG contrast_enhancement_g(PGM_IMG img_in)
{
PGM_IMG result;
int hist[256];
result.w = img_in.w;
result.h = img_in.h;
result.img = (unsigned char *)malloc(result.w * result.h * sizeof(unsigned char));
histogram(hist, img_in.img, img_in.h * img_in.w, 256);
histogram_equalization(result.img,img_in.img,hist,result.w*result.h, 256);
return result;
}
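/* histogram() and histogram_equalization() are declared in hist-equ.h and
 * defined elsewhere. For reference, a minimal sketch of the standard
 * CDF-based remap such helpers typically implement (illustrative only;
 * the real definitions may differ):
 *   lut[v] = clamp(round((cdf(v) - cdf_min) / (N - cdf_min) * (L - 1))) */
static void hist_equ_sketch(unsigned char *out, const unsigned char *in,
                            const int *hist, int img_size, int nbr_bin)
{
    int lut[256]; /* assumes nbr_bin <= 256 */
    int cdf = 0, min = 0, i = 0;
    while (min == 0 && i < nbr_bin) min = hist[i++]; /* first non-empty bin */
    int d = img_size - min; /* sketch only: assumes a non-constant image */
    for (i = 0; i < nbr_bin; i++) {
        cdf += hist[i];
        int v = (int)(((float)cdf - min) * (nbr_bin - 1) / d + 0.5f);
        lut[i] = v < 0 ? 0 : v;
    }
    for (i = 0; i < img_size; i++)
        out[i] = (unsigned char)lut[in[i]];
}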
PGM_IMG contrast_enhancement_g_gpu(PGM_IMG img_in)
{
PGM_IMG result;
//int hist[256];
result.w = img_in.w;
result.h = img_in.h;
int size = result.w * result.h * sizeof(unsigned char);
result.img = (unsigned char *)malloc(result.w * result.h * sizeof(unsigned char));
// allocate memory on gpu
unsigned char *result_img;
unsigned char *img_in_img;
int *hist;
int size2 = result.w * result.h;
hipMalloc( (void **)&hist, 256*sizeof(int));
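// NB: hist is never zeroed on the device in this file; this relies on
// histogram_gpu (declared in hist-equ.h) initializing it itself.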
hipMalloc( (void **)&img_in_img, size );
hipMalloc( (void **)&result_img, size );
hipMemcpy( img_in_img, img_in.img, size, hipMemcpyHostToDevice);
hipMemcpy( result_img, result.img, size, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( histogram_gpu), dim3(BLOCKS), dim3(THREADS_PER_BLOCK), 0, 0, hist, img_in_img, size2, 256);
hipLaunchKernelGGL(( histogram_equalization_gpu), dim3(BLOCKS), dim3(THREADS_PER_BLOCK), 0, 0, result_img, img_in_img, hist, size2, 256);
hipMemcpy( result.img, result_img, size, hipMemcpyDeviceToHost );
hipFree(result_img);
hipFree(img_in_img);
hipFree(hist);
return result;
}
PPM_IMG contrast_enhancement_c_rgb(PPM_IMG img_in)
{
PPM_IMG result;
int hist[256];
result.w = img_in.w;
result.h = img_in.h;
result.img_r = (unsigned char *)malloc(result.w * result.h * sizeof(unsigned char));
result.img_g = (unsigned char *)malloc(result.w * result.h * sizeof(unsigned char));
result.img_b = (unsigned char *)malloc(result.w * result.h * sizeof(unsigned char));
histogram(hist, img_in.img_r, img_in.h * img_in.w, 256);
histogram_equalization(result.img_r,img_in.img_r,hist,result.w*result.h, 256);
histogram(hist, img_in.img_g, img_in.h * img_in.w, 256);
histogram_equalization(result.img_g,img_in.img_g,hist,result.w*result.h, 256);
histogram(hist, img_in.img_b, img_in.h * img_in.w, 256);
histogram_equalization(result.img_b,img_in.img_b,hist,result.w*result.h, 256);
return result;
}
PPM_IMG contrast_enhancement_c_rgb_gpu(PPM_IMG img_in)
{
PPM_IMG result;
//int hist[256];
result.w = img_in.w;
result.h = img_in.h;
int size = result.w * result.h * sizeof(unsigned char);
result.img_r = (unsigned char *)malloc(result.w * result.h * sizeof(unsigned char));
result.img_g = (unsigned char *)malloc(result.w * result.h * sizeof(unsigned char));
result.img_b = (unsigned char *)malloc(result.w * result.h * sizeof(unsigned char));
// allocate memory on gpu
unsigned char *result_img_r;
unsigned char *result_img_g;
unsigned char *result_img_b;
unsigned char *img_in_img_r;
unsigned char *img_in_img_g;
unsigned char *img_in_img_b;
int *hist;
int size2 = result.w * result.h;
hipMalloc( (void **)&hist, 256*sizeof(int));
hipMalloc( (void **)&img_in_img_r, size );
hipMalloc( (void **)&img_in_img_g, size );
hipMalloc( (void **)&img_in_img_b, size );
hipMalloc( (void **)&result_img_r, size );
hipMalloc( (void **)&result_img_g, size );
hipMalloc( (void **)&result_img_b, size );
hipMemcpy( img_in_img_r, img_in.img_r, size, hipMemcpyHostToDevice);
hipMemcpy( img_in_img_g, img_in.img_g, size, hipMemcpyHostToDevice);
hipMemcpy( img_in_img_b, img_in.img_b, size, hipMemcpyHostToDevice);
hipMemcpy( result_img_r, result.img_r, size, hipMemcpyHostToDevice);
hipMemcpy( result_img_g, result.img_g, size, hipMemcpyHostToDevice);
hipMemcpy( result_img_b, result.img_b, size, hipMemcpyHostToDevice);
// fix: the kernels must consume the device buffers, not the host-side
// img_in/result pointers that were copied above
hipLaunchKernelGGL(( histogram_gpu), dim3(BLOCKS),dim3(THREADS_PER_BLOCK), 0, 0, hist, img_in_img_r, size2, 256);
hipLaunchKernelGGL(( histogram_equalization_gpu), dim3(BLOCKS),dim3(THREADS_PER_BLOCK), 0, 0, result_img_r,img_in_img_r,hist, size2, 256);
hipLaunchKernelGGL(( histogram_gpu), dim3(BLOCKS),dim3(THREADS_PER_BLOCK), 0, 0, hist, img_in_img_g, size2, 256);
hipLaunchKernelGGL(( histogram_equalization_gpu), dim3(BLOCKS),dim3(THREADS_PER_BLOCK), 0, 0, result_img_g,img_in_img_g,hist, size2, 256);
hipLaunchKernelGGL(( histogram_gpu), dim3(BLOCKS),dim3(THREADS_PER_BLOCK), 0, 0, hist, img_in_img_b, size2, 256);
hipLaunchKernelGGL(( histogram_equalization_gpu), dim3(BLOCKS),dim3(THREADS_PER_BLOCK), 0, 0, result_img_b,img_in_img_b,hist, size2, 256);
hipMemcpy( result.img_r, result_img_r, size, hipMemcpyDeviceToHost );
hipMemcpy( result.img_g, result_img_g, size, hipMemcpyDeviceToHost );
hipMemcpy( result.img_b, result_img_b, size, hipMemcpyDeviceToHost );
hipFree(result_img_r);
hipFree(result_img_g);
hipFree(result_img_b);
hipFree(img_in_img_r);
hipFree(img_in_img_g);
hipFree(img_in_img_b);
hipFree(hist);
return result;
}
PPM_IMG contrast_enhancement_c_yuv(PPM_IMG img_in)
{
YUV_IMG yuv_med;
PPM_IMG result;
unsigned char * y_equ;
int hist[256];
yuv_med = rgb2yuv(img_in);
y_equ = (unsigned char *)malloc(yuv_med.h*yuv_med.w*sizeof(unsigned char));
histogram(hist, yuv_med.img_y, yuv_med.h * yuv_med.w, 256);
histogram_equalization(y_equ,yuv_med.img_y,hist,yuv_med.h * yuv_med.w, 256);
free(yuv_med.img_y);
yuv_med.img_y = y_equ;
result = yuv2rgb(yuv_med);
free(yuv_med.img_y);
free(yuv_med.img_u);
free(yuv_med.img_v);
return result;
}
PPM_IMG contrast_enhancement_c_yuv_gpu(PPM_IMG img_in)
{
YUV_IMG yuv_med;
PPM_IMG result;
unsigned char * y_equ;
//int hist[256];
yuv_med = rgb2yuv_gpu(img_in);
int size = yuv_med.h * yuv_med.w;
y_equ = (unsigned char *)malloc(size*sizeof(unsigned char));
// allocate memmory on gpu
int *hist;
unsigned char* yuv_med_y;
unsigned char* y_equ_g;
hipMalloc( (void **)&hist, 256*sizeof(int));
hipMalloc( (void **)&yuv_med_y, size*sizeof(unsigned char) );
hipMalloc( (void **)&y_equ_g, size*sizeof(unsigned char) );
hipMemcpy( yuv_med_y, yuv_med.img_y, size*sizeof(unsigned char), hipMemcpyHostToDevice);
hipMemcpy( y_equ_g, y_equ, size*sizeof(unsigned char), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( histogram_gpu), dim3(BLOCKS), dim3(THREADS_PER_BLOCK), 0, 0, hist, yuv_med_y, size, 256);
hipLaunchKernelGGL(( histogram_equalization_gpu), dim3(BLOCKS), dim3(THREADS_PER_BLOCK), 0, 0, y_equ_g,yuv_med_y,hist,size, 256);
hipMemcpy( y_equ, y_equ_g, size*sizeof(unsigned char), hipMemcpyDeviceToHost);
free(yuv_med.img_y);
yuv_med.img_y = y_equ;
result = yuv2rgb_gpu(yuv_med);
free(yuv_med.img_y);
free(yuv_med.img_u);
free(yuv_med.img_v);
hipFree(hist);
hipFree(yuv_med_y);
hipFree(y_equ_g);
return result;
}
PPM_IMG contrast_enhancement_c_hsl(PPM_IMG img_in)
{
HSL_IMG hsl_med;
PPM_IMG result;
unsigned char * l_equ;
int hist[256];
hsl_med = rgb2hsl(img_in);
l_equ = (unsigned char *)malloc(hsl_med.height*hsl_med.width*sizeof(unsigned char));
histogram(hist, hsl_med.l, hsl_med.height * hsl_med.width, 256);
histogram_equalization(l_equ, hsl_med.l,hist,hsl_med.width*hsl_med.height, 256);
free(hsl_med.l);
hsl_med.l = l_equ;
result = hsl2rgb(hsl_med);
free(hsl_med.h);
free(hsl_med.s);
free(hsl_med.l);
return result;
}
PPM_IMG contrast_enhancement_c_hsl_gpu(PPM_IMG img_in)
{
HSL_IMG hsl_med;
PPM_IMG result;
unsigned char * l_equ;
//int hist[256];
hsl_med = rgb2hsl_gpu(img_in);
int size = hsl_med.height * hsl_med.width;
l_equ = (unsigned char *)malloc(size*sizeof(unsigned char));
// allocate memmory on gpu
int *hist;
unsigned char* hsl_med_l;
unsigned char* l_equ_g;
hipMalloc( (void **)&hist, 256*sizeof(int));
hipMalloc( (void **)&hsl_med_l, size*sizeof(unsigned char) );
hipMalloc( (void **)&l_equ_g, size*sizeof(unsigned char) );
hipMemcpy( hsl_med_l, hsl_med.l, size*sizeof(unsigned char), hipMemcpyHostToDevice);
hipMemcpy( l_equ_g, l_equ, size*sizeof(unsigned char), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( histogram_gpu), dim3(BLOCKS), dim3(THREADS_PER_BLOCK), 0, 0, hist, hsl_med_l, size, 256);
hipLaunchKernelGGL(( histogram_equalization_gpu), dim3(BLOCKS), dim3(THREADS_PER_BLOCK), 0, 0, l_equ_g,hsl_med_l,hist,size, 256);
hipMemcpy( l_equ, l_equ_g, size*sizeof(unsigned char), hipMemcpyDeviceToHost);
free(hsl_med.l);
hsl_med.l = l_equ;
result = hsl2rgb_gpu(hsl_med);
free(hsl_med.h);
free(hsl_med.s);
free(hsl_med.l);
hipFree(hist);
hipFree(hsl_med_l);
hipFree(l_equ_g);
return result;
}
//Convert RGB to HSL, assume R,G,B in [0, 255]
//Output H, S in [0.0, 1.0] and L in [0, 255]
HSL_IMG rgb2hsl(PPM_IMG img_in)
{
int i;
float H, S, L;
HSL_IMG img_out;// = (HSL_IMG *)malloc(sizeof(HSL_IMG));
img_out.width = img_in.w;
img_out.height = img_in.h;
img_out.h = (float *)malloc(img_in.w * img_in.h * sizeof(float));
img_out.s = (float *)malloc(img_in.w * img_in.h * sizeof(float));
img_out.l = (unsigned char *)malloc(img_in.w * img_in.h * sizeof(unsigned char));
for(i = 0; i < img_in.w*img_in.h; i ++){
float var_r = ( (float)img_in.img_r[i]/255 );//Convert RGB to [0,1]
float var_g = ( (float)img_in.img_g[i]/255 );
float var_b = ( (float)img_in.img_b[i]/255 );
float var_min = (var_r < var_g) ? var_r : var_g;
var_min = (var_min < var_b) ? var_min : var_b; //min. value of RGB
float var_max = (var_r > var_g) ? var_r : var_g;
var_max = (var_max > var_b) ? var_max : var_b; //max. value of RGB
float del_max = var_max - var_min; //Delta RGB value
L = ( var_max + var_min ) / 2;
if ( del_max == 0 )//This is a gray, no chroma...
{
H = 0;
S = 0;
}
else //Chromatic data...
{
if ( L < 0.5 )
S = del_max/(var_max+var_min);
else
S = del_max/(2-var_max-var_min );
float del_r = (((var_max-var_r)/6)+(del_max/2))/del_max;
float del_g = (((var_max-var_g)/6)+(del_max/2))/del_max;
float del_b = (((var_max-var_b)/6)+(del_max/2))/del_max;
if( var_r == var_max ){
H = del_b - del_g;
}
else{
if( var_g == var_max ){
H = (1.0/3.0) + del_r - del_b;
}
else{
H = (2.0/3.0) + del_g - del_r;
}
}
}
if ( H < 0 )
H += 1;
if ( H > 1 )
H -= 1;
img_out.h[i] = H;
img_out.s[i] = S;
img_out.l[i] = (unsigned char)(L*255);
}
return img_out;
}
// gpu version
HSL_IMG rgb2hsl_gpu(PPM_IMG img_in)
{
//int i;
//float H, S, L;
HSL_IMG img_out;// = (HSL_IMG *)malloc(sizeof(HSL_IMG));
img_out.width = img_in.w;
img_out.height = img_in.h;
img_out.h = (float *)malloc(img_in.w * img_in.h * sizeof(float));
img_out.s = (float *)malloc(img_in.w * img_in.h * sizeof(float));
img_out.l = (unsigned char *)malloc(img_in.w * img_in.h * sizeof(unsigned char));
// allocate gpu memory
unsigned char *r, *g, *b;
float *h, *s;
unsigned char *l;
int size = img_in.w * img_in.h;
hipMalloc((void **)&r, size*sizeof(unsigned char));
hipMalloc((void **)&g, size*sizeof(unsigned char));
hipMalloc((void **)&b, size*sizeof(unsigned char));
hipMalloc((void **)&h, size*sizeof(float));
hipMalloc((void **)&s, size*sizeof(float));
hipMalloc((void **)&l, size*sizeof(unsigned char));
// copy memory from host to device
hipMemcpy(r, img_in.img_r, size*sizeof(unsigned char), hipMemcpyHostToDevice);
hipMemcpy(g, img_in.img_g, size*sizeof(unsigned char), hipMemcpyHostToDevice);
hipMemcpy(b, img_in.img_b, size*sizeof(unsigned char), hipMemcpyHostToDevice);
hipMemcpy(h, img_out.h, size*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(s, img_out.s, size*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(l, img_out.l, size*sizeof(unsigned char), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( rgb2hsl_call), dim3(BLOCKS), dim3(THREADS_PER_BLOCK), 0, 0, r,g,b,h,s,l,img_in.w,img_in.h);
// copy memory from device to host
hipMemcpy(img_out.h, h, size*sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(img_out.s, s, size*sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(img_out.l, l, size*sizeof(unsigned char), hipMemcpyDeviceToHost);
// free gpu memory
hipFree(r);
hipFree(g);
hipFree(b);
hipFree(h);
hipFree(s);
hipFree(l);
return img_out;
}
__global__ void rgb2hsl_call( unsigned char* in_r, unsigned char* in_g, unsigned char* in_b,
float* out_h, float* out_s, unsigned char* out_l, int w, int h )
{
int size = w*h;
int i;
float H, S, L;
for(i = threadIdx.x; i < size; i += THREADS_PER_BLOCK)
{
float var_r = ( (float)in_r[i]/255 );//Convert RGB to [0,1]
float var_g = ( (float)in_g[i]/255 );
float var_b = ( (float)in_b[i]/255 );
float var_min = (var_r < var_g) ? var_r : var_g;
var_min = (var_min < var_b) ? var_min : var_b; //min. value of RGB
float var_max = (var_r > var_g) ? var_r : var_g;
var_max = (var_max > var_b) ? var_max : var_b; //max. value of RGB
float del_max = var_max - var_min; //Delta RGB value
L = ( var_max + var_min ) / 2;
if ( del_max == 0 )//This is a gray, no chroma...
{
H = 0;
S = 0;
}
else //Chromatic data...
{
if ( L < 0.5 )
S = del_max/(var_max+var_min);
else
S = del_max/(2-var_max-var_min );
float del_r = (((var_max-var_r)/6)+(del_max/2))/del_max;
float del_g = (((var_max-var_g)/6)+(del_max/2))/del_max;
float del_b = (((var_max-var_b)/6)+(del_max/2))/del_max;
if( var_r == var_max ){
H = del_b - del_g;
}
else{
if( var_g == var_max ){
H = (1.0/3.0) + del_r - del_b;
}
else{
H = (2.0/3.0) + del_g - del_r;
}
}
}
if ( H < 0 )
H += 1;
if ( H > 1 )
H -= 1;
out_h[i] = H;
out_s[i] = S;
out_l[i] = (unsigned char)(L*255);
}
}
float Hue_2_RGB( float v1, float v2, float vH ) //Function Hue_2_RGB
{
if ( vH < 0 ) vH += 1;
if ( vH > 1 ) vH -= 1;
if ( ( 6 * vH ) < 1 ) return ( v1 + ( v2 - v1 ) * 6 * vH );
if ( ( 2 * vH ) < 1 ) return ( v2 );
if ( ( 3 * vH ) < 2 ) return ( v1 + ( v2 - v1 ) * ( ( 2.0f/3.0f ) - vH ) * 6 );
return ( v1 );
}
//Convert HSL to RGB, assume H, S in [0.0, 1.0] and L in [0, 255]
//Output R,G,B in [0, 255]
PPM_IMG hsl2rgb(HSL_IMG img_in)
{
int i;
PPM_IMG result;
result.w = img_in.width;
result.h = img_in.height;
result.img_r = (unsigned char *)malloc(result.w * result.h * sizeof(unsigned char));
result.img_g = (unsigned char *)malloc(result.w * result.h * sizeof(unsigned char));
result.img_b = (unsigned char *)malloc(result.w * result.h * sizeof(unsigned char));
for(i = 0; i < img_in.width*img_in.height; i ++){
float H = img_in.h[i];
float S = img_in.s[i];
float L = img_in.l[i]/255.0f;
float var_1, var_2;
unsigned char r,g,b;
if ( S == 0 )
{
r = L * 255;
g = L * 255;
b = L * 255;
}
else
{
if ( L < 0.5 )
var_2 = L * ( 1 + S );
else
var_2 = ( L + S ) - ( S * L );
var_1 = 2 * L - var_2;
r = 255 * Hue_2_RGB( var_1, var_2, H + (1.0f/3.0f) );
g = 255 * Hue_2_RGB( var_1, var_2, H );
b = 255 * Hue_2_RGB( var_1, var_2, H - (1.0f/3.0f) );
}
result.img_r[i] = r;
result.img_g[i] = g;
result.img_b[i] = b;
}
return result;
}
// gpu version
PPM_IMG hsl2rgb_gpu(HSL_IMG img_in)
{
//int i;
PPM_IMG result;
result.w = img_in.width;
result.h = img_in.height;
result.img_r = (unsigned char *)malloc(result.w * result.h * sizeof(unsigned char));
result.img_g = (unsigned char *)malloc(result.w * result.h * sizeof(unsigned char));
result.img_b = (unsigned char *)malloc(result.w * result.h * sizeof(unsigned char));
// allocate gpu memory
float *h, *s;
unsigned char *l;
unsigned char *r, *g, *b;
int size = result.w*result.h;
hipMalloc((void **)&r, size*sizeof(unsigned char));
hipMalloc((void **)&g, size*sizeof(unsigned char));
hipMalloc((void **)&b, size*sizeof(unsigned char));
hipMalloc((void **)&h, size*sizeof(float));
hipMalloc((void **)&s, size*sizeof(float));
hipMalloc((void **)&l, size*sizeof(unsigned char));
// copy memory from host to device
hipMemcpy(r, result.img_r, size*sizeof(unsigned char), hipMemcpyHostToDevice);
hipMemcpy(g, result.img_g, size*sizeof(unsigned char), hipMemcpyHostToDevice);
hipMemcpy(b, result.img_b, size*sizeof(unsigned char), hipMemcpyHostToDevice);
hipMemcpy(h, img_in.h, size*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(s, img_in.s, size*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(l, img_in.l, size*sizeof(unsigned char), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( hsl2rgb_call), dim3(BLOCKS), dim3(THREADS_PER_BLOCK), 0, 0, h,s,l,r,g,b,result.w,result.h);
// copy memory from device to host
hipMemcpy(result.img_r, r, size*sizeof(unsigned char), hipMemcpyDeviceToHost);
hipMemcpy(result.img_g, g, size*sizeof(unsigned char), hipMemcpyDeviceToHost);
hipMemcpy(result.img_b, b, size*sizeof(unsigned char), hipMemcpyDeviceToHost);
// free gpu memory
hipFree(r);
hipFree(g);
hipFree(b);
hipFree(h);
hipFree(s);
hipFree(l);
return result;
}
__global__ void hsl2rgb_call( float *in_h, float *in_s, unsigned char *in_l,
unsigned char *out_r, unsigned char *out_g, unsigned char *out_b, int w, int h)
{
int i;
int size = w*h;
for(i = threadIdx.x; i < size; i += THREADS_PER_BLOCK){
float H = in_h[i];
float S = in_s[i];
float L = in_l[i]/255.0f;
float var_1, var_2;
unsigned char r,g,b;
if ( S == 0 )
{
r = L * 255;
g = L * 255;
b = L * 255;
}
else
{
if ( L < 0.5 )
var_2 = L * ( 1 + S );
else
var_2 = ( L + S ) - ( S * L );
var_1 = 2 * L - var_2;
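// Hue_2_RGB() is a host-only helper, so the hue-to-channel ramp is
// inlined here; j = 0, 1, 2 evaluates it at H+1/3, H and H-1/3,
// yielding the R, G and B channels respectively.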
float value[3];
float vH;
for (int j=0;j<3;j++)
{
vH = H - (j-1)*(1.0f/3.0f);
if ( vH < 0 ) vH += 1;
if ( vH > 1 ) vH -= 1;
if ( ( 6 * vH ) < 1 )
value[j] = var_1 + ( var_2 - var_1 ) * 6 * vH ;
else if ( ( 2 * vH ) < 1 )
value[j] = var_2 ;
else if ( ( 3 * vH ) < 2 )
value[j] = var_1 + ( var_2 - var_1 ) * ( ( 2.0f/3.0f ) - vH ) * 6;
else
value[j] = var_1;
}
//r = 255 * Hue_2_RGB( var_1, var_2, H + (1.0f/3.0f) );
//g = 255 * Hue_2_RGB( var_1, var_2, H );
//b = 255 * Hue_2_RGB( var_1, var_2, H - (1.0f/3.0f) );
r = 255 * value[0];
g = 255 * value[1];
b = 255 * value[2];
}
out_r[i] = r;
out_g[i] = g;
out_b[i] = b;
}
}
//Convert RGB to YUV, all components in [0, 255]
YUV_IMG rgb2yuv(PPM_IMG img_in)
{
YUV_IMG img_out;
int i;//, j;
unsigned char r, g, b;
unsigned char y, cb, cr;
img_out.w = img_in.w;
img_out.h = img_in.h;
img_out.img_y = (unsigned char *)malloc(sizeof(unsigned char)*img_out.w*img_out.h);
img_out.img_u = (unsigned char *)malloc(sizeof(unsigned char)*img_out.w*img_out.h);
img_out.img_v = (unsigned char *)malloc(sizeof(unsigned char)*img_out.w*img_out.h);
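// BT.601-style full-range RGB -> YCbCr: Y = 0.299R + 0.587G + 0.114B,
// with Cb and Cr offset by +128; yuv2rgb below applies the inverse.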
for(i = 0; i < img_out.w*img_out.h; i ++){
r = img_in.img_r[i];
g = img_in.img_g[i];
b = img_in.img_b[i];
y = (unsigned char)( 0.299*r + 0.587*g + 0.114*b);
cb = (unsigned char)(-0.169*r - 0.331*g + 0.499*b + 128);
cr = (unsigned char)( 0.499*r - 0.418*g - 0.0813*b + 128);
img_out.img_y[i] = y;
img_out.img_u[i] = cb;
img_out.img_v[i] = cr;
}
return img_out;
}
// gpu version
YUV_IMG rgb2yuv_gpu(PPM_IMG img_in)
{
YUV_IMG img_out;
img_out.w = img_in.w;
img_out.h = img_in.h;
img_out.img_y = (unsigned char *)malloc(sizeof(unsigned char)*img_out.w*img_out.h);
img_out.img_u = (unsigned char *)malloc(sizeof(unsigned char)*img_out.w*img_out.h);
img_out.img_v = (unsigned char *)malloc(sizeof(unsigned char)*img_out.w*img_out.h);
// allocate memory on gpu
unsigned char *r, *g, *b, *y, *u, *v;
int size = img_out.w*img_out.h;
hipMalloc((void **)&r, size*sizeof(unsigned char));
hipMalloc((void **)&g, size*sizeof(unsigned char));
hipMalloc((void **)&b, size*sizeof(unsigned char));
hipMalloc((void **)&y, size*sizeof(unsigned char));
hipMalloc((void **)&u, size*sizeof(unsigned char));
hipMalloc((void **)&v, size*sizeof(unsigned char));
// copy memory from host to device
hipMemcpy(r, img_in.img_r, size*sizeof(unsigned char), hipMemcpyHostToDevice);
hipMemcpy(g, img_in.img_g, size*sizeof(unsigned char), hipMemcpyHostToDevice);
hipMemcpy(b, img_in.img_b, size*sizeof(unsigned char), hipMemcpyHostToDevice);
hipMemcpy(y, img_out.img_y, size*sizeof(unsigned char), hipMemcpyHostToDevice);
hipMemcpy(u, img_out.img_u, size*sizeof(unsigned char), hipMemcpyHostToDevice);
hipMemcpy(v, img_out.img_v, size*sizeof(unsigned char), hipMemcpyHostToDevice);
// call gpu function
hipLaunchKernelGGL(( rgb2yuv_call), dim3(BLOCKS), dim3(THREADS_PER_BLOCK), 0, 0, r,g,b,y,u,v,img_out.w,img_out.h);
// copy memory from device to host
hipMemcpy(img_out.img_y, y, size*sizeof(unsigned char), hipMemcpyDeviceToHost);
hipMemcpy(img_out.img_u, u, size*sizeof(unsigned char), hipMemcpyDeviceToHost);
hipMemcpy(img_out.img_v, v, size*sizeof(unsigned char), hipMemcpyDeviceToHost);
// free gpu memory
hipFree(r);
hipFree(g);
hipFree(b);
hipFree(y);
hipFree(u);
hipFree(v);
return img_out;
}
__global__ void rgb2yuv_call( unsigned char *in_r, unsigned char *in_g, unsigned char *in_b,
unsigned char *out_y, unsigned char *out_u, unsigned char *out_v, int w, int h)
{
int i;
unsigned char r, g, b;
unsigned char y, cb, cr;
int size = w*h;
for(i = threadIdx.x; i < size; i += THREADS_PER_BLOCK){
r = in_r[i];
g = in_g[i];
b = in_b[i];
y = (unsigned char)( 0.299*r + 0.587*g + 0.114*b);
cb = (unsigned char)(-0.169*r - 0.331*g + 0.499*b + 128);
cr = (unsigned char)( 0.499*r - 0.418*g - 0.0813*b + 128);
out_y[i] = y;
out_u[i] = cb;
out_v[i] = cr;
}
}
unsigned char clip_rgb(int x)
{
if(x > 255)
return 255;
if(x < 0)
return 0;
return (unsigned char)x;
}
//Convert YUV to RGB, all components in [0, 255]
PPM_IMG yuv2rgb(YUV_IMG img_in)
{
PPM_IMG img_out;
int i;
int rt,gt,bt;
int y, cb, cr;
img_out.w = img_in.w;
img_out.h = img_in.h;
img_out.img_r = (unsigned char *)malloc(sizeof(unsigned char)*img_out.w*img_out.h);
img_out.img_g = (unsigned char *)malloc(sizeof(unsigned char)*img_out.w*img_out.h);
img_out.img_b = (unsigned char *)malloc(sizeof(unsigned char)*img_out.w*img_out.h);
for(i = 0; i < img_out.w*img_out.h; i ++){
y = (int)img_in.img_y[i];
cb = (int)img_in.img_u[i] - 128;
cr = (int)img_in.img_v[i] - 128;
rt = (int)( y + 1.402*cr);
gt = (int)( y - 0.344*cb - 0.714*cr);
bt = (int)( y + 1.772*cb);
img_out.img_r[i] = clip_rgb(rt);
img_out.img_g[i] = clip_rgb(gt);
img_out.img_b[i] = clip_rgb(bt);
}
return img_out;
}
// gpu version
PPM_IMG yuv2rgb_gpu(YUV_IMG img_in)
{
PPM_IMG img_out;
img_out.w = img_in.w;
img_out.h = img_in.h;
img_out.img_r = (unsigned char *)malloc(sizeof(unsigned char)*img_out.w*img_out.h);
img_out.img_g = (unsigned char *)malloc(sizeof(unsigned char)*img_out.w*img_out.h);
img_out.img_b = (unsigned char *)malloc(sizeof(unsigned char)*img_out.w*img_out.h);
// allocate memory on gpu
unsigned char *r, *g, *b, *y, *u, *v;
int size = img_out.w*img_out.h;
hipMalloc((void **)&r, size*sizeof(unsigned char));
hipMalloc((void **)&g, size*sizeof(unsigned char));
hipMalloc((void **)&b, size*sizeof(unsigned char));
hipMalloc((void **)&y, size*sizeof(unsigned char));
hipMalloc((void **)&u, size*sizeof(unsigned char));
hipMalloc((void **)&v, size*sizeof(unsigned char));
// copy memory from host to device
hipMemcpy(r, img_out.img_r, size*sizeof(unsigned char), hipMemcpyHostToDevice);
hipMemcpy(g, img_out.img_g, size*sizeof(unsigned char), hipMemcpyHostToDevice);
hipMemcpy(b, img_out.img_b, size*sizeof(unsigned char), hipMemcpyHostToDevice);
hipMemcpy(y, img_in.img_y, size*sizeof(unsigned char), hipMemcpyHostToDevice);
hipMemcpy(u, img_in.img_u, size*sizeof(unsigned char), hipMemcpyHostToDevice);
hipMemcpy(v, img_in.img_v, size*sizeof(unsigned char), hipMemcpyHostToDevice);
// call gpu function
hipLaunchKernelGGL(( yuv2rgb_call), dim3(BLOCKS), dim3(THREADS_PER_BLOCK), 0, 0, y,u,v,r,g,b,img_out.w,img_out.h);
// copy memory from device to host
hipMemcpy(img_out.img_r, r, size*sizeof(unsigned char), hipMemcpyDeviceToHost);
hipMemcpy(img_out.img_g, g, size*sizeof(unsigned char), hipMemcpyDeviceToHost);
hipMemcpy(img_out.img_b, b, size*sizeof(unsigned char), hipMemcpyDeviceToHost);
// free gpu memory
hipFree(r);
hipFree(g);
hipFree(b);
hipFree(y);
hipFree(u);
hipFree(v);
return img_out;
}
__global__ void yuv2rgb_call( unsigned char *in_y, unsigned char *in_u, unsigned char *in_v,
unsigned char *out_r, unsigned char *out_g, unsigned char *out_b, int w, int h)
{
int i;
int rt,gt,bt;
int y, cb, cr;
int size = w*h;
for(i = threadIdx.x; i < size; i +=THREADS_PER_BLOCK ){
y = (int)in_y[i];
cb = (int)in_u[i] - 128;
cr = (int)in_v[i] - 128;
rt = (int)( y + 1.402*cr);
gt = (int)( y - 0.344*cb - 0.714*cr);
bt = (int)( y + 1.772*cb);
//out_r[i] = clip_rgb(rt);
//out_g[i] = clip_rgb(gt);
//out_b[i] = clip_rgb(bt);
//out_r[i] = (unsigned char)( (rt>255?255:rt)<0?0:rt );
//out_g[i] = (unsigned char)( (gt>255?255:rt)<0?0:gt );
//out_b[i] = (unsigned char)( (bt>255?255:rt)<0?0:bt );
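// clip_rgb() is host-only, so the [0,255] clamp is written out inline: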
if(rt>255)
out_r[i] = 255;
else if(rt<0)
out_r[i] = 0;
else
out_r[i] = (unsigned char)(rt);
if(gt>255)
out_g[i] = 255;
else if(gt<0)
out_g[i] = 0;
else
out_g[i] = (unsigned char)(gt);
if(bt>255)
out_b[i] = 255;
else if(bt<0)
out_b[i] = 0;
else
out_b[i] = (unsigned char)(bt);
}
}
| e8e9342d9cf9813c257c5a121ff425a9e2fe8052.cu | #include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include "hist-equ.h"
#define BLOCKS 1
#define THREADS_PER_BLOCK 256
PGM_IMG contrast_enhancement_g(PGM_IMG img_in)
{
PGM_IMG result;
int hist[256];
result.w = img_in.w;
result.h = img_in.h;
result.img = (unsigned char *)malloc(result.w * result.h * sizeof(unsigned char));
histogram(hist, img_in.img, img_in.h * img_in.w, 256);
histogram_equalization(result.img,img_in.img,hist,result.w*result.h, 256);
return result;
}
PGM_IMG contrast_enhancement_g_gpu(PGM_IMG img_in)
{
PGM_IMG result;
//int hist[256];
result.w = img_in.w;
result.h = img_in.h;
int size = result.w * result.h * sizeof(unsigned char);
result.img = (unsigned char *)malloc(result.w * result.h * sizeof(unsigned char));
// allocate memory on gpu
unsigned char *result_img;
unsigned char *img_in_img;
int *hist;
int size2 = result.w * result.h;
cudaMalloc( (void **)&hist, 256*sizeof(int));
cudaMalloc( (void **)&img_in_img, size );
cudaMalloc( (void **)&result_img, size );
cudaMemcpy( img_in_img, img_in.img, size, cudaMemcpyHostToDevice);
cudaMemcpy( result_img, result.img, size, cudaMemcpyHostToDevice);
histogram_gpu<<<BLOCKS, THREADS_PER_BLOCK>>>(hist, img_in_img, size2, 256);
histogram_equalization_gpu<<<BLOCKS, THREADS_PER_BLOCK>>>(result_img, img_in_img, hist, size2, 256);
cudaMemcpy( result.img, result_img, size, cudaMemcpyDeviceToHost );
cudaFree(result_img);
cudaFree(img_in_img);
cudaFree(hist);
return result;
}
PPM_IMG contrast_enhancement_c_rgb(PPM_IMG img_in)
{
PPM_IMG result;
int hist[256];
result.w = img_in.w;
result.h = img_in.h;
result.img_r = (unsigned char *)malloc(result.w * result.h * sizeof(unsigned char));
result.img_g = (unsigned char *)malloc(result.w * result.h * sizeof(unsigned char));
result.img_b = (unsigned char *)malloc(result.w * result.h * sizeof(unsigned char));
histogram(hist, img_in.img_r, img_in.h * img_in.w, 256);
histogram_equalization(result.img_r,img_in.img_r,hist,result.w*result.h, 256);
histogram(hist, img_in.img_g, img_in.h * img_in.w, 256);
histogram_equalization(result.img_g,img_in.img_g,hist,result.w*result.h, 256);
histogram(hist, img_in.img_b, img_in.h * img_in.w, 256);
histogram_equalization(result.img_b,img_in.img_b,hist,result.w*result.h, 256);
return result;
}
PPM_IMG contrast_enhancement_c_rgb_gpu(PPM_IMG img_in)
{
PPM_IMG result;
//int hist[256];
result.w = img_in.w;
result.h = img_in.h;
int size = result.w * result.h * sizeof(unsigned char);
result.img_r = (unsigned char *)malloc(result.w * result.h * sizeof(unsigned char));
result.img_g = (unsigned char *)malloc(result.w * result.h * sizeof(unsigned char));
result.img_b = (unsigned char *)malloc(result.w * result.h * sizeof(unsigned char));
// allocate memory on gpu
unsigned char *result_img_r;
unsigned char *result_img_g;
unsigned char *result_img_b;
unsigned char *img_in_img_r;
unsigned char *img_in_img_g;
unsigned char *img_in_img_b;
int *hist;
int size2 = result.w * result.h;
cudaMalloc( (void **)&hist, 256*sizeof(int));
cudaMalloc( (void **)&img_in_img_r, size );
cudaMalloc( (void **)&img_in_img_g, size );
cudaMalloc( (void **)&img_in_img_b, size );
cudaMalloc( (void **)&result_img_r, size );
cudaMalloc( (void **)&result_img_g, size );
cudaMalloc( (void **)&result_img_b, size );
cudaMemcpy( img_in_img_r, img_in.img_r, size, cudaMemcpyHostToDevice);
cudaMemcpy( img_in_img_g, img_in.img_g, size, cudaMemcpyHostToDevice);
cudaMemcpy( img_in_img_b, img_in.img_b, size, cudaMemcpyHostToDevice);
cudaMemcpy( result_img_r, result.img_r, size, cudaMemcpyHostToDevice);
cudaMemcpy( result_img_g, result.img_g, size, cudaMemcpyHostToDevice);
cudaMemcpy( result_img_b, result.img_b, size, cudaMemcpyHostToDevice);
// fix: the kernels must consume the device buffers, not the host-side
// img_in/result pointers that were copied above
histogram_gpu<<<BLOCKS,THREADS_PER_BLOCK>>>(hist, img_in_img_r, size2, 256);
histogram_equalization_gpu<<<BLOCKS,THREADS_PER_BLOCK>>>(result_img_r,img_in_img_r,hist, size2, 256);
histogram_gpu<<<BLOCKS,THREADS_PER_BLOCK>>>(hist, img_in_img_g, size2, 256);
histogram_equalization_gpu<<<BLOCKS,THREADS_PER_BLOCK>>>(result_img_g,img_in_img_g,hist, size2, 256);
histogram_gpu<<<BLOCKS,THREADS_PER_BLOCK>>>(hist, img_in_img_b, size2, 256);
histogram_equalization_gpu<<<BLOCKS,THREADS_PER_BLOCK>>>(result_img_b,img_in_img_b,hist, size2, 256);
cudaMemcpy( result.img_r, result_img_r, size, cudaMemcpyDeviceToHost );
cudaMemcpy( result.img_g, result_img_g, size, cudaMemcpyDeviceToHost );
cudaMemcpy( result.img_b, result_img_b, size, cudaMemcpyDeviceToHost );
cudaFree(result_img_r);
cudaFree(result_img_g);
cudaFree(result_img_b);
cudaFree(img_in_img_r);
cudaFree(img_in_img_g);
cudaFree(img_in_img_b);
cudaFree(hist);
return result;
}
PPM_IMG contrast_enhancement_c_yuv(PPM_IMG img_in)
{
YUV_IMG yuv_med;
PPM_IMG result;
unsigned char * y_equ;
int hist[256];
yuv_med = rgb2yuv(img_in);
y_equ = (unsigned char *)malloc(yuv_med.h*yuv_med.w*sizeof(unsigned char));
histogram(hist, yuv_med.img_y, yuv_med.h * yuv_med.w, 256);
histogram_equalization(y_equ,yuv_med.img_y,hist,yuv_med.h * yuv_med.w, 256);
free(yuv_med.img_y);
yuv_med.img_y = y_equ;
result = yuv2rgb(yuv_med);
free(yuv_med.img_y);
free(yuv_med.img_u);
free(yuv_med.img_v);
return result;
}
PPM_IMG contrast_enhancement_c_yuv_gpu(PPM_IMG img_in)
{
YUV_IMG yuv_med;
PPM_IMG result;
unsigned char * y_equ;
//int hist[256];
yuv_med = rgb2yuv_gpu(img_in);
int size = yuv_med.h * yuv_med.w;
y_equ = (unsigned char *)malloc(size*sizeof(unsigned char));
// allocate memmory on gpu
int *hist;
unsigned char* yuv_med_y;
unsigned char* y_equ_g;
cudaMalloc( (void **)&hist, 256*sizeof(int));
cudaMalloc( (void **)&yuv_med_y, size*sizeof(unsigned char) );
cudaMalloc( (void **)&y_equ_g, size*sizeof(unsigned char) );
cudaMemcpy( yuv_med_y, yuv_med.img_y, size*sizeof(unsigned char), cudaMemcpyHostToDevice);
cudaMemcpy( y_equ_g, y_equ, size*sizeof(unsigned char), cudaMemcpyHostToDevice);
histogram_gpu<<<BLOCKS, THREADS_PER_BLOCK>>>(hist, yuv_med_y, size, 256);
histogram_equalization_gpu<<<BLOCKS, THREADS_PER_BLOCK>>>(y_equ_g,yuv_med_y,hist,size, 256);
cudaMemcpy( y_equ, y_equ_g, size*sizeof(unsigned char), cudaMemcpyDeviceToHost);
free(yuv_med.img_y);
yuv_med.img_y = y_equ;
result = yuv2rgb_gpu(yuv_med);
free(yuv_med.img_y);
free(yuv_med.img_u);
free(yuv_med.img_v);
cudaFree(hist);
cudaFree(yuv_med_y);
cudaFree(y_equ_g);
return result;
}
PPM_IMG contrast_enhancement_c_hsl(PPM_IMG img_in)
{
HSL_IMG hsl_med;
PPM_IMG result;
unsigned char * l_equ;
int hist[256];
hsl_med = rgb2hsl(img_in);
l_equ = (unsigned char *)malloc(hsl_med.height*hsl_med.width*sizeof(unsigned char));
histogram(hist, hsl_med.l, hsl_med.height * hsl_med.width, 256);
histogram_equalization(l_equ, hsl_med.l,hist,hsl_med.width*hsl_med.height, 256);
free(hsl_med.l);
hsl_med.l = l_equ;
result = hsl2rgb(hsl_med);
free(hsl_med.h);
free(hsl_med.s);
free(hsl_med.l);
return result;
}
PPM_IMG contrast_enhancement_c_hsl_gpu(PPM_IMG img_in)
{
HSL_IMG hsl_med;
PPM_IMG result;
unsigned char * l_equ;
//int hist[256];
hsl_med = rgb2hsl_gpu(img_in);
int size = hsl_med.height * hsl_med.width;
l_equ = (unsigned char *)malloc(size*sizeof(unsigned char));
// allocate memmory on gpu
int *hist;
unsigned char* hsl_med_l;
unsigned char* l_equ_g;
cudaMalloc( (void **)&hist, 256*sizeof(int));
cudaMalloc( (void **)&hsl_med_l, size*sizeof(unsigned char) );
cudaMalloc( (void **)&l_equ_g, size*sizeof(unsigned char) );
cudaMemcpy( hsl_med_l, hsl_med.l, size*sizeof(unsigned char), cudaMemcpyHostToDevice);
cudaMemcpy( l_equ_g, l_equ, size*sizeof(unsigned char), cudaMemcpyHostToDevice);
histogram_gpu<<<BLOCKS, THREADS_PER_BLOCK>>>(hist, hsl_med_l, size, 256);
histogram_equalization_gpu<<<BLOCKS, THREADS_PER_BLOCK>>>(l_equ_g,hsl_med_l,hist,size, 256);
cudaMemcpy( l_equ, l_equ_g, size*sizeof(unsigned char), cudaMemcpyDeviceToHost);
free(hsl_med.l);
hsl_med.l = l_equ;
result = hsl2rgb_gpu(hsl_med);
free(hsl_med.h);
free(hsl_med.s);
free(hsl_med.l);
cudaFree(hist);
cudaFree(hsl_med_l);
cudaFree(l_equ_g);
return result;
}
//Convert RGB to HSL, assume R,G,B in [0, 255]
//Output H, S in [0.0, 1.0] and L in [0, 255]
HSL_IMG rgb2hsl(PPM_IMG img_in)
{
int i;
float H, S, L;
HSL_IMG img_out;// = (HSL_IMG *)malloc(sizeof(HSL_IMG));
img_out.width = img_in.w;
img_out.height = img_in.h;
img_out.h = (float *)malloc(img_in.w * img_in.h * sizeof(float));
img_out.s = (float *)malloc(img_in.w * img_in.h * sizeof(float));
img_out.l = (unsigned char *)malloc(img_in.w * img_in.h * sizeof(unsigned char));
for(i = 0; i < img_in.w*img_in.h; i ++){
float var_r = ( (float)img_in.img_r[i]/255 );//Convert RGB to [0,1]
float var_g = ( (float)img_in.img_g[i]/255 );
float var_b = ( (float)img_in.img_b[i]/255 );
float var_min = (var_r < var_g) ? var_r : var_g;
var_min = (var_min < var_b) ? var_min : var_b; //min. value of RGB
float var_max = (var_r > var_g) ? var_r : var_g;
var_max = (var_max > var_b) ? var_max : var_b; //max. value of RGB
float del_max = var_max - var_min; //Delta RGB value
L = ( var_max + var_min ) / 2;
if ( del_max == 0 )//This is a gray, no chroma...
{
H = 0;
S = 0;
}
else //Chromatic data...
{
if ( L < 0.5 )
S = del_max/(var_max+var_min);
else
S = del_max/(2-var_max-var_min );
float del_r = (((var_max-var_r)/6)+(del_max/2))/del_max;
float del_g = (((var_max-var_g)/6)+(del_max/2))/del_max;
float del_b = (((var_max-var_b)/6)+(del_max/2))/del_max;
if( var_r == var_max ){
H = del_b - del_g;
}
else{
if( var_g == var_max ){
H = (1.0/3.0) + del_r - del_b;
}
else{
H = (2.0/3.0) + del_g - del_r;
}
}
}
if ( H < 0 )
H += 1;
if ( H > 1 )
H -= 1;
img_out.h[i] = H;
img_out.s[i] = S;
img_out.l[i] = (unsigned char)(L*255);
}
return img_out;
}
// gpu version
HSL_IMG rgb2hsl_gpu(PPM_IMG img_in)
{
//int i;
//float H, S, L;
HSL_IMG img_out;// = (HSL_IMG *)malloc(sizeof(HSL_IMG));
img_out.width = img_in.w;
img_out.height = img_in.h;
img_out.h = (float *)malloc(img_in.w * img_in.h * sizeof(float));
img_out.s = (float *)malloc(img_in.w * img_in.h * sizeof(float));
img_out.l = (unsigned char *)malloc(img_in.w * img_in.h * sizeof(unsigned char));
// allocate gpu memory
unsigned char *r, *g, *b;
float *h, *s;
unsigned char *l;
int size = img_in.w * img_in.h;
cudaMalloc((void **)&r, size*sizeof(unsigned char));
cudaMalloc((void **)&g, size*sizeof(unsigned char));
cudaMalloc((void **)&b, size*sizeof(unsigned char));
cudaMalloc((void **)&h, size*sizeof(float));
cudaMalloc((void **)&s, size*sizeof(float));
cudaMalloc((void **)&l, size*sizeof(unsigned char));
// copy memory from host to device
cudaMemcpy(r, img_in.img_r, size*sizeof(unsigned char), cudaMemcpyHostToDevice);
cudaMemcpy(g, img_in.img_g, size*sizeof(unsigned char), cudaMemcpyHostToDevice);
cudaMemcpy(b, img_in.img_b, size*sizeof(unsigned char), cudaMemcpyHostToDevice);
cudaMemcpy(h, img_out.h, size*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(s, img_out.s, size*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(l, img_out.l, size*sizeof(unsigned char), cudaMemcpyHostToDevice);
rgb2hsl_call<<<BLOCKS, THREADS_PER_BLOCK>>>(r,g,b,h,s,l,img_in.w,img_in.h);
// copy memory from device to host
cudaMemcpy(img_out.h, h, size*sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(img_out.s, s, size*sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(img_out.l, l, size*sizeof(unsigned char), cudaMemcpyDeviceToHost);
// free gpu memory
cudaFree(r);
cudaFree(g);
cudaFree(b);
cudaFree(h);
cudaFree(s);
cudaFree(l);
return img_out;
}
__global__ void rgb2hsl_call( unsigned char* in_r, unsigned char* in_g, unsigned char* in_b,
float* out_h, float* out_s, unsigned char* out_l, int w, int h )
{
int size = w*h;
int i;
float H, S, L;
for(i = threadIdx.x; i < size; i += THREADS_PER_BLOCK)
{
float var_r = ( (float)in_r[i]/255 );//Convert RGB to [0,1]
float var_g = ( (float)in_g[i]/255 );
float var_b = ( (float)in_b[i]/255 );
float var_min = (var_r < var_g) ? var_r : var_g;
var_min = (var_min < var_b) ? var_min : var_b; //min. value of RGB
float var_max = (var_r > var_g) ? var_r : var_g;
var_max = (var_max > var_b) ? var_max : var_b; //max. value of RGB
float del_max = var_max - var_min; //Delta RGB value
L = ( var_max + var_min ) / 2;
if ( del_max == 0 )//This is a gray, no chroma...
{
H = 0;
S = 0;
}
else //Chromatic data...
{
if ( L < 0.5 )
S = del_max/(var_max+var_min);
else
S = del_max/(2-var_max-var_min );
float del_r = (((var_max-var_r)/6)+(del_max/2))/del_max;
float del_g = (((var_max-var_g)/6)+(del_max/2))/del_max;
float del_b = (((var_max-var_b)/6)+(del_max/2))/del_max;
if( var_r == var_max ){
H = del_b - del_g;
}
else{
if( var_g == var_max ){
H = (1.0/3.0) + del_r - del_b;
}
else{
H = (2.0/3.0) + del_g - del_r;
}
}
}
if ( H < 0 )
H += 1;
if ( H > 1 )
H -= 1;
out_h[i] = H;
out_s[i] = S;
out_l[i] = (unsigned char)(L*255);
}
}
float Hue_2_RGB( float v1, float v2, float vH ) //Function Hue_2_RGB
{
if ( vH < 0 ) vH += 1;
if ( vH > 1 ) vH -= 1;
if ( ( 6 * vH ) < 1 ) return ( v1 + ( v2 - v1 ) * 6 * vH );
if ( ( 2 * vH ) < 1 ) return ( v2 );
if ( ( 3 * vH ) < 2 ) return ( v1 + ( v2 - v1 ) * ( ( 2.0f/3.0f ) - vH ) * 6 );
return ( v1 );
}
//Convert HSL to RGB, assume H, S in [0.0, 1.0] and L in [0, 255]
//Output R,G,B in [0, 255]
PPM_IMG hsl2rgb(HSL_IMG img_in)
{
int i;
PPM_IMG result;
result.w = img_in.width;
result.h = img_in.height;
result.img_r = (unsigned char *)malloc(result.w * result.h * sizeof(unsigned char));
result.img_g = (unsigned char *)malloc(result.w * result.h * sizeof(unsigned char));
result.img_b = (unsigned char *)malloc(result.w * result.h * sizeof(unsigned char));
for(i = 0; i < img_in.width*img_in.height; i ++){
float H = img_in.h[i];
float S = img_in.s[i];
float L = img_in.l[i]/255.0f;
float var_1, var_2;
unsigned char r,g,b;
if ( S == 0 )
{
r = L * 255;
g = L * 255;
b = L * 255;
}
else
{
if ( L < 0.5 )
var_2 = L * ( 1 + S );
else
var_2 = ( L + S ) - ( S * L );
var_1 = 2 * L - var_2;
r = 255 * Hue_2_RGB( var_1, var_2, H + (1.0f/3.0f) );
g = 255 * Hue_2_RGB( var_1, var_2, H );
b = 255 * Hue_2_RGB( var_1, var_2, H - (1.0f/3.0f) );
}
result.img_r[i] = r;
result.img_g[i] = g;
result.img_b[i] = b;
}
return result;
}
// gpu version
PPM_IMG hsl2rgb_gpu(HSL_IMG img_in)
{
//int i;
PPM_IMG result;
result.w = img_in.width;
result.h = img_in.height;
result.img_r = (unsigned char *)malloc(result.w * result.h * sizeof(unsigned char));
result.img_g = (unsigned char *)malloc(result.w * result.h * sizeof(unsigned char));
result.img_b = (unsigned char *)malloc(result.w * result.h * sizeof(unsigned char));
// allocate gpu memory
float *h, *s;
unsigned char *l;
unsigned char *r, *g, *b;
int size = result.w*result.h;
cudaMalloc((void **)&r, size*sizeof(unsigned char));
cudaMalloc((void **)&g, size*sizeof(unsigned char));
cudaMalloc((void **)&b, size*sizeof(unsigned char));
cudaMalloc((void **)&h, size*sizeof(float));
cudaMalloc((void **)&s, size*sizeof(float));
cudaMalloc((void **)&l, size*sizeof(unsigned char));
// copy memory from host to device
cudaMemcpy(r, result.img_r, size*sizeof(unsigned char), cudaMemcpyHostToDevice);
cudaMemcpy(g, result.img_g, size*sizeof(unsigned char), cudaMemcpyHostToDevice);
cudaMemcpy(b, result.img_b, size*sizeof(unsigned char), cudaMemcpyHostToDevice);
cudaMemcpy(h, img_in.h, size*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(s, img_in.s, size*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(l, img_in.l, size*sizeof(unsigned char), cudaMemcpyHostToDevice);
hsl2rgb_call<<<BLOCKS, THREADS_PER_BLOCK>>>(h,s,l,r,g,b,result.w,result.h);
// copy memory from device to host
cudaMemcpy(result.img_r, r, size*sizeof(unsigned char), cudaMemcpyDeviceToHost);
cudaMemcpy(result.img_g, g, size*sizeof(unsigned char), cudaMemcpyDeviceToHost);
cudaMemcpy(result.img_b, b, size*sizeof(unsigned char), cudaMemcpyDeviceToHost);
// free gpu memory
cudaFree(r);
cudaFree(g);
cudaFree(b);
cudaFree(h);
cudaFree(s);
cudaFree(l);
return result;
}
__global__ void hsl2rgb_call( float *in_h, float *in_s, unsigned char *in_l,
unsigned char *out_r, unsigned char *out_g, unsigned char *out_b, int w, int h)
{
int i;
int size = w*h;
for(i = threadIdx.x; i < size; i += THREADS_PER_BLOCK){
float H = in_h[i];
float S = in_s[i];
float L = in_l[i]/255.0f;
float var_1, var_2;
unsigned char r,g,b;
if ( S == 0 )
{
r = L * 255;
g = L * 255;
b = L * 255;
}
else
{
if ( L < 0.5 )
var_2 = L * ( 1 + S );
else
var_2 = ( L + S ) - ( S * L );
var_1 = 2 * L - var_2;
float value[3];
float vH;
for (int j=0;j<3;j++)
{
vH = H - (j-1)*(1.0f/3.0f);
if ( vH < 0 ) vH += 1;
if ( vH > 1 ) vH -= 1;
if ( ( 6 * vH ) < 1 )
value[j] = var_1 + ( var_2 - var_1 ) * 6 * vH ;
else if ( ( 2 * vH ) < 1 )
value[j] = var_2 ;
else if ( ( 3 * vH ) < 2 )
value[j] = var_1 + ( var_2 - var_1 ) * ( ( 2.0f/3.0f ) - vH ) * 6;
else
value[j] = var_1;
}
//r = 255 * Hue_2_RGB( var_1, var_2, H + (1.0f/3.0f) );
//g = 255 * Hue_2_RGB( var_1, var_2, H );
//b = 255 * Hue_2_RGB( var_1, var_2, H - (1.0f/3.0f) );
r = 255 * value[0];
g = 255 * value[1];
b = 255 * value[2];
}
out_r[i] = r;
out_g[i] = g;
out_b[i] = b;
}
}
//Convert RGB to YUV, all components in [0, 255]
YUV_IMG rgb2yuv(PPM_IMG img_in)
{
YUV_IMG img_out;
int i;//, j;
unsigned char r, g, b;
unsigned char y, cb, cr;
img_out.w = img_in.w;
img_out.h = img_in.h;
img_out.img_y = (unsigned char *)malloc(sizeof(unsigned char)*img_out.w*img_out.h);
img_out.img_u = (unsigned char *)malloc(sizeof(unsigned char)*img_out.w*img_out.h);
img_out.img_v = (unsigned char *)malloc(sizeof(unsigned char)*img_out.w*img_out.h);
for(i = 0; i < img_out.w*img_out.h; i ++){
r = img_in.img_r[i];
g = img_in.img_g[i];
b = img_in.img_b[i];
y = (unsigned char)( 0.299*r + 0.587*g + 0.114*b);
cb = (unsigned char)(-0.169*r - 0.331*g + 0.499*b + 128);
cr = (unsigned char)( 0.499*r - 0.418*g - 0.0813*b + 128);
img_out.img_y[i] = y;
img_out.img_u[i] = cb;
img_out.img_v[i] = cr;
}
return img_out;
}
// gpu version
YUV_IMG rgb2yuv_gpu(PPM_IMG img_in)
{
YUV_IMG img_out;
img_out.w = img_in.w;
img_out.h = img_in.h;
img_out.img_y = (unsigned char *)malloc(sizeof(unsigned char)*img_out.w*img_out.h);
img_out.img_u = (unsigned char *)malloc(sizeof(unsigned char)*img_out.w*img_out.h);
img_out.img_v = (unsigned char *)malloc(sizeof(unsigned char)*img_out.w*img_out.h);
// allocate memory on gpu
unsigned char *r, *g, *b, *y, *u, *v;
int size = img_out.w*img_out.h;
cudaMalloc((void **)&r, size*sizeof(unsigned char));
cudaMalloc((void **)&g, size*sizeof(unsigned char));
cudaMalloc((void **)&b, size*sizeof(unsigned char));
cudaMalloc((void **)&y, size*sizeof(unsigned char));
cudaMalloc((void **)&u, size*sizeof(unsigned char));
cudaMalloc((void **)&v, size*sizeof(unsigned char));
// copy memory from host to device
cudaMemcpy(r, img_in.img_r, size*sizeof(unsigned char), cudaMemcpyHostToDevice);
cudaMemcpy(g, img_in.img_g, size*sizeof(unsigned char), cudaMemcpyHostToDevice);
cudaMemcpy(b, img_in.img_b, size*sizeof(unsigned char), cudaMemcpyHostToDevice);
cudaMemcpy(y, img_out.img_y, size*sizeof(unsigned char), cudaMemcpyHostToDevice);
cudaMemcpy(u, img_out.img_u, size*sizeof(unsigned char), cudaMemcpyHostToDevice);
cudaMemcpy(v, img_out.img_v, size*sizeof(unsigned char), cudaMemcpyHostToDevice);
// call gpu function
rgb2yuv_call<<<BLOCKS, THREADS_PER_BLOCK>>>(r,g,b,y,u,v,img_out.w,img_out.h);
// copy memory from device to host
cudaMemcpy(img_out.img_y, y, size*sizeof(unsigned char), cudaMemcpyDeviceToHost);
cudaMemcpy(img_out.img_u, u, size*sizeof(unsigned char), cudaMemcpyDeviceToHost);
cudaMemcpy(img_out.img_v, v, size*sizeof(unsigned char), cudaMemcpyDeviceToHost);
// free gpu memory
cudaFree(r);
cudaFree(g);
cudaFree(b);
cudaFree(y);
cudaFree(u);
cudaFree(v);
return img_out;
}
__global__ void rgb2yuv_call( unsigned char *in_r, unsigned char *in_g, unsigned char *in_b,
unsigned char *out_y, unsigned char *out_u, unsigned char *out_v, int w, int h)
{
int i;
unsigned char r, g, b;
unsigned char y, cb, cr;
int size = w*h;
for(i = threadIdx.x; i < size; i += THREADS_PER_BLOCK){
r = in_r[i];
g = in_g[i];
b = in_b[i];
y = (unsigned char)( 0.299*r + 0.587*g + 0.114*b);
cb = (unsigned char)(-0.169*r - 0.331*g + 0.499*b + 128);
cr = (unsigned char)( 0.499*r - 0.418*g - 0.0813*b + 128);
out_y[i] = y;
out_u[i] = cb;
out_v[i] = cr;
}
}
unsigned char clip_rgb(int x)
{
if(x > 255)
return 255;
if(x < 0)
return 0;
return (unsigned char)x;
}
//Convert YUV to RGB, all components in [0, 255]
PPM_IMG yuv2rgb(YUV_IMG img_in)
{
PPM_IMG img_out;
int i;
int rt,gt,bt;
int y, cb, cr;
img_out.w = img_in.w;
img_out.h = img_in.h;
img_out.img_r = (unsigned char *)malloc(sizeof(unsigned char)*img_out.w*img_out.h);
img_out.img_g = (unsigned char *)malloc(sizeof(unsigned char)*img_out.w*img_out.h);
img_out.img_b = (unsigned char *)malloc(sizeof(unsigned char)*img_out.w*img_out.h);
for(i = 0; i < img_out.w*img_out.h; i ++){
y = (int)img_in.img_y[i];
cb = (int)img_in.img_u[i] - 128;
cr = (int)img_in.img_v[i] - 128;
rt = (int)( y + 1.402*cr);
gt = (int)( y - 0.344*cb - 0.714*cr);
bt = (int)( y + 1.772*cb);
img_out.img_r[i] = clip_rgb(rt);
img_out.img_g[i] = clip_rgb(gt);
img_out.img_b[i] = clip_rgb(bt);
}
return img_out;
}
// gpu version
PPM_IMG yuv2rgb_gpu(YUV_IMG img_in)
{
PPM_IMG img_out;
img_out.w = img_in.w;
img_out.h = img_in.h;
img_out.img_r = (unsigned char *)malloc(sizeof(unsigned char)*img_out.w*img_out.h);
img_out.img_g = (unsigned char *)malloc(sizeof(unsigned char)*img_out.w*img_out.h);
img_out.img_b = (unsigned char *)malloc(sizeof(unsigned char)*img_out.w*img_out.h);
// allocate memory on gpu
unsigned char *r, *g, *b, *y, *u, *v;
int size = img_out.w*img_out.h;
cudaMalloc((void **)&r, size*sizeof(unsigned char));
cudaMalloc((void **)&g, size*sizeof(unsigned char));
cudaMalloc((void **)&b, size*sizeof(unsigned char));
cudaMalloc((void **)&y, size*sizeof(unsigned char));
cudaMalloc((void **)&u, size*sizeof(unsigned char));
cudaMalloc((void **)&v, size*sizeof(unsigned char));
// copy memory from host to device
cudaMemcpy(r, img_out.img_r, size*sizeof(unsigned char), cudaMemcpyHostToDevice);
cudaMemcpy(g, img_out.img_g, size*sizeof(unsigned char), cudaMemcpyHostToDevice);
cudaMemcpy(b, img_out.img_b, size*sizeof(unsigned char), cudaMemcpyHostToDevice);
cudaMemcpy(y, img_in.img_y, size*sizeof(unsigned char), cudaMemcpyHostToDevice);
cudaMemcpy(u, img_in.img_u, size*sizeof(unsigned char), cudaMemcpyHostToDevice);
cudaMemcpy(v, img_in.img_v, size*sizeof(unsigned char), cudaMemcpyHostToDevice);
// call gpu function
yuv2rgb_call<<<BLOCKS, THREADS_PER_BLOCK>>>(y,u,v,r,g,b,img_out.w,img_out.h);
// copy memory from device to host
cudaMemcpy(img_out.img_r, r, size*sizeof(unsigned char), cudaMemcpyDeviceToHost);
cudaMemcpy(img_out.img_g, g, size*sizeof(unsigned char), cudaMemcpyDeviceToHost);
cudaMemcpy(img_out.img_b, b, size*sizeof(unsigned char), cudaMemcpyDeviceToHost);
// free gpu memory
cudaFree(r);
cudaFree(g);
cudaFree(b);
cudaFree(y);
cudaFree(u);
cudaFree(v);
return img_out;
}
__global__ void yuv2rgb_call( unsigned char *in_y, unsigned char *in_u, unsigned char *in_v,
unsigned char *out_r, unsigned char *out_g, unsigned char *out_b, int w, int h)
{
int i;
int rt,gt,bt;
int y, cb, cr;
int size = w*h;
for(i = threadIdx.x; i < size; i +=THREADS_PER_BLOCK ){
y = (int)in_y[i];
cb = (int)in_u[i] - 128;
cr = (int)in_v[i] - 128;
rt = (int)( y + 1.402*cr);
gt = (int)( y - 0.344*cb - 0.714*cr);
bt = (int)( y + 1.772*cb);
//out_r[i] = clip_rgb(rt);
//out_g[i] = clip_rgb(gt);
//out_b[i] = clip_rgb(bt);
//out_r[i] = (unsigned char)( (rt>255?255:rt)<0?0:rt );
//out_g[i] = (unsigned char)( (gt>255?255:gt)<0?0:gt );
//out_b[i] = (unsigned char)( (bt>255?255:bt)<0?0:bt );
if(rt>255)
out_r[i] = 255;
else if(rt<0)
out_r[i] = 0;
else
out_r[i] = (unsigned char)(rt);
if(gt>255)
out_g[i] = 255;
else if(gt<0)
out_g[i] = 0;
else
out_g[i] = (unsigned char)(gt);
if(bt>255)
out_b[i] = 255;
else if(bt<0)
out_b[i] = 0;
else
out_b[i] = (unsigned char)(bt);
}
}
|
b9553f8dbb350d0761ce579f1801d4198048961d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define MATRIX_TYPE int
#define BLOCK_SIZE 8
#define THREAD_SIZE 64
#define TILE 64
void stopwatch(int);
//CUDA matrix multiply
__global__ void cuda_mul(MATRIX_TYPE*,MATRIX_TYPE*,MATRIX_TYPE*,int);
__global__ void shared_mul(MATRIX_TYPE*,MATRIX_TYPE*,MATRIX_TYPE*,int);
__global__ void exam_mul(MATRIX_TYPE*,MATRIX_TYPE*,MATRIX_TYPE*,int);
int main()
{
//1024 by 1024
const int width = 1024;
const int height = width;
const int matrix_size = width*height;
const int buffer_size = matrix_size*sizeof(MATRIX_TYPE);
MATRIX_TYPE *host_A,*host_B,*host_C;
host_A = (MATRIX_TYPE*)malloc(buffer_size);
host_B = (MATRIX_TYPE*)malloc(buffer_size);
host_C = (MATRIX_TYPE*)malloc(buffer_size);
for(int i=0;i<matrix_size;i++)
{
host_A[i] = i;
host_B[i] = i;
host_C[i] =0;
}
printf("Multiply matrix (%dX%d ) * (%dX%d)\n",width,width,width,width);
MATRIX_TYPE *device_A,*device_B,*device_C;
dim3 Db(1024,1024,1);
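// Note: dim3(1024,1024,1) asks for 1024*1024 threads in a single block,
// far above the hardware limit of 1024 threads per block, so the launches
// below that use this configuration fail with an invalid-configuration error.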
hipMalloc((void**)&device_A,buffer_size );
hipMalloc((void**)&device_B,buffer_size );
hipMalloc((void**)&device_C,buffer_size );
printf("cuda_mul\n");
stopwatch(0);
hipMemcpy(device_A,host_A,buffer_size,hipMemcpyHostToDevice);
hipMemcpy(device_B,host_B,buffer_size,hipMemcpyHostToDevice);
hipLaunchKernelGGL(( cuda_mul), dim3(1),dim3(Db), 0, 0, device_A,device_B,device_C,width);
hipMemcpy(host_C,device_C,buffer_size,hipMemcpyDeviceToHost);
stopwatch(1);
for(int i=0;i<matrix_size;i++)
{
host_A[i] = i;
host_B[i] = i;
host_C[i] =0;
}
dim3 Sg(BLOCK_SIZE,BLOCK_SIZE,1);
dim3 Sb(THREAD_SIZE,THREAD_SIZE,1);
printf("shared_mul\n");
stopwatch(0);
hipMemcpy(device_A,host_A,buffer_size,hipMemcpyHostToDevice);
hipMemcpy(device_B,host_B,buffer_size,hipMemcpyHostToDevice);
hipLaunchKernelGGL(( exam_mul), dim3(1),dim3(Db), 0, 0, device_A,device_B,device_C,width);
hipMemcpy(host_C,device_C,buffer_size,hipMemcpyDeviceToHost);
stopwatch(1);
hipFree(device_A);
hipFree(device_B);
hipFree(device_C);
free(host_A);
free(host_B);
free(host_C);
return 0;
}
__global__ void cuda_mul(MATRIX_TYPE* A, MATRIX_TYPE* B, MATRIX_TYPE* C, int w)
{
MATRIX_TYPE v;
v = 0;
for(int i =0;i<w;i++)
{
v += A[threadIdx.y*w + i] * B[threadIdx.x *w + i];
}
C[threadIdx.x *w + threadIdx.y] = v;
}
__global__ void shared_mul(MATRIX_TYPE*A,MATRIX_TYPE*B,MATRIX_TYPE*C,int w)
{
/*
Dg(16,16,1)
Db(64,64,1)
0,0 1,0
---------
|
0,1 | 1,1
|
1 0 2 4
1
0
2
4
*/
__shared__ MATRIX_TYPE SA[THREAD_SIZE][THREAD_SIZE];
__shared__ MATRIX_TYPE SB[THREAD_SIZE][THREAD_SIZE];
MATRIX_TYPE v;
SA[threadIdx.x][threadIdx.y] = A[blockIdx.y *w +blockIdx.x];
SB[threadIdx.x][threadIdx.y] = B[blockIdx.x *w +blockIdx.y];
v = 0;
/*
A B
O O O O X X X X
O O O O O O O O
X O O O O O O O
O O O O O O O O
*/
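/* Note: shared_mul is left unfinished in the original source. It fills SA/SB
   (every thread of a block actually loads the same single element of A and B)
   and initializes v, but it never accumulates any products and never writes C;
   exam_mul below is the completed tiled version. */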
}
void stopwatch(int flag)
{
const long long NANOS = 1000000000LL;
static struct timespec startTS,endTS;
static long long Diff = 0;
//start
if(flag == 0)
{
Diff = 0;
if(-1 == clock_gettime(CLOCK_MONOTONIC,&startTS))
printf("Failed to call clock_gettime\n");
}
//end
else if(flag == 1)
{
if(-1 == clock_gettime(CLOCK_MONOTONIC,&endTS))
printf("Failed to call clock_gettime\n");
Diff = NANOS * (endTS.tv_sec - startTS.tv_sec) + (endTS.tv_nsec - startTS.tv_nsec);
printf("elapsed time : % lld micros\n",Diff/1000);
}
else
{
printf("wrong flag | 0 : start, 1 : end\n");
}
}
__global__ void exam_mul(MATRIX_TYPE*A,MATRIX_TYPE*B,MATRIX_TYPE*C,int w)
{
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int aBegin = w * TILE * by;
int aEnd = aBegin +w -1;
int aStep = TILE;
int bBegin = TILE *bx;
int bStep = TILE * w;
MATRIX_TYPE Csub = 0;
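/* exam_mul assumes blockDim == (TILE, TILE). With TILE = 64 that would be
   4096 threads per block, above the 1024-thread limit, so TILE must be at
   most 32 in practice; shared memory is not the binding constraint here
   (2 * 64 * 64 * sizeof(int) = 32 KB fits in the default 48 KB). */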
for(int a = aBegin, b = bBegin;
a <= aEnd;
a += aStep, b+= bStep)
{
__shared__ MATRIX_TYPE As[TILE][TILE];
__shared__ MATRIX_TYPE Bs[TILE][TILE];
As[ty][tx] = A[a + w * ty + tx];
Bs[ty][tx] = B[b + w * ty + tx];
__syncthreads();
for(int k=0;k<TILE;k++)
Csub += As[ty][k] * Bs[k][tx];
__syncthreads();
}
int c = w * TILE * by + TILE * bx;
C[c + w * ty + tx] = Csub;
}
| b9553f8dbb350d0761ce579f1801d4198048961d.cu | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define MATRIX_TYPE int
#define BLOCK_SIZE 8
#define THREAD_SIZE 64
#define TILE 64
void stopwatch(int);
//CUDA matrix multiply
__global__ void cuda_mul(MATRIX_TYPE*,MATRIX_TYPE*,MATRIX_TYPE*,int);
__global__ void shared_mul(MATRIX_TYPE*,MATRIX_TYPE*,MATRIX_TYPE*,int);
__global__ void exam_mul(MATRIX_TYPE*,MATRIX_TYPE*,MATRIX_TYPE*,int);
int main()
{
//1024 by 1024 matrix
const int width = 1024;
const int height = width;
const int matrix_size = width*height;
const int buffer_size = matrix_size*sizeof(MATRIX_TYPE);
MATRIX_TYPE *host_A,*host_B,*host_C;
host_A = (MATRIX_TYPE*)malloc(buffer_size);
host_B = (MATRIX_TYPE*)malloc(buffer_size);
host_C = (MATRIX_TYPE*)malloc(buffer_size);
for(int i=0;i<matrix_size;i++)
{
host_A[i] = i;
host_B[i] = i;
host_C[i] =0;
}
printf("Multiply matrix (%dX%d ) * (%dX%d)\n",width,width,width,width);
MATRIX_TYPE *device_A,*device_B,*device_C;
dim3 Db(1024,1024,1);
cudaMalloc((void**)&device_A,buffer_size );
cudaMalloc((void**)&device_B,buffer_size );
cudaMalloc((void**)&device_C,buffer_size );
printf("cuda_mul\n");
stopwatch(0);
cudaMemcpy(device_A,host_A,buffer_size,cudaMemcpyHostToDevice);
cudaMemcpy(device_B,host_B,buffer_size,cudaMemcpyHostToDevice);
cuda_mul<<<1,Db>>>(device_A,device_B,device_C,width);
cudaMemcpy(host_C,device_C,buffer_size,cudaMemcpyDeviceToHost);
stopwatch(1);
for(int i=0;i<matrix_size;i++)
{
host_A[i] = i;
host_B[i] = i;
host_C[i] =0;
}
dim3 Sg(BLOCK_SIZE,BLOCK_SIZE,1);
dim3 Sb(THREAD_SIZE,THREAD_SIZE,1);
printf("shared_mul\n");
stopwatch(0);
cudaMemcpy(device_A,host_A,buffer_size,cudaMemcpyHostToDevice);
cudaMemcpy(device_B,host_B,buffer_size,cudaMemcpyHostToDevice);
exam_mul<<<1,Db>>>(device_A,device_B,device_C,width);
cudaMemcpy(host_C,device_C,buffer_size,cudaMemcpyDeviceToHost);
stopwatch(1);
cudaFree(device_A);
cudaFree(device_B);
cudaFree(device_C);
free(host_A);
free(host_B);
free(host_C);
return 0;
}
__global__ void cuda_mul(MATRIX_TYPE* A, MATRIX_TYPE* B, MATRIX_TYPE* C, int w)
{
MATRIX_TYPE v;
v = 0;
for(int i =0;i<w;i++)
{
v += A[threadIdx.y*w + i] * B[threadIdx.x *w + i];
}
C[threadIdx.x *w + threadIdx.y] = v;
}
__global__ void shared_mul(MATRIX_TYPE*A,MATRIX_TYPE*B,MATRIX_TYPE*C,int w)
{
/*
Dg(16,16,1)
Db(64,64,1)
0,0 1,0
---------
|
0,1 | 1,1
|
1 0 2 4
1
0
2
4
*/
__shared__ MATRIX_TYPE SA[THREAD_SIZE][THREAD_SIZE];
__shared__ MATRIX_TYPE SB[THREAD_SIZE][THREAD_SIZE];
MATRIX_TYPE v;
SA[threadIdx.x][threadIdx.y] = A[blockIdx.y *w +blockIdx.x];
SB[threadIdx.x][threadIdx.y] = B[blockIdx.x *w +blockIdx.y];
v = 0;
/*
Process every tile of B that is paired with a given tile of A
O O O O X X X X
O O O O O O O O
X O O O O O O O
O O O O O O O O
*/
}
void stopwatch(int flag)
{
const long long NANOS = 1000000000LL;
static struct timespec startTS,endTS;
static long long Diff = 0;
//start
if(flag == 0)
{
Diff = 0;
if(-1 == clock_gettime(CLOCK_MONOTONIC,&startTS))
printf("Failed to call clock_gettime\n");
}
//end
else if(flag == 1)
{
if(-1 == clock_gettime(CLOCK_MONOTONIC,&endTS))
printf("Failed to call clock_gettime\n");
Diff = NANOS * (endTS.tv_sec - startTS.tv_sec) + (endTS.tv_nsec - startTS.tv_nsec);
printf("elapsed time : % lld micros\n",Diff/1000);
}
else
{
printf("wrong flag | 0 : start, 1 : end\n");
}
}
__global__ void exam_mul(MATRIX_TYPE*A,MATRIX_TYPE*B,MATRIX_TYPE*C,int w)
{
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int aBegin = w * TILE * by;
int aEnd = aBegin +w -1;
int aStep = TILE;
int bBegin = TILE *bx;
int bStep = TILE * w;
MATRIX_TYPE Csub = 0;
for(int a = aBegin, b = bBegin;
a <= aEnd;
a += aStep, b+= bStep)
{
__shared__ MATRIX_TYPE As[TILE][TILE];
__shared__ MATRIX_TYPE Bs[TILE][TILE];
As[ty][tx] = A[a + w * ty + tx];
Bs[ty][tx] = B[b + w * ty + tx];
__syncthreads();
for(int k=0;k<TILE;k++)
Csub += As[ty][k] * Bs[k][tx];
__syncthreads();
}
int c = w * TILE * by + TILE * bx;
C[c + w * ty + tx] = Csub;
}
|
58f1742b919ec8427928abb52f08a9f5d32d860e.hip | // !!! This is a file automatically generated by hipify!!!
//Udacity HW 4
//Radix Sorting
#include "utils.h"
#include <thrust/host_vector.h>
#include <hip/hip_runtime.h>
#include <bitset>
#include <iostream>
/* Red Eye Removal
===============
For this assignment we are implementing red eye removal. This is
accomplished by first creating a score for every pixel that tells us how
likely it is to be a red eye pixel. We have already done this for you - you
are receiving the scores and need to sort them in ascending order so that we
know which pixels to alter to remove the red eye.
Note: ascending order == smallest to largest
Each score is associated with a position, when you sort the scores, you must
also move the positions accordingly.
Implementing Parallel Radix Sort with CUDA
==========================================
The basic idea is to construct a histogram on each pass of how many of each
"digit" there are. Then we scan this histogram so that we know where to put
the output of each digit. For example, the first 1 must come after all the
0s so we have to know how many 0s there are to be able to start moving 1s
into the correct position.
1) Histogram of the number of occurrences of each digit
2) Exclusive Prefix Sum of Histogram
3) Determine relative offset of each digit
For example [0 0 1 1 0 0 1]
-> [0 1 0 1 2 3 2]
4) Combine the results of steps 2 & 3 to determine the final
output location for each element and move it there
LSB Radix sort is an out-of-place sort and you will need to ping-pong values
between the input and output buffers we have provided. Make sure the final
sorted results end up in the output buffer! Hint: You may need to do a copy
at the end.
*/
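/* Worked pass on the example digits above (arithmetic added for clarity):
     digits:           [0 0 1 1 0 0 1]
     histogram:        four 0s, three 1s
     exclusive scan:   start(0) = 0, start(1) = 4
     relative offsets: [0 1 0 1 2 3 2]
     final position = start(digit) + offset, giving [0 1 4 5 2 3 6],
   i.e. the stable order [0 0 0 0 1 1 1]. */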
const unsigned int bitdepth = 8*sizeof(unsigned int);
const unsigned int numBits = 1;
const unsigned int numBins = 1 << numBits;
/*
__global__ void histo_offset (unsigned int* d_inputVals,
unsigned int* d_binHistogram,
unsigned int* d_offsetvec,
const size_t numElems, unsigned int i)
{
const int2 myID = make_int2(threadIdx.x + blockDim.x * blockIdx.x,
threadIdx.y + blockDim.y * blockIdx.y);
const int myID1D = myID.y*gridDim.x*blockDim.x + myID.x;
if (myID1D >= numElems)
return;
unsigned int currVal = d_inputVals[myID1D];
unsigned int mask = (numBins - 1) << i;
unsigned int bin = (currVal & mask) >> i;
d_offsetvec[myID1D] = atomicAdd(& d_binHistogram[bin], 1);
}
*/
__global__ void compact (unsigned int* d_inputVals,
unsigned int* d_compact_out,
const size_t numElems, unsigned int i)
{
const int2 myID = make_int2(threadIdx.x + blockDim.x * blockIdx.x,
threadIdx.y + blockDim.y * blockIdx.y);
const int myID1D = myID.y*gridDim.x*blockDim.x + myID.x;
if (myID1D >= numElems){
d_compact_out[myID1D] = 0;
}
else{
unsigned int mask = (numBins - 1) << i;
unsigned int bin = (d_inputVals[myID1D] & mask) >> i;
d_compact_out[myID1D] = !bin;
}
}
__global__ void blelloch_scan (unsigned int *d_bins_io,
const size_t numBins)
{
const int tid = threadIdx.x + threadIdx.y * blockDim.x;
const int myID1D = (blockIdx.x + blockIdx.y*gridDim.x)*blockDim.x*blockDim.y + tid;
const int thnum = blockDim.x*blockDim.y;
//printf("overall ID is %d, at tid %d, difference is %d\n", myID1D, tid, myID1D-tid);
for (unsigned int i = 2; i<=numBins; i<<=1){
if ((tid+1)%i == 0){
unsigned int step = i>>1;
d_bins_io[myID1D] += d_bins_io[myID1D-step];
}
__syncthreads();
}
if (tid == thnum-1) d_bins_io[myID1D] = 0;
__syncthreads();
for (unsigned int j = numBins; j>0; j>>=1){
if ((tid+1)%j == 0){
unsigned int step2 = j>>1;
unsigned int right = d_bins_io[myID1D];
d_bins_io[myID1D] += d_bins_io[myID1D-step2];
d_bins_io[myID1D-step2] = right;
}
__syncthreads();
}
}
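/* Example of the two phases above on [1 1 1 1] (exclusive scan):
     up-sweep:   [1 1 1 1] -> [1 2 1 2] -> [1 2 1 4]
     clear last: [1 2 1 0]
     down-sweep: [1 0 1 2] -> [0 1 2 3]
   The final j == 1 iteration is a net no-op: each element is doubled and
   then immediately restored from `right`. */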
__global__ void blelloch_patch (unsigned int* d_bins_io, unsigned int* d_inputVals,
const size_t num_scanPatch, const size_t numElems)
{
const int tid = threadIdx.x + threadIdx.y * blockDim.x;
const int myID1D = (blockIdx.x + blockIdx.y*gridDim.x)*blockDim.x*blockDim.y + tid;
const int idx = (myID1D+1)*num_scanPatch;
if (idx >= numElems)
return;
d_bins_io[idx] = d_bins_io[idx-1] + d_inputVals[idx-1];
}
__global__ void hillis_postprocess (unsigned int *d_bins_io, const size_t numscanPatch,
const size_t numBins, const size_t numElems)
{
const int tid = threadIdx.x + threadIdx.y * blockDim.x;
const int myID1D = (blockIdx.x + blockIdx.y*gridDim.x)*blockDim.x*blockDim.y + tid;
if (myID1D*numscanPatch >= numElems)
return;
for (unsigned int i = 1; i<=numBins; i<<=1){
unsigned int right = 0;
if ((tid+1)>i){
right = d_bins_io[myID1D*numscanPatch] + d_bins_io[(myID1D-i)*numscanPatch];
}
__syncthreads();
if ((tid+1)>i){
d_bins_io[myID1D*numscanPatch] = right;
}
__syncthreads();
}
}
__global__ void global_correct (unsigned int *d_bins_io, const size_t numscanPatch)
{
const int tid = threadIdx.x + threadIdx.y * blockDim.x;
const int blockid = blockIdx.x + blockIdx.y*gridDim.x;
const int myID1D = blockid*blockDim.x*blockDim.y + tid;
const int thnum = blockDim.x*blockDim.y;
if (tid >= numscanPatch || tid == 0)
return;
d_bins_io[myID1D] += d_bins_io[blockid*numscanPatch];
}
__global__ void swapLocs (unsigned int* d_inputVals, unsigned int* d_inputPos,
unsigned int* d_outputVals, unsigned int* d_outputPos, unsigned int* d_eqnil,
unsigned int* d_offsetvec, const size_t numElems, unsigned int i)
{
const int2 myID = make_int2(threadIdx.x + blockDim.x * blockIdx.x,
threadIdx.y + blockDim.y * blockIdx.y);
const int myID1D = myID.y*gridDim.x*blockDim.x + myID.x;
if (myID1D >= numElems)
return;
unsigned int totFalse = d_offsetvec[numElems];
unsigned int currVal = d_inputVals[myID1D];
unsigned int currPos = d_inputPos[myID1D];
unsigned int newidx = 0;
if (d_eqnil[myID1D]){
newidx = d_offsetvec[myID1D];
}else{
newidx = myID1D - d_offsetvec[myID1D] + totFalse;
}
d_outputVals[newidx] = currVal;
d_outputPos[newidx] = currPos;
}
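/* This is the classic radix-sort "split" scatter: compact() marked elements
   whose current bit is 0 with a 1 in d_eqnil, so d_offsetvec (its exclusive
   scan) gives each such element its rank among the zeros, and elements with
   the bit set land after all of them at i - d_offsetvec[i] + totFalse.
   The partition is stable, which is what LSB radix sort requires. */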
__global__ void old_prototype (unsigned int* d_inputVals, unsigned int* const d_inputPos,
unsigned int* d_outputVals, unsigned int* d_outputPos,
unsigned int* d_binHistogram,
unsigned int* d_offsetvec,
unsigned int* d_binScan,
const size_t numElems)
{
const int2 myID = make_int2(threadIdx.x + blockDim.x * blockIdx.x,
threadIdx.y + blockDim.y * blockIdx.y);
const int myID1D = myID.y*gridDim.x*blockDim.x + myID.x;
if (myID1D >= numElems)
return;
unsigned int currVal = d_inputVals[myID1D];
unsigned int currPos = d_inputPos[myID1D];
for (unsigned int i=0;i<bitdepth;i+=numBits){
unsigned int mask = (numBins - 1) << i;
unsigned int bin = (currVal & mask) >> i;
d_offsetvec[myID1D] = atomicAdd(& d_binHistogram[bin], 1);
__syncthreads();
if (myID1D == 0){
d_binScan[0] = 0;
for (unsigned int j=1;j<numBins;j++){
d_binScan[j] = d_binScan[j-1] + d_binHistogram[j-1];
printf("d_bins at %d is %d\n", j, d_binScan[j]);
d_binHistogram[j-1] = 0;
}
d_binHistogram[numBins-1] = 0;
}
__syncthreads();
unsigned int newidx = d_binScan[bin] + d_offsetvec[myID1D];
//if (newidx>=numElems)
//printf("newidx overflowed: %d\n", newidx);
//d_outputVals[newidx] = currVal;
//d_outputPos[newidx] = currPos;
__syncthreads();
currVal = d_outputVals[myID1D];
currPos = d_outputPos[myID1D];
}
}
void prefix_sum_scan (unsigned int* d_io_offset, unsigned int numthreads,
const size_t numElems, const int thread_x, const int thread_y, const int grids)
{
unsigned int *d_init;
checkCudaErrors(hipMalloc(&d_init, numElems*sizeof(unsigned int)));
checkCudaErrors(hipMemcpy(d_init, d_io_offset, numElems*sizeof(unsigned int), hipMemcpyDeviceToDevice));
const dim3 blockSize(thread_x, thread_y, 1);
const dim3 gridSize(grids, 1, 1);
hipLaunchKernelGGL(( blelloch_scan), dim3(gridSize), dim3(blockSize), 0, 0, d_io_offset, numthreads);
hipLaunchKernelGGL(( blelloch_patch), dim3(1), dim3(gridSize), 0, 0, d_io_offset, d_init, numthreads, numElems);
hipLaunchKernelGGL(( hillis_postprocess), dim3(1), dim3(gridSize), 0, 0, d_io_offset, numthreads, grids, numElems);
hipLaunchKernelGGL(( global_correct), dim3(gridSize), dim3(blockSize), 0, 0, d_io_offset, numthreads);
}
void your_sort(unsigned int* const d_inputVals,
unsigned int* const d_inputPos,
unsigned int* const d_outputVals,
unsigned int* const d_outputPos,
const size_t numElems)
{
//TODO
//PUT YOUR SORT HERE
/*
unsigned int numtest = 255;
unsigned int *h_testvec = new unsigned int[numtest];
for (unsigned int u=0;u<numtest;u++){
h_testvec[u] = u;
}
const int thread_x = 32;
const int thread_y = 32;
unsigned int numthreads = thread_x*thread_y;
const int grids = (int)ceil((float)numtest/(float)numthreads);
printf("grids is %d\n", grids);
const dim3 blockSize(thread_x, thread_y, 1);
const dim3 gridSize(grids, 1, 1);
const unsigned int newsize = grids*numthreads;
unsigned int *h_resvec = new unsigned int[newsize];
unsigned int *d_offset, *d_swapBufV, *d_swapBufP;
printf("numBins is %d\n", numBins);
printf("numElems is %d\n", (int) numElems);
checkCudaErrors(hipMalloc(&d_offset, newsize*sizeof(unsigned int)));
checkCudaErrors(hipMalloc(&d_swapBufV, numtest*sizeof(unsigned int)));
checkCudaErrors(hipMalloc(&d_swapBufP, numtest*sizeof(unsigned int)));
checkCudaErrors(hipMemcpy(d_outputVals, h_testvec, numtest*sizeof(unsigned int), hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_outputPos, h_testvec, numtest*sizeof(unsigned int), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( compact), dim3(gridSize), dim3(blockSize), 0, 0, d_outputVals, d_offset, numtest, 0);
prefix_sum_scan(d_offset, numthreads, newsize, thread_x, thread_y, grids);
checkCudaErrors(hipMemcpy(h_resvec, d_offset, newsize*sizeof(unsigned int), hipMemcpyDeviceToHost));
for (int j=0;j<newsize;j++){
if(j<numtest)
std::cout << "output is " << std::bitset<16>(h_testvec[j]);
else
std::cout << "output is none";
printf(" at %d, offset is %d\n", j, h_resvec[j]);
}
*/
const int thread_x = 32;
const int thread_y = 32;
unsigned int numthreads = thread_x*thread_y;
const int grids = (int)ceil((float)numElems/(float)numthreads);
printf("grids is %d\n", grids);
const dim3 blockSize(thread_x, thread_y, 1);
const dim3 gridSize(grids, 1, 1);
const unsigned int newsize = grids*numthreads;
unsigned int *d_eqnil, *d_offset, *d_swapBufV, *d_swapBufP;
printf("numBins is %d\n", numBins);
printf("numElems is %d\n", (int) numElems);
checkCudaErrors(hipMalloc(&d_offset, newsize*sizeof(unsigned int)));
checkCudaErrors(hipMalloc(&d_eqnil, newsize*sizeof(unsigned int)));
checkCudaErrors(hipMalloc(&d_swapBufV, numElems*sizeof(unsigned int)));
checkCudaErrors(hipMalloc(&d_swapBufP, numElems*sizeof(unsigned int)));
checkCudaErrors(hipMemcpy(d_outputVals, d_inputVals, numElems*sizeof(unsigned int), hipMemcpyDeviceToDevice));
checkCudaErrors(hipMemcpy(d_outputPos, d_inputPos, numElems*sizeof(unsigned int), hipMemcpyDeviceToDevice));
for (unsigned int i=0;i<bitdepth;i+=numBits)
{
hipLaunchKernelGGL(( compact), dim3(gridSize), dim3(blockSize), 0, 0, d_outputVals, d_eqnil, numElems, i);
checkCudaErrors(hipMemcpy(d_offset, d_eqnil, newsize*sizeof(unsigned int), hipMemcpyDeviceToDevice));
prefix_sum_scan(d_offset, numthreads, newsize, thread_x, thread_y, grids);
hipLaunchKernelGGL(( swapLocs), dim3(gridSize), dim3(blockSize), 0, 0, d_outputVals, d_outputPos, d_swapBufV, d_swapBufP, d_eqnil, d_offset, numElems, i);
checkCudaErrors(hipMemcpy(d_outputVals, d_swapBufV, numElems*sizeof(unsigned int), hipMemcpyDeviceToDevice));
checkCudaErrors(hipMemcpy(d_outputPos, d_swapBufP, numElems*sizeof(unsigned int), hipMemcpyDeviceToDevice));
checkCudaErrors(hipGetLastError());
/*
unsigned int *h_binScan = new unsigned int[numBins];
checkCudaErrors(hipMemcpy(h_binScan, d_binHisto, numBins*sizeof(unsigned int), hipMemcpyDeviceToHost));
unsigned int sumBins = 0;
for (int j=0;j<numBins;j++){
sumBins+=h_binScan[j];
}
printf("sumBins = %d", sumBins);
checkCudaErrors(hipMemcpy(h_testvec, d_outputVals, numtest*sizeof(unsigned int), hipMemcpyDeviceToHost));
for (int j=0;j<numtest;j++){
std::cout << "output is " << std::bitset<16>(h_testvec[j]);
printf(" at %d, stage = %d\n", j, i);
}*/
}
/*
unsigned int numtest = 3596;
unsigned int *h_testvec = new unsigned int[numtest];
unsigned int *h_resvec = new unsigned int[numtest];
for (unsigned int u=0;u<numtest;u++){
h_testvec[u] = 1;
}
const int thread_x = 32;
const int thread_y = 32;
int numthreads = thread_x*thread_y;
const int grids = (int)ceil((float)numtest/(float)numthreads);
printf("grids is %d\n", grids);
const dim3 blockSize(thread_x, thread_y, 1);
const dim3 gridSize(grids, grids, 1);
const unsigned int newsize = grids*numthreads;
unsigned int *d_init;
checkCudaErrors(hipMalloc(&d_init, newsize*sizeof(unsigned int)));
checkCudaErrors(hipMemcpy(d_outputVals, h_testvec, newsize*sizeof(unsigned int), hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_init, h_testvec, newsize*sizeof(unsigned int), hipMemcpyHostToDevice));
prefix_sum_scan(d_outputVals, numthreads, newsize, thread_x, thread_y, grids);
checkCudaErrors(hipMemcpy(h_resvec, d_outputVals, numtest*sizeof(unsigned int), hipMemcpyDeviceToHost));
for (int j=0;j<numtest;j++){
printf("sorted h at %d is %d, original is %d\n", j, h_resvec[j], h_testvec[j]);
}
*/
}
| 58f1742b919ec8427928abb52f08a9f5d32d860e.cu | //Udacity HW 4
//Radix Sorting
#include "utils.h"
#include <thrust/host_vector.h>
#include <cuda_runtime.h>
#include <bitset>
#include <iostream>
/* Red Eye Removal
===============
For this assignment we are implementing red eye removal. This is
accomplished by first creating a score for every pixel that tells us how
likely it is to be a red eye pixel. We have already done this for you - you
are receiving the scores and need to sort them in ascending order so that we
know which pixels to alter to remove the red eye.
Note: ascending order == smallest to largest
Each score is associated with a position, when you sort the scores, you must
also move the positions accordingly.
Implementing Parallel Radix Sort with CUDA
==========================================
The basic idea is to construct a histogram on each pass of how many of each
"digit" there are. Then we scan this histogram so that we know where to put
the output of each digit. For example, the first 1 must come after all the
0s so we have to know how many 0s there are to be able to start moving 1s
into the correct position.
1) Histogram of the number of occurrences of each digit
2) Exclusive Prefix Sum of Histogram
3) Determine relative offset of each digit
For example [0 0 1 1 0 0 1]
-> [0 1 0 1 2 3 2]
4) Combine the results of steps 2 & 3 to determine the final
output location for each element and move it there
LSB Radix sort is an out-of-place sort and you will need to ping-pong values
between the input and output buffers we have provided. Make sure the final
sorted results end up in the output buffer! Hint: You may need to do a copy
at the end.
*/
const unsigned int bitdepth = 8*sizeof(unsigned int);
const unsigned int numBits = 1;
const unsigned int numBins = 1 << numBits;
/*
__global__ void histo_offset (unsigned int* d_inputVals,
unsigned int* d_binHistogram,
unsigned int* d_offsetvec,
const size_t numElems, unsigned int i)
{
const int2 myID = make_int2(threadIdx.x + blockDim.x * blockIdx.x,
threadIdx.y + blockDim.y * blockIdx.y);
const int myID1D = myID.y*gridDim.x*blockDim.x + myID.x;
if (myID1D >= numElems)
return;
unsigned int currVal = d_inputVals[myID1D];
unsigned int mask = (numBins - 1) << i;
unsigned int bin = (currVal & mask) >> i;
d_offsetvec[myID1D] = atomicAdd(& d_binHistogram[bin], 1);
}
*/
__global__ void compact (unsigned int* d_inputVals,
unsigned int* d_compact_out,
const size_t numElems, unsigned int i)
{
const int2 myID = make_int2(threadIdx.x + blockDim.x * blockIdx.x,
threadIdx.y + blockDim.y * blockIdx.y);
const int myID1D = myID.y*gridDim.x*blockDim.x + myID.x;
if (myID1D >= numElems){
d_compact_out[myID1D] = 0;
}
else{
unsigned int mask = (numBins - 1) << i;
unsigned int bin = (d_inputVals[myID1D] & mask) >> i;
d_compact_out[myID1D] = !bin;
}
}
__global__ void blelloch_scan (unsigned int *d_bins_io,
const size_t numBins)
{
const int tid = threadIdx.x + threadIdx.y * blockDim.x;
const int myID1D = (blockIdx.x + blockIdx.y*gridDim.x)*blockDim.x*blockDim.y + tid;
const int thnum = blockDim.x*blockDim.y;
//printf("overall ID is %d, at tid %d, difference is %d\n", myID1D, tid, myID1D-tid);
for (unsigned int i = 2; i<=numBins; i<<=1){
if ((tid+1)%i == 0){
unsigned int step = i>>1;
d_bins_io[myID1D] += d_bins_io[myID1D-step];
}
__syncthreads();
}
if (tid == thnum-1) d_bins_io[myID1D] = 0;
__syncthreads();
for (unsigned int j = numBins; j>0; j>>=1){
if ((tid+1)%j == 0){
unsigned int step2 = j>>1;
unsigned int right = d_bins_io[myID1D];
d_bins_io[myID1D] += d_bins_io[myID1D-step2];
d_bins_io[myID1D-step2] = right;
}
__syncthreads();
}
}
__global__ void blelloch_patch (unsigned int* d_bins_io, unsigned int* d_inputVals,
const size_t num_scanPatch, const size_t numElems)
{
const int tid = threadIdx.x + threadIdx.y * blockDim.x;
const int myID1D = (blockIdx.x + blockIdx.y*gridDim.x)*blockDim.x*blockDim.y + tid;
const int idx = (myID1D+1)*num_scanPatch;
if (idx >= numElems)
return;
d_bins_io[idx] = d_bins_io[idx-1] + d_inputVals[idx-1];
}
__global__ void hillis_postprocess (unsigned int *d_bins_io, const size_t numscanPatch,
const size_t numBins, const size_t numElems)
{
const int tid = threadIdx.x + threadIdx.y * blockDim.x;
const int myID1D = (blockIdx.x + blockIdx.y*gridDim.x)*blockDim.x*blockDim.y + tid;
if (myID1D*numscanPatch >= numElems)
return;
for (unsigned int i = 1; i<=numBins; i<<=1){
unsigned int right = 0;
if ((tid+1)>i){
right = d_bins_io[myID1D*numscanPatch] + d_bins_io[(myID1D-i)*numscanPatch];
}
__syncthreads();
if ((tid+1)>i){
d_bins_io[myID1D*numscanPatch] = right;
}
__syncthreads();
}
}
__global__ void global_correct (unsigned int *d_bins_io, const size_t numscanPatch)
{
const int tid = threadIdx.x + threadIdx.y * blockDim.x;
const int blockid = blockIdx.x + blockIdx.y*gridDim.x;
const int myID1D = blockid*blockDim.x*blockDim.y + tid;
const int thnum = blockDim.x*blockDim.y;
if (tid >= numscanPatch || tid == 0)
return;
d_bins_io[myID1D] += d_bins_io[blockid*numscanPatch];
}
__global__ void swapLocs (unsigned int* d_inputVals, unsigned int* d_inputPos,
unsigned int* d_outputVals, unsigned int* d_outputPos, unsigned int* d_eqnil,
unsigned int* d_offsetvec, const size_t numElems, unsigned int i)
{
const int2 myID = make_int2(threadIdx.x + blockDim.x * blockIdx.x,
threadIdx.y + blockDim.y * blockIdx.y);
const int myID1D = myID.y*gridDim.x*blockDim.x + myID.x;
if (myID1D >= numElems)
return;
unsigned int totFalse = d_offsetvec[numElems];
unsigned int currVal = d_inputVals[myID1D];
unsigned int currPos = d_inputPos[myID1D];
unsigned int newidx = 0;
if (d_eqnil[myID1D]){
newidx = d_offsetvec[myID1D];
}else{
newidx = myID1D - d_offsetvec[myID1D] + totFalse;
}
d_outputVals[newidx] = currVal;
d_outputPos[newidx] = currPos;
}
__global__ void old_prototype (unsigned int* d_inputVals, unsigned int* const d_inputPos,
unsigned int* d_outputVals, unsigned int* d_outputPos,
unsigned int* d_binHistogram,
unsigned int* d_offsetvec,
unsigned int* d_binScan,
const size_t numElems)
{
const int2 myID = make_int2(threadIdx.x + blockDim.x * blockIdx.x,
threadIdx.y + blockDim.y * blockIdx.y);
const int myID1D = myID.y*gridDim.x*blockDim.x + myID.x;
if (myID1D >= numElems)
return;
unsigned int currVal = d_inputVals[myID1D];
unsigned int currPos = d_inputPos[myID1D];
for (unsigned int i=0;i<bitdepth;i+=numBits){
unsigned int mask = (numBins - 1) << i;
unsigned int bin = (currVal & mask) >> i;
d_offsetvec[myID1D] = atomicAdd(& d_binHistogram[bin], 1);
__syncthreads();
if (myID1D == 0){
d_binScan[0] = 0;
for (unsigned int j=1;j<numBins;j++){
d_binScan[j] = d_binScan[j-1] + d_binHistogram[j-1];
printf("d_bins at %d is %d\n", j, d_binScan[j]);
d_binHistogram[j-1] = 0;
}
d_binHistogram[numBins-1] = 0;
}
__syncthreads();
unsigned int newidx = d_binScan[bin] + d_offsetvec[myID1D];
//if (newidx>=numElems)
//printf("newidx overflowed: %d\n", newidx);
//d_outputVals[newidx] = currVal;
//d_outputPos[newidx] = currPos;
__syncthreads();
currVal = d_outputVals[myID1D];
currPos = d_outputPos[myID1D];
}
}
void prefix_sum_scan (unsigned int* d_io_offset, unsigned int numthreads,
const size_t numElems, const int thread_x, const int thread_y, const int grids)
{
unsigned int *d_init;
checkCudaErrors(cudaMalloc(&d_init, numElems*sizeof(unsigned int)));
checkCudaErrors(cudaMemcpy(d_init, d_io_offset, numElems*sizeof(unsigned int), cudaMemcpyDeviceToDevice));
const dim3 blockSize(thread_x, thread_y, 1);
const dim3 gridSize(grids, 1, 1);
blelloch_scan<<<gridSize, blockSize>>>(d_io_offset, numthreads);
blelloch_patch<<<1, gridSize>>>(d_io_offset, d_init, numthreads, numElems);
hillis_postprocess<<<1, gridSize>>>(d_io_offset, numthreads, grids, numElems);
global_correct<<<gridSize, blockSize>>>(d_io_offset, numthreads);
}
void your_sort(unsigned int* const d_inputVals,
unsigned int* const d_inputPos,
unsigned int* const d_outputVals,
unsigned int* const d_outputPos,
const size_t numElems)
{
//TODO
//PUT YOUR SORT HERE
/*
unsigned int numtest = 255;
unsigned int *h_testvec = new unsigned int[numtest];
for (unsigned int u=0;u<numtest;u++){
h_testvec[u] = u;
}
const int thread_x = 32;
const int thread_y = 32;
unsigned int numthreads = thread_x*thread_y;
const int grids = (int)ceil((float)numtest/(float)numthreads);
printf("grids is %d\n", grids);
const dim3 blockSize(thread_x, thread_y, 1);
const dim3 gridSize(grids, 1, 1);
const unsigned int newsize = grids*numthreads;
unsigned int *h_resvec = new unsigned int[newsize];
unsigned int *d_offset, *d_swapBufV, *d_swapBufP;
printf("numBins is %d\n", numBins);
printf("numElems is %d\n", (int) numElems);
checkCudaErrors(cudaMalloc(&d_offset, newsize*sizeof(unsigned int)));
checkCudaErrors(cudaMalloc(&d_swapBufV, numtest*sizeof(unsigned int)));
checkCudaErrors(cudaMalloc(&d_swapBufP, numtest*sizeof(unsigned int)));
checkCudaErrors(cudaMemcpy(d_outputVals, h_testvec, numtest*sizeof(unsigned int), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_outputPos, h_testvec, numtest*sizeof(unsigned int), cudaMemcpyHostToDevice));
compact<<<gridSize, blockSize>>>(d_outputVals, d_offset, numtest, 0);
prefix_sum_scan(d_offset, numthreads, newsize, thread_x, thread_y, grids);
checkCudaErrors(cudaMemcpy(h_resvec, d_offset, newsize*sizeof(unsigned int), cudaMemcpyDeviceToHost));
for (int j=0;j<newsize;j++){
if(j<numtest)
std::cout << "output is " << std::bitset<16>(h_testvec[j]);
else
std::cout << "output is none";
printf(" at %d, offset is %d\n", j, h_resvec[j]);
}
*/
const int thread_x = 32;
const int thread_y = 32;
unsigned int numthreads = thread_x*thread_y;
const int grids = (int)ceil((float)numElems/(float)numthreads);
printf("grids is %d\n", grids);
const dim3 blockSize(thread_x, thread_y, 1);
const dim3 gridSize(grids, 1, 1);
const unsigned int newsize = grids*numthreads;
unsigned int *d_eqnil, *d_offset, *d_swapBufV, *d_swapBufP;
printf("numBins is %d\n", numBins);
printf("numElems is %d\n", (int) numElems);
checkCudaErrors(cudaMalloc(&d_offset, newsize*sizeof(unsigned int)));
checkCudaErrors(cudaMalloc(&d_eqnil, newsize*sizeof(unsigned int)));
checkCudaErrors(cudaMalloc(&d_swapBufV, numElems*sizeof(unsigned int)));
checkCudaErrors(cudaMalloc(&d_swapBufP, numElems*sizeof(unsigned int)));
checkCudaErrors(cudaMemcpy(d_outputVals, d_inputVals, numElems*sizeof(unsigned int), cudaMemcpyDeviceToDevice));
checkCudaErrors(cudaMemcpy(d_outputPos, d_inputPos, numElems*sizeof(unsigned int), cudaMemcpyDeviceToDevice));
for (unsigned int i=0;i<bitdepth;i+=numBits)
{
compact<<<gridSize, blockSize>>>(d_outputVals, d_eqnil, numElems, i);
checkCudaErrors(cudaMemcpy(d_offset, d_eqnil, newsize*sizeof(unsigned int), cudaMemcpyDeviceToDevice));
prefix_sum_scan(d_offset, numthreads, newsize, thread_x, thread_y, grids);
swapLocs<<<gridSize, blockSize>>>(d_outputVals, d_outputPos, d_swapBufV, d_swapBufP, d_eqnil, d_offset, numElems, i);
checkCudaErrors(cudaMemcpy(d_outputVals, d_swapBufV, numElems*sizeof(unsigned int), cudaMemcpyDeviceToDevice));
checkCudaErrors(cudaMemcpy(d_outputPos, d_swapBufP, numElems*sizeof(unsigned int), cudaMemcpyDeviceToDevice));
checkCudaErrors(cudaGetLastError());
/*
unsigned int *h_binScan = new unsigned int[numBins];
checkCudaErrors(cudaMemcpy(h_binScan, d_binHisto, numBins*sizeof(unsigned int), cudaMemcpyDeviceToHost));
unsigned int sumBins = 0;
for (int j=0;j<numBins;j++){
sumBins+=h_binScan[j];
}
printf("sumBins = %d", sumBins);
checkCudaErrors(cudaMemcpy(h_testvec, d_outputVals, numtest*sizeof(unsigned int), cudaMemcpyDeviceToHost));
for (int j=0;j<numtest;j++){
std::cout << "output is " << std::bitset<16>(h_testvec[j]);
printf(" at %d, stage = %d\n", j, i);
}*/
}
/*
unsigned int numtest = 3596;
unsigned int *h_testvec = new unsigned int[numtest];
unsigned int *h_resvec = new unsigned int[numtest];
for (unsigned int u=0;u<numtest;u++){
h_testvec[u] = 1;
}
const int thread_x = 32;
const int thread_y = 32;
int numthreads = thread_x*thread_y;
const int grids = (int)ceil((float)numtest/(float)numthreads);
printf("grids is %d\n", grids);
const dim3 blockSize(thread_x, thread_y, 1);
const dim3 gridSize(grids, grids, 1);
const unsigned int newsize = grids*numthreads;
unsigned int *d_init;
checkCudaErrors(cudaMalloc(&d_init, newsize*sizeof(unsigned int)));
checkCudaErrors(cudaMemcpy(d_outputVals, h_testvec, newsize*sizeof(unsigned int), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_init, h_testvec, newsize*sizeof(unsigned int), cudaMemcpyHostToDevice));
prefix_sum_scan(d_outputVals, numthreads, newsize, thread_x, thread_y, grids);
checkCudaErrors(cudaMemcpy(h_resvec, d_outputVals, numtest*sizeof(unsigned int), cudaMemcpyDeviceToHost));
for (int j=0;j<numtest;j++){
printf("sorted h at %d is %d, original is %d\n", j, h_resvec[j], h_testvec[j]);
}
*/
}
|
7125ebbb0c9d1db476609e037e02a17ad5a2fb13.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2018-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <hip/hip_runtime_api.h>
#include <utility>
#include <vector>
#include "dali/operators/image/color/color_space_conversion.h"
#include "dali/kernels/imgproc/color_manipulation/color_space_conversion_kernel.cuh"
#include "dali/core/dev_buffer.h"
#include "dali/core/geom/vec.h"
namespace dali {
template<>
void ColorSpaceConversion<GPUBackend>::RunImpl(Workspace &ws) {
const auto& input = ws.Input<GPUBackend>(0);
auto& output = ws.Output<GPUBackend>(0);
output.SetLayout(input.GetLayout());
auto in_view = view<const uint8_t>(input);
auto out_view = view<uint8_t>(output);
const auto &in_sh = in_view.shape;
int nsamples = in_sh.num_samples();
auto stream = ws.stream();
for (int i = 0; i < nsamples; ++i) {
auto sample_sh = in_sh.tensor_shape_span(i);
int64_t npixels = volume(sample_sh.begin(), sample_sh.end() - 1);
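// volume() over all but the innermost dimension: with an interleaved layout
// such as "HWC", every dimension except the channel one counts as a pixel axis.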
kernels::color::RunColorSpaceConversionKernel(out_view[i].data, in_view[i].data, output_type_,
input_type_, npixels, stream);
}
}
DALI_REGISTER_OPERATOR(ColorSpaceConversion, ColorSpaceConversion<GPUBackend>, GPU);
} // namespace dali
| 7125ebbb0c9d1db476609e037e02a17ad5a2fb13.cu | // Copyright (c) 2018-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <cuda_runtime_api.h>
#include <utility>
#include <vector>
#include "dali/operators/image/color/color_space_conversion.h"
#include "dali/kernels/imgproc/color_manipulation/color_space_conversion_kernel.cuh"
#include "dali/core/dev_buffer.h"
#include "dali/core/geom/vec.h"
namespace dali {
template<>
void ColorSpaceConversion<GPUBackend>::RunImpl(Workspace &ws) {
const auto& input = ws.Input<GPUBackend>(0);
auto& output = ws.Output<GPUBackend>(0);
output.SetLayout(input.GetLayout());
auto in_view = view<const uint8_t>(input);
auto out_view = view<uint8_t>(output);
const auto &in_sh = in_view.shape;
int nsamples = in_sh.num_samples();
auto stream = ws.stream();
for (int i = 0; i < nsamples; ++i) {
auto sample_sh = in_sh.tensor_shape_span(i);
int64_t npixels = volume(sample_sh.begin(), sample_sh.end() - 1);
kernels::color::RunColorSpaceConversionKernel(out_view[i].data, in_view[i].data, output_type_,
input_type_, npixels, stream);
}
}
DALI_REGISTER_OPERATOR(ColorSpaceConversion, ColorSpaceConversion<GPUBackend>, GPU);
} // namespace dali
|
e659954e6dd0c5791a4c330f217eef505fc288f6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* is_kernel_mv.cu
*
* Created on: 18-Feb-2009
* Author: alee
*/
#include "matrix.ch"
__constant__ float args_p[NUM_AP];
__constant__ float args_q[NUM_AQ];
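// The kernel below computes importance-sampling weights: for each sample
// x_i it stores w_i = p(x_i)/q(x_i) (target density over proposal density),
// or the log-weight log p(x_i) - log q(x_i) when `log` is nonzero.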
template <int D>
__global__ void FUNC(is_gpu, TYPE)(int size, float* d_array, float* d_warray, int log) {
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
const int tt = blockDim.x * gridDim.x;
int i;
float p, q;
float* x;
for (i = tid; i < size; i += tt) {
x = d_vector_get(d_array, D, i);
if (log) {
p = LOG_TARGET<D>(x, args_p);
q = LOG_PROPOSAL<D>(x, args_q);
d_warray[i] = p - q;
} else {
p = TARGET<D>(x, args_p);
q = PROPOSAL<D>(x, args_q);
d_warray[i] = p / q;
}
}
}
template <int D>
void FUNC( is, TYPE)(
int size, float* d_array, float* d_warray, float* h_args_p, float* h_args_q, int log, int nb, int nt) {
hipMemcpyToSymbol(args_p, h_args_p, NUM_AP * sizeof(float));
hipMemcpyToSymbol(args_q, h_args_q, NUM_AQ * sizeof(float));
hipLaunchKernelGGL(HIP_KERNEL_NAME(FUNC(is_gpu, TYPE)<D>), dim3(nb), dim3(nt), 0, 0, size, d_array, d_warray, log);
}
| e659954e6dd0c5791a4c330f217eef505fc288f6.cu | /*
* is_kernel_mv.cu
*
* Created on: 18-Feb-2009
* Author: alee
*/
#include "matrix.ch"
__constant__ float args_p[NUM_AP];
__constant__ float args_q[NUM_AQ];
template <int D>
__global__ void FUNC(is_gpu, TYPE)(int size, float* d_array, float* d_warray, int log) {
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
const int tt = blockDim.x * gridDim.x;
int i;
float p, q;
float* x;
for (i = tid; i < size; i += tt) {
x = d_vector_get(d_array, D, i);
if (log) {
p = LOG_TARGET<D>(x, args_p);
q = LOG_PROPOSAL<D>(x, args_q);
d_warray[i] = p - q;
} else {
p = TARGET<D>(x, args_p);
q = PROPOSAL<D>(x, args_q);
d_warray[i] = p / q;
}
}
}
template <int D>
void FUNC( is, TYPE)(
int size, float* d_array, float* d_warray, float* h_args_p, float* h_args_q, int log, int nb, int nt) {
cudaMemcpyToSymbol(args_p, h_args_p, NUM_AP * sizeof(float));
cudaMemcpyToSymbol(args_q, h_args_q, NUM_AQ * sizeof(float));
FUNC(is_gpu, TYPE)<D><<<nb,nt>>>(size, d_array, d_warray, log);
}
|
b526f0a478546b706711a31d0dfa00eaa794de1e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void HydroComputedUx_CUDA3_kernel(float *FluxD, float *FluxS1, float *FluxS2, float *FluxS3, float *FluxTau, float *dUD, float *dUS1, float *dUS2, float *dUS3, float *dUTau, float dtdx, int size)
{
// get thread and block index
const long tx = threadIdx.x;
const long bx = blockIdx.x;
const long by = blockIdx.y;
int igrid = tx + bx*CUDA_BLOCK_SIZE + by*CUDA_BLOCK_SIZE*CUDA_GRID_SIZE;
if (igrid < 2 || igrid > size - 3)
return;
int igridp1 = igrid + 1;
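// Conservative flux-difference update: dU_i = (F_i - F_{i+1}) * dt/dx for
// each of the five conserved fields (D, S1, S2, S3, Tau).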
dUD [igrid] = (FluxD [igrid] - FluxD [igridp1])*dtdx;
dUS1 [igrid] = (FluxS1 [igrid] - FluxS1 [igridp1])*dtdx;
dUS2 [igrid] = (FluxS2 [igrid] - FluxS2 [igridp1])*dtdx;
dUS3 [igrid] = (FluxS3 [igrid] - FluxS3 [igridp1])*dtdx;
dUTau[igrid] = (FluxTau[igrid] - FluxTau[igridp1])*dtdx;
} | b526f0a478546b706711a31d0dfa00eaa794de1e.cu | #include "includes.h"
__global__ void HydroComputedUx_CUDA3_kernel(float *FluxD, float *FluxS1, float *FluxS2, float *FluxS3, float *FluxTau, float *dUD, float *dUS1, float *dUS2, float *dUS3, float *dUTau, float dtdx, int size)
{
// get thread and block index
const long tx = threadIdx.x;
const long bx = blockIdx.x;
const long by = blockIdx.y;
int igrid = tx + bx*CUDA_BLOCK_SIZE + by*CUDA_BLOCK_SIZE*CUDA_GRID_SIZE;
if (igrid < 2 || igrid > size - 3)
return;
int igridp1 = igrid + 1;
dUD [igrid] = (FluxD [igrid] - FluxD [igridp1])*dtdx;
dUS1 [igrid] = (FluxS1 [igrid] - FluxS1 [igridp1])*dtdx;
dUS2 [igrid] = (FluxS2 [igrid] - FluxS2 [igridp1])*dtdx;
dUS3 [igrid] = (FluxS3 [igrid] - FluxS3 [igridp1])*dtdx;
dUTau[igrid] = (FluxTau[igrid] - FluxTau[igridp1])*dtdx;
} |
f42413bc53b46fad8979f6a6c398d376ab643f06.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <chrono>
#include <vector>
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
std::cerr << "GPUassert: " << hipGetErrorString(code) << " " << file << " " << line << std::endl;
if (abort) exit(code);
}
}
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
#define EPSILON 1.0e-7
template<typename real_t>
using hvector = std::vector<real_t>;
template<typename real_t>
struct dvector{
real_t *data;
dvector(hvector<real_t>& v){
hipMalloc(&data, v.size() * sizeof(real_t));
hipMemcpy(data, v.data(), v.size() * sizeof(real_t), hipMemcpyHostToDevice);
}
~dvector(){hipFree(data);}
void to_vector(hvector<real_t>& v){ hipMemcpy(v.data(), data, v.size() * sizeof(real_t), hipMemcpyDeviceToHost); }
};
/**
* ORIGINAL
* */
template<typename real_t>
__global__ void vector_sum(real_t* a, real_t* b, real_t* c){
auto i = threadIdx.x + blockIdx.x * blockDim.x;
c[i] = b[i] + a[i];
}
/**
* DMR mixed
* */
template<typename real_t, typename half_t>
__global__ void vector_sum_dmr(real_t* a, real_t* b, real_t* c, half_t* c_half){
auto i = threadIdx.x + blockIdx.x * blockDim.x;
auto ai = a[i];
auto bi = b[i];
half_t bh = half_t(bi);
half_t ah = half_t(ai);
c[i] = bi + ai;
c_half[i] = bh + ah;
}
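/* DMR = dual modular redundancy. This mixed variant duplicates the
   arithmetic at two precisions inside one kernel (full double result in c,
   reduced float result in c_half), so the redundancy costs only a cast and
   one extra add per element, versus the "full DMR" path in main() that
   launches the double-precision kernel twice. */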
/**
* Compare overload
**/
__device__ __forceinline__
bool diff(double lhs, double rhs){
return (fabs(lhs - rhs) > EPSILON);
}
__device__ __forceinline__
bool diff(double lhs, float rhs){
auto lhs_float = float(lhs);
uint32_t ulhs = *((uint32_t*) &lhs_float);
uint32_t urhs = *((uint32_t*) &rhs);
auto diff_val = (ulhs > urhs) ? ulhs - urhs : urhs - ulhs;
return (diff_val > 2);
}
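/* The float/double comparison above measures distance in ULPs: the double is
   rounded to float, both floats are reinterpreted as 32-bit integers, and
   values more than 2 representable floats apart are flagged. Note the bit
   trick is only meaningful when both values share a sign and neither is NaN. */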
template<typename real_t, typename half_t>
__global__ void comparator(real_t* lhs, half_t* rhs){
auto i = threadIdx.x + blockIdx.x * blockDim.x;
auto lhsi = lhs[i];
auto rhsi = rhs[i];
if(diff(lhsi, rhsi)){
printf("Thread %d - lhs %.6e rhs %.6e\n", i, lhsi, rhsi);
}
}
int main(){
//time counters
using std::chrono::high_resolution_clock;
using std::chrono::duration_cast;
using std::chrono::duration;
using std::chrono::milliseconds;
//sizes
constexpr int iterations = 100;
constexpr size_t blocks = 8192;
constexpr size_t threads = 1024;
constexpr size_t size = blocks * threads;
hvector<double> a_host(size);
hvector<double> b_host(size);
hvector<double> c_host(size, 0);
hvector<float> c_dmr_host(size, 0);
for(int i = 0; i < size; i++){
a_host[i] = i;
b_host[i] = 1.0/double(i * 2);
}
dvector<double> a_dev(a_host);
dvector<double> b_dev(b_host);
dvector<double> c_dev(c_host);
dvector<float> c_dmr_dev(c_dmr_host);
dvector<double> c_dmr_full_dev(c_host);
//Original no dmr
auto original_time_t1 = high_resolution_clock::now();
for(int it = 0; it < iterations; it++){
hipLaunchKernelGGL(( vector_sum), dim3(blocks), dim3(threads), 0, 0, a_dev.data, b_dev.data, c_dev.data);
gpuErrchk(hipDeviceSynchronize());
}
auto original_time_t2 = high_resolution_clock::now();
// full DMR
auto full_dmr_time_t1 = high_resolution_clock::now();
for(int it = 0; it < iterations; it++){
hipLaunchKernelGGL(( vector_sum), dim3(blocks), dim3(threads), 0, 0, a_dev.data, b_dev.data, c_dmr_full_dev.data);
hipLaunchKernelGGL(( vector_sum), dim3(blocks), dim3(threads), 0, 0, a_dev.data, b_dev.data, c_dev.data);
gpuErrchk(hipDeviceSynchronize());
hipLaunchKernelGGL(( comparator), dim3(blocks), dim3(threads), 0, 0, c_dmr_full_dev.data, c_dev.data);
gpuErrchk(hipDeviceSynchronize());
}
auto full_dmr_time_t2 = high_resolution_clock::now();
//Mixed dmr
auto mixed_dmr_time_t1 = high_resolution_clock::now();
for(int it = 0; it < iterations; it++){
hipLaunchKernelGGL(( vector_sum_dmr), dim3(blocks), dim3(threads), 0, 0, a_dev.data, b_dev.data, c_dev.data, c_dmr_dev.data);
hipLaunchKernelGGL(( comparator), dim3(blocks), dim3(threads), 0, 0, c_dev.data, c_dmr_dev.data);
gpuErrchk(hipDeviceSynchronize());
}
auto mixed_dmr_time_t2 = high_resolution_clock::now();
/* Getting number of milliseconds as a double. */
duration<double, std::milli> ms_original = original_time_t2 - original_time_t1;
duration<double, std::milli> ms_full = full_dmr_time_t2 - full_dmr_time_t1;
duration<double, std::milli> ms_mixed = mixed_dmr_time_t2 - mixed_dmr_time_t1;
std::cout << "ms_original: " << ms_original.count() << "ms\n";
std::cout << "ms_full: " << ms_full.count() << "ms\n";
std::cout << "ms_mixed: " << ms_mixed.count() << "ms\n";
}
| f42413bc53b46fad8979f6a6c398d376ab643f06.cu | #include <iostream>
#include <chrono>
#include <vector>
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
std::cerr << "GPUassert: " << cudaGetErrorString(code) << " " << file << " " << line << std::endl;
if (abort) exit(code);
}
}
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
#define EPSILON 1.0e-7
template<typename real_t>
using hvector = std::vector<real_t>;
template<typename real_t>
struct dvector{
real_t *data;
dvector(hvector<real_t>& v){
cudaMalloc(&data, v.size() * sizeof(real_t));
cudaMemcpy(data, v.data(), v.size() * sizeof(real_t), cudaMemcpyHostToDevice);
}
~dvector(){cudaFree(data);}
void to_vector(hvector<real_t>& v){ cudaMemcpy(v.data(), data, v.size() * sizeof(real_t), cudaMemcpyDeviceToHost); }
};
/**
* ORIGINAL
* */
template<typename real_t>
__global__ void vector_sum(real_t* a, real_t* b, real_t* c){
auto i = threadIdx.x + blockIdx.x * blockDim.x;
c[i] = b[i] + a[i];
}
/**
* DMR mixed
* */
template<typename real_t, typename half_t>
__global__ void vector_sum_dmr(real_t* a, real_t* b, real_t* c, half_t* c_half){
auto i = threadIdx.x + blockIdx.x * blockDim.x;
auto ai = a[i];
auto bi = b[i];
half_t bh = half_t(bi);
half_t ah = half_t(ai);
c[i] = bi + ai;
c_half[i] = bh + ah;
}
/**
* Compare overload
**/
__device__ __forceinline__
bool diff(double lhs, double rhs){
return (fabs(lhs - rhs) > EPSILON);
}
__device__ __forceinline__
bool diff(double lhs, float rhs){
auto lhs_float = float(lhs);
uint32_t ulhs = *((uint32_t*) &lhs_float);
uint32_t urhs = *((uint32_t*) &rhs);
auto diff_val = (ulhs > urhs) ? ulhs - urhs : urhs - ulhs;
return (diff_val > 2);
}
template<typename real_t, typename half_t>
__global__ void comparator(real_t* lhs, half_t* rhs){
auto i = threadIdx.x + blockIdx.x * blockDim.x;
auto lhsi = lhs[i];
auto rhsi = rhs[i];
if(diff(lhsi, rhsi)){
printf("Thread %d - lhs %.6e rhs %.6e\n", i, lhsi, rhsi);
}
}
int main(){
//time counters
using std::chrono::high_resolution_clock;
using std::chrono::duration_cast;
using std::chrono::duration;
using std::chrono::milliseconds;
//sizes
constexpr int iterations = 100;
constexpr size_t blocks = 8192;
constexpr size_t threads = 1024;
constexpr size_t size = blocks * threads;
hvector<double> a_host(size);
hvector<double> b_host(size);
hvector<double> c_host(size, 0);
hvector<float> c_dmr_host(size, 0);
for(int i = 0; i < size; i++){
a_host[i] = i;
b_host[i] = 1.0/double(i * 2);
}
dvector<double> a_dev(a_host);
dvector<double> b_dev(b_host);
dvector<double> c_dev(c_host);
dvector<float> c_dmr_dev(c_dmr_host);
dvector<double> c_dmr_full_dev(c_host);
//Original no dmr
auto original_time_t1 = high_resolution_clock::now();
for(int it = 0; it < iterations; it++){
vector_sum<<<blocks, threads>>>(a_dev.data, b_dev.data, c_dev.data);
gpuErrchk(cudaDeviceSynchronize());
}
auto original_time_t2 = high_resolution_clock::now();
// full DMR
auto full_dmr_time_t1 = high_resolution_clock::now();
for(int it = 0; it < iterations; it++){
vector_sum<<<blocks, threads>>>(a_dev.data, b_dev.data, c_dmr_full_dev.data);
vector_sum<<<blocks, threads>>>(a_dev.data, b_dev.data, c_dev.data);
gpuErrchk(cudaDeviceSynchronize());
comparator<<<blocks, threads>>>(c_dmr_full_dev.data, c_dev.data);
gpuErrchk(cudaDeviceSynchronize());
}
auto full_dmr_time_t2 = high_resolution_clock::now();
//Mixed dmr
auto mixed_dmr_time_t1 = high_resolution_clock::now();
for(int it = 0; it < iterations; it++){
vector_sum_dmr<<<blocks, threads>>>(a_dev.data, b_dev.data, c_dev.data, c_dmr_dev.data);
comparator<<<blocks, threads>>>(c_dev.data, c_dmr_dev.data);
gpuErrchk(cudaDeviceSynchronize());
}
auto mixed_dmr_time_t2 = high_resolution_clock::now();
/* Getting number of milliseconds as a double. */
duration<double, std::milli> ms_original = original_time_t2 - original_time_t1;
duration<double, std::milli> ms_full = full_dmr_time_t2 - full_dmr_time_t1;
duration<double, std::milli> ms_mixed = mixed_dmr_time_t2 - mixed_dmr_time_t1;
std::cout << "ms_original: " << ms_original.count() << "ms\n";
std::cout << "ms_full: " << ms_full.count() << "ms\n";
std::cout << "ms_mixed: " << ms_mixed.count() << "ms\n";
}
|
502c2ab96e6ea113460647b6fd01589b87c67fe5.hip | // !!! This is a file automatically generated by hipify!!!
// ----------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------
/**
* @file
* test_sssp.cu
*
* @brief Simple test driver program for single source shorest path.
*/
#include <stdio.h>
#include <string>
#include <deque>
#include <vector>
#include <iostream>
// Utilities and correctness-checking
#include <gunrock/util/test_utils.cuh>
// SSSP includes
#include <gunrock/app/sssp/sssp_enactor.cuh>
#include <gunrock/app/sssp/sssp_problem.cuh>
#include <gunrock/app/sssp/sssp_functor.cuh>
// Operator includes
#include <gunrock/oprtr/advance/kernel.cuh>
#include <gunrock/oprtr/filter/kernel.cuh>
#include <gunrock/priority_queue/kernel.cuh>
#include <moderngpu.cuh>
// Boost includes for CPU dijkstra SSSP reference algorithms
#include <boost/config.hpp>
#include <boost/graph/graph_traits.hpp>
#include <boost/graph/adjacency_list.hpp>
#include <boost/graph/dijkstra_shortest_paths.hpp>
#include <boost/property_map/property_map.hpp>
using namespace gunrock;
using namespace gunrock::app;
using namespace gunrock::util;
using namespace gunrock::oprtr;
using namespace gunrock::app::sssp;
/******************************************************************************
* Housekeeping Routines
******************************************************************************/
void Usage()
{
printf(
" test_sssp <graph type> <graph type args> [--device=<device_index>]\n"
" [--undirected] [--instrumented] [--src=<source index>] [--quick=<0|1>]\n"
" [--mark-pred] [--queue-sizing=<scale factor>] [--traversal-mode=<0|1>]\n"
" [--in-sizing=<in/out queue scale factor>] [--disable-size-check]\n"
" [--grid-size=<grid size>] [partition_method=<random|biasrandom|clustered|metis>]\n"
" [--v] [--iteration-num=<num>]\n"
"\n"
"Graph types and args:\n"
" market [<file>]\n"
" Reads a Matrix-Market coordinate-formatted graph of directed / undirected\n"
" edges from stdin (or from the optionally-specified file).\n"
" --device=<device_index> Set GPU device for running the test. [Default: 0].\n"
" --undirected Treat the graph as undirected (symmetric).\n"
" --instrumented Keep kernels statics [Default: Disable].\n"
" total_queued, search_depth and barrier duty\n"
" (a relative indicator of load imbalance.)\n"
" --src=<source vertex id> Begins SSSP from the source [Default: 0].\n"
" If randomize: from a random source vertex.\n"
" If largestdegree: from largest degree vertex.\n"
" --quick=<0 or 1> Skip the CPU validation: 1, or not: 0 [Default: 1].\n"
" --mark-pred Keep both label info and predecessor info.\n"
" --queue-sizing=<factor> Allocates a frontier queue sized at:\n"
" (graph-edges * <scale factor>) [Default: 1.0].\n"
" --v Print verbose per iteration debug info.\n"
" --iteration-num=<number> Number of runs to perform the test [Default: 1].\n"
" --traversal-mode=<0 or 1> Set traversal strategy, 0 for Load-Balanced,\n"
" 1 for Dynamic-Cooperative [Default: dynamic\n"
" determine based on average degree].\n"
);
}
/**
* @brief Displays the SSSP result (i.e., distance from source)
*
* @tparam VertexId
* @tparam SizeT
*
* @param[in] source_path Search depth from the source for each node.
* @param[in] num_nodes Number of nodes in the graph.
*/
template<typename VertexId, typename SizeT>
void DisplaySolution (VertexId *source_path, SizeT num_nodes)
{
if (num_nodes > 40) num_nodes = 40;
printf("[");
for (VertexId i = 0; i < num_nodes; ++i)
{
PrintValue(i);
printf(":");
PrintValue(source_path[i]);
printf(" ");
}
printf("]\n");
}
/******************************************************************************
* SSSP Testing Routines
*****************************************************************************/
/**
* @brief A simple CPU-based reference SSSP ranking implementation.
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
* @tparam MARK_PREDECESSORS
*
* @param[in] graph Reference to the CSR graph we process on
* @param[in] node_values Host-side vector to store CPU computed labels for each node
* @param[in] node_preds Host-side vector to store CPU computed predecessors for each node
* @param[in] src Source node where SSSP starts
*/
template <
typename VertexId,
typename Value,
typename SizeT,
bool MARK_PREDECESSORS >
void SimpleReferenceSssp(
const Csr<VertexId, Value, SizeT> &graph,
Value *node_values,
VertexId *node_preds,
VertexId src,
bool quiet)
{
using namespace boost;
// Prepare Boost Datatype and Data structure
typedef adjacency_list<vecS, vecS, directedS, no_property,
property <edge_weight_t, unsigned int> > Graph;
typedef graph_traits<Graph>::vertex_descriptor vertex_descriptor;
typedef graph_traits<Graph>::edge_descriptor edge_descriptor;
typedef std::pair<VertexId, VertexId> Edge;
Edge *edges = ( Edge*)malloc(sizeof( Edge) * graph.edges);
Value *weight = (Value*)malloc(sizeof(Value) * graph.edges);
for (int i = 0; i < graph.nodes; ++i)
{
for (int j = graph.row_offsets[i]; j < graph.row_offsets[i + 1]; ++j)
{
edges[j] = Edge(i, graph.column_indices[j]);
weight[j] = graph.edge_values[j];
}
}
Graph g(edges, edges + graph.edges, weight, graph.nodes);
std::vector<Value> d(graph.nodes);
std::vector<vertex_descriptor> p(graph.nodes);
vertex_descriptor s = vertex(src, g);
property_map<Graph, vertex_index_t>::type indexmap = get(vertex_index, g);
//
// Perform SSSP
//
CpuTimer cpu_timer;
cpu_timer.Start();
if (MARK_PREDECESSORS)
{
dijkstra_shortest_paths(g, s,
predecessor_map(boost::make_iterator_property_map(
p.begin(), get(boost::vertex_index, g))).distance_map(
boost::make_iterator_property_map(
d.begin(), get(boost::vertex_index, g))));
}
else
{
dijkstra_shortest_paths(g, s,
distance_map(boost::make_iterator_property_map(
d.begin(), get(boost::vertex_index, g))));
}
cpu_timer.Stop();
float elapsed = cpu_timer.ElapsedMillis();
if (!quiet) { printf("CPU SSSP finished in %lf msec.\n", elapsed); }
Coo<Value, Value>* sort_dist = NULL;
Coo<VertexId, VertexId>* sort_pred = NULL;
sort_dist = (Coo<Value, Value>*)malloc(
sizeof(Coo<Value, Value>) * graph.nodes);
if (MARK_PREDECESSORS)
{
sort_pred = (Coo<VertexId, VertexId>*)malloc(
sizeof(Coo<VertexId, VertexId>) * graph.nodes);
}
graph_traits < Graph >::vertex_iterator vi, vend;
for (tie(vi, vend) = vertices(g); vi != vend; ++vi)
{
sort_dist[(*vi)].row = (*vi);
sort_dist[(*vi)].col = d[(*vi)];
}
std::stable_sort(
sort_dist, sort_dist + graph.nodes,
RowFirstTupleCompare<Coo<Value, Value> >);
if (MARK_PREDECESSORS)
{
for (tie(vi, vend) = vertices(g); vi != vend; ++vi)
{
sort_pred[(*vi)].row = (*vi);
sort_pred[(*vi)].col = p[(*vi)];
}
std::stable_sort(
sort_pred, sort_pred + graph.nodes,
RowFirstTupleCompare< Coo<VertexId, VertexId> >);
}
for (int i = 0; i < graph.nodes; ++i)
{
node_values[i] = sort_dist[i].col;
}
if (MARK_PREDECESSORS)
{
for (int i = 0; i < graph.nodes; ++i)
{
node_preds[i] = sort_pred[i].col;
}
}
if (sort_dist) free(sort_dist);
if (sort_pred) free(sort_pred);
}
/**
* @brief Run SSSP tests
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
 * @tparam INSTRUMENT
 * @tparam DEBUG
 * @tparam SIZE_CHECK
 * @tparam MARK_PREDECESSORS
 *
 * @param[in] info Pointer to test parameter settings
*/
template <
typename VertexId,
typename Value,
typename SizeT,
bool INSTRUMENT,
bool DEBUG,
bool SIZE_CHECK,
bool MARK_PREDECESSORS >
void RunTests(Info<VertexId, Value, SizeT> *info)
{
typedef SSSPProblem < VertexId,
SizeT,
Value,
MARK_PREDECESSORS > Problem;
typedef SSSPEnactor < Problem,
INSTRUMENT,
DEBUG,
SIZE_CHECK > Enactor;
// parse configurations from mObject info
Csr<VertexId, Value, SizeT> *graph = info->csr_ptr;
VertexId src = info->info["source_vertex"].get_int64();
int max_grid_size = info->info["max_grid_size"].get_int();
int num_gpus = info->info["num_gpus"].get_int();
double max_queue_sizing = info->info["max_queue_sizing"].get_real();
double max_queue_sizing1 = info->info["max_queue_sizing1"].get_real();
double max_in_sizing = info->info["max_in_sizing"].get_real();
std::string partition_method = info->info["partition_method"].get_str();
double partition_factor = info->info["partition_factor"].get_real();
int partition_seed = info->info["partition_seed"].get_int();
bool quiet_mode = info->info["quiet_mode"].get_bool();
bool quick_mode = info->info["quick_mode"].get_bool();
bool stream_from_host = info->info["stream_from_host"].get_bool();
int traversal_mode = info->info["traversal_mode"].get_int();
int iterations = info->info["num_iteration"].get_int();
int delta_factor = info->info["delta_factor"].get_int();
json_spirit::mArray device_list = info->info["device_list"].get_array();
int* gpu_idx = new int[num_gpus];
for (int i = 0; i < num_gpus; i++) gpu_idx[i] = device_list[i].get_int();
// TODO: remove after merge mgpu-cq
ContextPtr *context = (ContextPtr*) info->context;
hipStream_t *streams = (hipStream_t*)info->streams;
// Allocate host-side array (for both reference and GPU-computed results)
Value *reference_labels = new Value[graph->nodes];
Value *h_labels = new Value[graph->nodes];
Value *reference_check_label = (quick_mode) ? NULL : reference_labels;
VertexId *reference_preds = MARK_PREDECESSORS ? new VertexId[graph->nodes] : NULL;
VertexId *h_preds = MARK_PREDECESSORS ? new VertexId[graph->nodes] : NULL;
VertexId *reference_check_pred = (quick_mode || !MARK_PREDECESSORS) ? NULL : reference_preds;
size_t *org_size = new size_t[num_gpus];
for (int gpu = 0; gpu < num_gpus; gpu++)
{
size_t dummy;
hipSetDevice(gpu_idx[gpu]);
hipMemGetInfo(&(org_size[gpu]), &dummy);
}
// Allocate SSSP enactor map
Enactor* enactor = new Enactor(num_gpus, gpu_idx);
// Allocate problem on GPU
Problem *problem = new Problem;
util::GRError(problem->Init(
stream_from_host,
graph,
NULL,
num_gpus,
gpu_idx,
partition_method,
streams,
delta_factor,
max_queue_sizing,
max_in_sizing,
partition_factor,
partition_seed),
"SSSP Problem Init failed", __FILE__, __LINE__);
util::GRError(enactor->Init(
context, problem, max_grid_size, traversal_mode),
"SSSP Enactor Init failed", __FILE__, __LINE__);
// compute reference CPU SSSP solution for source-distance
if (reference_check_label != NULL)
{
if (!quiet_mode) { printf("Computing reference value ...\n"); }
SimpleReferenceSssp<VertexId, Value, SizeT, MARK_PREDECESSORS>(
*graph,
reference_check_label,
reference_check_pred,
src,
quiet_mode);
if (!quiet_mode) { printf("\n"); }
}
double elapsed = 0.0f;
// perform SSSP
CpuTimer cpu_timer;
for (int iter = 0; iter < iterations; ++iter)
{
util::GRError(problem->Reset(
src, enactor->GetFrontierType(), max_queue_sizing),
"SSSP Problem Data Reset Failed", __FILE__, __LINE__);
util::GRError(enactor->Reset(),
"SSSP Enactor Reset failed", __FILE__, __LINE__);
if (!quiet_mode)
{
printf("__________________________\n"); fflush(stdout);
}
cpu_timer.Start();
util::GRError(enactor->Enact(src, traversal_mode),
"SSSP Problem Enact Failed", __FILE__, __LINE__);
cpu_timer.Stop();
if (!quiet_mode)
{
printf("--------------------------\n"); fflush(stdout);
}
elapsed += cpu_timer.ElapsedMillis();
}
elapsed /= iterations;
// Copy out results
util::GRError(problem->Extract(h_labels, h_preds),
"SSSP Problem Data Extraction Failed", __FILE__, __LINE__);
    if (reference_check_label != NULL) // only allocated when CPU validation runs
    {
        for (SizeT i = 0; i < graph->nodes; i++)
        {
            if (reference_check_label[i] == -1)
            {
                reference_check_label[i] = util::MaxValue<Value>();
            }
        }
    }
if (!quiet_mode)
{
// Display Solution
printf("\nFirst 40 labels of the GPU result.\n");
DisplaySolution(h_labels, graph->nodes);
}
// Verify the result
if (reference_check_label != NULL)
{
if (!quiet_mode) { printf("Label Validity: "); }
int error_num = CompareResults(
h_labels, reference_check_label,
graph->nodes, true, quiet_mode);
if (error_num > 0)
{
if (!quiet_mode) { printf("%d errors occurred.\n", error_num); }
}
if (!quiet_mode)
{
printf("\nFirst 40 labels of the reference CPU result.\n");
DisplaySolution(reference_check_label, graph->nodes);
}
}
info->ComputeTraversalStats( // compute running statistics
enactor->enactor_stats.GetPointer(), elapsed, h_labels);
if (!quiet_mode)
{
info->DisplayStats(); // display collected statistics
}
    info->CollectInfo(); // collect all the info and put it into the JSON mObject
if (!quiet_mode)
{
if (MARK_PREDECESSORS)
{
printf("\nFirst 40 preds of the GPU result.\n");
DisplaySolution(h_preds, graph->nodes);
if (reference_check_label != NULL)
{
printf("\nFirst 40 preds of the reference CPU result (could be different because the paths are not unique).\n");
DisplaySolution(reference_check_pred, graph->nodes);
}
}
printf("\n\tMemory Usage(B)\t");
        for (int gpu = 0; gpu < num_gpus; gpu++)
        {
            if (num_gpus > 1 && gpu != 0)
                printf(" #keys%d,0\t #keys%d,1\t #ins%d,0\t #ins%d,1", gpu, gpu, gpu, gpu);
            else
                printf(" #keys%d,0\t #keys%d,1", gpu, gpu);
        }
if (num_gpus > 1) printf(" #keys%d", num_gpus);
printf("\n");
double max_queue_sizing_[2] = {0, 0}, max_in_sizing_ = 0;
for (int gpu = 0; gpu < num_gpus; gpu++)
{
size_t gpu_free, dummy;
hipSetDevice(gpu_idx[gpu]);
hipMemGetInfo(&gpu_free, &dummy);
printf("GPU_%d\t %ld", gpu_idx[gpu], org_size[gpu] - gpu_free);
for (int i = 0; i < num_gpus; i++)
{
for (int j = 0; j < 2; j++)
{
SizeT x = problem->data_slices[gpu]->frontier_queues[i].keys[j].GetSize();
printf("\t %lld", (long long) x);
double factor = 1.0 * x / (num_gpus > 1 ? problem->graph_slices[gpu]->in_counter[i] : problem->graph_slices[gpu]->nodes);
if (factor > max_queue_sizing_[j]) max_queue_sizing_[j] = factor;
}
if (num_gpus > 1 && i != 0 )
for (int t = 0; t < 2; t++)
{
SizeT x = problem->data_slices[gpu][0].keys_in[t][i].GetSize();
printf("\t %lld", (long long) x);
double factor = 1.0 * x / problem->graph_slices[gpu]->in_counter[i];
if (factor > max_in_sizing_) max_in_sizing_ = factor;
}
}
if (num_gpus > 1) printf("\t %lld", (long long)(problem->data_slices[gpu]->frontier_queues[num_gpus].keys[0].GetSize()));
printf("\n");
}
printf("\t queue_sizing =\t %lf \t %lf", max_queue_sizing_[0], max_queue_sizing_[1]);
if (num_gpus > 1) printf("\t in_sizing =\t %lf", max_in_sizing_);
printf("\n");
}
// Clean up
if (org_size ) {delete[] org_size ; org_size = NULL;}
if (enactor ) {delete enactor ; enactor = NULL;}
if (problem ) {delete problem ; problem = NULL;}
if (reference_labels) {delete[] reference_labels; reference_labels = NULL;}
if (h_labels ) {delete[] h_labels ; h_labels = NULL;}
if (reference_preds ) {delete[] reference_preds ; reference_preds = NULL;}
if (h_preds ) {delete[] h_preds ; h_preds = NULL;}
}
/**
* @brief RunTests entry
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
* @tparam INSTRUMENT
* @tparam DEBUG
* @tparam SIZE_CHECK
*
* @param[in] info Pointer to mObject info.
*/
template <
typename VertexId,
typename Value,
typename SizeT,
bool INSTRUMENT,
bool DEBUG,
bool SIZE_CHECK >
void RunTests_mark_predecessors(Info<VertexId, Value, SizeT> *info)
{
if (info->info["mark_predecessors"].get_bool())
{
RunTests<VertexId, Value, SizeT, INSTRUMENT,
DEBUG, SIZE_CHECK, true>(info);
}
else
{
RunTests<VertexId, Value, SizeT, INSTRUMENT,
DEBUG, SIZE_CHECK, false>(info);
}
}
/**
* @brief RunTests entry
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
* @tparam INSTRUMENT
* @tparam DEBUG
*
* @param[in] info Pointer to mObject info.
*/
template <
typename VertexId,
typename Value,
typename SizeT,
bool INSTRUMENT,
bool DEBUG >
void RunTests_size_check(Info<VertexId, Value, SizeT> *info)
{
if (info->info["size_check"].get_bool())
{
RunTests_mark_predecessors<VertexId, Value, SizeT, INSTRUMENT,
DEBUG, true>(info);
}
else
{
RunTests_mark_predecessors<VertexId, Value, SizeT, INSTRUMENT,
DEBUG, false>(info);
}
}
/**
* @brief RunTests entry
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
* @tparam INSTRUMENT
*
* @param[in] info Pointer to mObject info.
*/
template <
typename VertexId,
typename Value,
typename SizeT,
bool INSTRUMENT >
void RunTests_debug(Info<VertexId, Value, SizeT> *info)
{
if (info->info["debug_mode"].get_bool())
{
RunTests_size_check<VertexId, Value, SizeT, INSTRUMENT, true>(info);
}
else
{
RunTests_size_check<VertexId, Value, SizeT, INSTRUMENT, false>(info);
}
}
/**
* @brief RunTests entry
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
*
* @param[in] info Pointer to mObject info.
*/
template <
typename VertexId,
typename Value,
typename SizeT >
void RunTests_instrumented(Info<VertexId, Value, SizeT> *info)
{
if (info->info["instrument"].get_bool())
{
RunTests_debug<VertexId, Value, SizeT, true>(info);
}
else
{
RunTests_debug<VertexId, Value, SizeT, false>(info);
}
}
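/*
 * Taken together, RunTests_instrumented -> RunTests_debug ->
 * RunTests_size_check -> RunTests_mark_predecessors convert four runtime
 * booleans (instrument, debug_mode, size_check, mark_predecessors) into
 * compile-time template arguments, so each of the 16 flag combinations
 * dispatches to its own fully specialized RunTests instantiation instead
 * of branching inside the traversal hot path.
 */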
/******************************************************************************
* Main
******************************************************************************/
int main(int argc, char** argv)
{
CommandLineArgs args(argc, argv);
int graph_args = argc - args.ParsedArgc() - 1;
if (argc < 2 || graph_args < 1 || args.CheckCmdLineFlag("help"))
{
Usage();
return 1;
}
typedef int VertexId; // Use int as the vertex identifier
typedef int Value; // Use int as the value type
typedef int SizeT; // Use int as the graph size type
Csr<VertexId, Value, SizeT> csr(false); // graph we process on
Info<VertexId, Value, SizeT> *info = new Info<VertexId, Value, SizeT>;
// graph construction or generation related parameters
info->info["undirected"] = args.CheckCmdLineFlag("undirected");
info->info["edge_value"] = true; // require per edge weight values
info->Init("SSSP", args, csr); // initialize Info structure
RunTests_instrumented<VertexId, Value, SizeT>(info); // run test
return 0;
}
| 502c2ab96e6ea113460647b6fd01589b87c67fe5.cu | // ----------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------
/**
* @file
* test_sssp.cu
*
 * @brief Simple test driver program for single source shortest path.
*/
#include <stdio.h>
#include <string>
#include <deque>
#include <vector>
#include <iostream>
// Utilities and correctness-checking
#include <gunrock/util/test_utils.cuh>
// SSSP includes
#include <gunrock/app/sssp/sssp_enactor.cuh>
#include <gunrock/app/sssp/sssp_problem.cuh>
#include <gunrock/app/sssp/sssp_functor.cuh>
// Operator includes
#include <gunrock/oprtr/advance/kernel.cuh>
#include <gunrock/oprtr/filter/kernel.cuh>
#include <gunrock/priority_queue/kernel.cuh>
#include <moderngpu.cuh>
// Boost includes for CPU dijkstra SSSP reference algorithms
#include <boost/config.hpp>
#include <boost/graph/graph_traits.hpp>
#include <boost/graph/adjacency_list.hpp>
#include <boost/graph/dijkstra_shortest_paths.hpp>
#include <boost/property_map/property_map.hpp>
using namespace gunrock;
using namespace gunrock::app;
using namespace gunrock::util;
using namespace gunrock::oprtr;
using namespace gunrock::app::sssp;
/******************************************************************************
* Housekeeping Routines
******************************************************************************/
void Usage()
{
printf(
" test_sssp <graph type> <graph type args> [--device=<device_index>]\n"
" [--undirected] [--instrumented] [--src=<source index>] [--quick=<0|1>]\n"
" [--mark-pred] [--queue-sizing=<scale factor>] [--traversal-mode=<0|1>]\n"
" [--in-sizing=<in/out queue scale factor>] [--disable-size-check]\n"
" [--grid-size=<grid size>] [partition_method=<random|biasrandom|clustered|metis>]\n"
" [--v] [--iteration-num=<num>]\n"
"\n"
"Graph types and args:\n"
" market [<file>]\n"
" Reads a Matrix-Market coordinate-formatted graph of directed / undirected\n"
" edges from stdin (or from the optionally-specified file).\n"
" --device=<device_index> Set GPU device for running the test. [Default: 0].\n"
" --undirected Treat the graph as undirected (symmetric).\n"
" --instrumented Keep kernels statics [Default: Disable].\n"
" total_queued, search_depth and barrier duty\n"
" (a relative indicator of load imbalance.)\n"
" --src=<source vertex id> Begins SSSP from the source [Default: 0].\n"
" If randomize: from a random source vertex.\n"
" If largestdegree: from largest degree vertex.\n"
" --quick=<0 or 1> Skip the CPU validation: 1, or not: 0 [Default: 1].\n"
" --mark-pred Keep both label info and predecessor info.\n"
" --queue-sizing=<factor> Allocates a frontier queue sized at:\n"
" (graph-edges * <scale factor>) [Default: 1.0].\n"
" --v Print verbose per iteration debug info.\n"
" --iteration-num=<number> Number of runs to perform the test [Default: 1].\n"
" --traversal-mode=<0 or 1> Set traversal strategy, 0 for Load-Balanced,\n"
" 1 for Dynamic-Cooperative [Default: dynamic\n"
" determine based on average degree].\n"
);
}
/**
* @brief Displays the SSSP result (i.e., distance from source)
*
* @tparam VertexId
* @tparam SizeT
*
* @param[in] source_path Search depth from the source for each node.
* @param[in] num_nodes Number of nodes in the graph.
*/
template<typename VertexId, typename SizeT>
void DisplaySolution (VertexId *source_path, SizeT num_nodes)
{
if (num_nodes > 40) num_nodes = 40;
printf("[");
for (VertexId i = 0; i < num_nodes; ++i)
{
PrintValue(i);
printf(":");
PrintValue(source_path[i]);
printf(" ");
}
printf("]\n");
}
/******************************************************************************
* SSSP Testing Routines
*****************************************************************************/
/**
* @brief A simple CPU-based reference SSSP ranking implementation.
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
* @tparam MARK_PREDECESSORS
*
* @param[in] graph Reference to the CSR graph we process on
* @param[in] node_values Host-side vector to store CPU computed labels for each node
* @param[in] node_preds Host-side vector to store CPU computed predecessors for each node
* @param[in] src Source node where SSSP starts
 * @param[in] quiet Suppress status printing when true
 */
template <
typename VertexId,
typename Value,
typename SizeT,
bool MARK_PREDECESSORS >
void SimpleReferenceSssp(
const Csr<VertexId, Value, SizeT> &graph,
Value *node_values,
VertexId *node_preds,
VertexId src,
bool quiet)
{
using namespace boost;
// Prepare Boost Datatype and Data structure
typedef adjacency_list<vecS, vecS, directedS, no_property,
property <edge_weight_t, unsigned int> > Graph;
typedef graph_traits<Graph>::vertex_descriptor vertex_descriptor;
typedef graph_traits<Graph>::edge_descriptor edge_descriptor;
typedef std::pair<VertexId, VertexId> Edge;
Edge *edges = ( Edge*)malloc(sizeof( Edge) * graph.edges);
Value *weight = (Value*)malloc(sizeof(Value) * graph.edges);
for (int i = 0; i < graph.nodes; ++i)
{
for (int j = graph.row_offsets[i]; j < graph.row_offsets[i + 1]; ++j)
{
edges[j] = Edge(i, graph.column_indices[j]);
weight[j] = graph.edge_values[j];
}
}
Graph g(edges, edges + graph.edges, weight, graph.nodes);
std::vector<Value> d(graph.nodes);
std::vector<vertex_descriptor> p(graph.nodes);
vertex_descriptor s = vertex(src, g);
property_map<Graph, vertex_index_t>::type indexmap = get(vertex_index, g);
//
// Perform SSSP
//
CpuTimer cpu_timer;
cpu_timer.Start();
if (MARK_PREDECESSORS)
{
dijkstra_shortest_paths(g, s,
predecessor_map(boost::make_iterator_property_map(
p.begin(), get(boost::vertex_index, g))).distance_map(
boost::make_iterator_property_map(
d.begin(), get(boost::vertex_index, g))));
}
else
{
dijkstra_shortest_paths(g, s,
distance_map(boost::make_iterator_property_map(
d.begin(), get(boost::vertex_index, g))));
}
cpu_timer.Stop();
float elapsed = cpu_timer.ElapsedMillis();
if (!quiet) { printf("CPU SSSP finished in %lf msec.\n", elapsed); }
Coo<Value, Value>* sort_dist = NULL;
Coo<VertexId, VertexId>* sort_pred = NULL;
sort_dist = (Coo<Value, Value>*)malloc(
sizeof(Coo<Value, Value>) * graph.nodes);
if (MARK_PREDECESSORS)
{
sort_pred = (Coo<VertexId, VertexId>*)malloc(
sizeof(Coo<VertexId, VertexId>) * graph.nodes);
}
graph_traits < Graph >::vertex_iterator vi, vend;
for (tie(vi, vend) = vertices(g); vi != vend; ++vi)
{
sort_dist[(*vi)].row = (*vi);
sort_dist[(*vi)].col = d[(*vi)];
}
std::stable_sort(
sort_dist, sort_dist + graph.nodes,
RowFirstTupleCompare<Coo<Value, Value> >);
if (MARK_PREDECESSORS)
{
for (tie(vi, vend) = vertices(g); vi != vend; ++vi)
{
sort_pred[(*vi)].row = (*vi);
sort_pred[(*vi)].col = p[(*vi)];
}
std::stable_sort(
sort_pred, sort_pred + graph.nodes,
RowFirstTupleCompare< Coo<VertexId, VertexId> >);
}
for (int i = 0; i < graph.nodes; ++i)
{
node_values[i] = sort_dist[i].col;
}
if (MARK_PREDECESSORS)
{
for (int i = 0; i < graph.nodes; ++i)
{
node_preds[i] = sort_pred[i].col;
}
}
if (sort_dist) free(sort_dist);
if (sort_pred) free(sort_pred);
}
/**
* @brief Run SSSP tests
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
 * @tparam INSTRUMENT
 * @tparam DEBUG
 * @tparam SIZE_CHECK
 * @tparam MARK_PREDECESSORS
 *
 * @param[in] info Pointer to test parameter settings
*/
template <
typename VertexId,
typename Value,
typename SizeT,
bool INSTRUMENT,
bool DEBUG,
bool SIZE_CHECK,
bool MARK_PREDECESSORS >
void RunTests(Info<VertexId, Value, SizeT> *info)
{
typedef SSSPProblem < VertexId,
SizeT,
Value,
MARK_PREDECESSORS > Problem;
typedef SSSPEnactor < Problem,
INSTRUMENT,
DEBUG,
SIZE_CHECK > Enactor;
// parse configurations from mObject info
Csr<VertexId, Value, SizeT> *graph = info->csr_ptr;
VertexId src = info->info["source_vertex"].get_int64();
int max_grid_size = info->info["max_grid_size"].get_int();
int num_gpus = info->info["num_gpus"].get_int();
double max_queue_sizing = info->info["max_queue_sizing"].get_real();
double max_queue_sizing1 = info->info["max_queue_sizing1"].get_real();
double max_in_sizing = info->info["max_in_sizing"].get_real();
std::string partition_method = info->info["partition_method"].get_str();
double partition_factor = info->info["partition_factor"].get_real();
int partition_seed = info->info["partition_seed"].get_int();
bool quiet_mode = info->info["quiet_mode"].get_bool();
bool quick_mode = info->info["quick_mode"].get_bool();
bool stream_from_host = info->info["stream_from_host"].get_bool();
int traversal_mode = info->info["traversal_mode"].get_int();
int iterations = info->info["num_iteration"].get_int();
int delta_factor = info->info["delta_factor"].get_int();
json_spirit::mArray device_list = info->info["device_list"].get_array();
int* gpu_idx = new int[num_gpus];
for (int i = 0; i < num_gpus; i++) gpu_idx[i] = device_list[i].get_int();
// TODO: remove after merge mgpu-cq
ContextPtr *context = (ContextPtr*) info->context;
cudaStream_t *streams = (cudaStream_t*)info->streams;
// Allocate host-side array (for both reference and GPU-computed results)
Value *reference_labels = new Value[graph->nodes];
Value *h_labels = new Value[graph->nodes];
Value *reference_check_label = (quick_mode) ? NULL : reference_labels;
VertexId *reference_preds = MARK_PREDECESSORS ? new VertexId[graph->nodes] : NULL;
VertexId *h_preds = MARK_PREDECESSORS ? new VertexId[graph->nodes] : NULL;
VertexId *reference_check_pred = (quick_mode || !MARK_PREDECESSORS) ? NULL : reference_preds;
size_t *org_size = new size_t[num_gpus];
for (int gpu = 0; gpu < num_gpus; gpu++)
{
size_t dummy;
cudaSetDevice(gpu_idx[gpu]);
cudaMemGetInfo(&(org_size[gpu]), &dummy);
}
// Allocate SSSP enactor map
Enactor* enactor = new Enactor(num_gpus, gpu_idx);
// Allocate problem on GPU
Problem *problem = new Problem;
util::GRError(problem->Init(
stream_from_host,
graph,
NULL,
num_gpus,
gpu_idx,
partition_method,
streams,
delta_factor,
max_queue_sizing,
max_in_sizing,
partition_factor,
partition_seed),
"SSSP Problem Init failed", __FILE__, __LINE__);
util::GRError(enactor->Init(
context, problem, max_grid_size, traversal_mode),
"SSSP Enactor Init failed", __FILE__, __LINE__);
// compute reference CPU SSSP solution for source-distance
if (reference_check_label != NULL)
{
if (!quiet_mode) { printf("Computing reference value ...\n"); }
SimpleReferenceSssp<VertexId, Value, SizeT, MARK_PREDECESSORS>(
*graph,
reference_check_label,
reference_check_pred,
src,
quiet_mode);
if (!quiet_mode) { printf("\n"); }
}
double elapsed = 0.0f;
// perform SSSP
CpuTimer cpu_timer;
for (int iter = 0; iter < iterations; ++iter)
{
util::GRError(problem->Reset(
src, enactor->GetFrontierType(), max_queue_sizing),
"SSSP Problem Data Reset Failed", __FILE__, __LINE__);
util::GRError(enactor->Reset(),
"SSSP Enactor Reset failed", __FILE__, __LINE__);
if (!quiet_mode)
{
printf("__________________________\n"); fflush(stdout);
}
cpu_timer.Start();
util::GRError(enactor->Enact(src, traversal_mode),
"SSSP Problem Enact Failed", __FILE__, __LINE__);
cpu_timer.Stop();
if (!quiet_mode)
{
printf("--------------------------\n"); fflush(stdout);
}
elapsed += cpu_timer.ElapsedMillis();
}
elapsed /= iterations;
// Copy out results
util::GRError(problem->Extract(h_labels, h_preds),
"SSSP Problem Data Extraction Failed", __FILE__, __LINE__);
    if (reference_check_label != NULL) // only allocated when CPU validation runs
    {
        for (SizeT i = 0; i < graph->nodes; i++)
        {
            if (reference_check_label[i] == -1)
            {
                reference_check_label[i] = util::MaxValue<Value>();
            }
        }
    }
if (!quiet_mode)
{
// Display Solution
printf("\nFirst 40 labels of the GPU result.\n");
DisplaySolution(h_labels, graph->nodes);
}
// Verify the result
if (reference_check_label != NULL)
{
if (!quiet_mode) { printf("Label Validity: "); }
int error_num = CompareResults(
h_labels, reference_check_label,
graph->nodes, true, quiet_mode);
if (error_num > 0)
{
if (!quiet_mode) { printf("%d errors occurred.\n", error_num); }
}
if (!quiet_mode)
{
printf("\nFirst 40 labels of the reference CPU result.\n");
DisplaySolution(reference_check_label, graph->nodes);
}
}
info->ComputeTraversalStats( // compute running statistics
enactor->enactor_stats.GetPointer(), elapsed, h_labels);
if (!quiet_mode)
{
info->DisplayStats(); // display collected statistics
}
    info->CollectInfo(); // collect all the info and put it into the JSON mObject
if (!quiet_mode)
{
if (MARK_PREDECESSORS)
{
printf("\nFirst 40 preds of the GPU result.\n");
DisplaySolution(h_preds, graph->nodes);
if (reference_check_label != NULL)
{
printf("\nFirst 40 preds of the reference CPU result (could be different because the paths are not unique).\n");
DisplaySolution(reference_check_pred, graph->nodes);
}
}
printf("\n\tMemory Usage(B)\t");
        for (int gpu = 0; gpu < num_gpus; gpu++)
        {
            if (num_gpus > 1 && gpu != 0)
                printf(" #keys%d,0\t #keys%d,1\t #ins%d,0\t #ins%d,1", gpu, gpu, gpu, gpu);
            else
                printf(" #keys%d,0\t #keys%d,1", gpu, gpu);
        }
if (num_gpus > 1) printf(" #keys%d", num_gpus);
printf("\n");
double max_queue_sizing_[2] = {0, 0}, max_in_sizing_ = 0;
for (int gpu = 0; gpu < num_gpus; gpu++)
{
size_t gpu_free, dummy;
cudaSetDevice(gpu_idx[gpu]);
cudaMemGetInfo(&gpu_free, &dummy);
printf("GPU_%d\t %ld", gpu_idx[gpu], org_size[gpu] - gpu_free);
for (int i = 0; i < num_gpus; i++)
{
for (int j = 0; j < 2; j++)
{
SizeT x = problem->data_slices[gpu]->frontier_queues[i].keys[j].GetSize();
printf("\t %lld", (long long) x);
double factor = 1.0 * x / (num_gpus > 1 ? problem->graph_slices[gpu]->in_counter[i] : problem->graph_slices[gpu]->nodes);
if (factor > max_queue_sizing_[j]) max_queue_sizing_[j] = factor;
}
if (num_gpus > 1 && i != 0 )
for (int t = 0; t < 2; t++)
{
SizeT x = problem->data_slices[gpu][0].keys_in[t][i].GetSize();
printf("\t %lld", (long long) x);
double factor = 1.0 * x / problem->graph_slices[gpu]->in_counter[i];
if (factor > max_in_sizing_) max_in_sizing_ = factor;
}
}
if (num_gpus > 1) printf("\t %lld", (long long)(problem->data_slices[gpu]->frontier_queues[num_gpus].keys[0].GetSize()));
printf("\n");
}
printf("\t queue_sizing =\t %lf \t %lf", max_queue_sizing_[0], max_queue_sizing_[1]);
if (num_gpus > 1) printf("\t in_sizing =\t %lf", max_in_sizing_);
printf("\n");
}
// Clean up
if (org_size ) {delete[] org_size ; org_size = NULL;}
if (enactor ) {delete enactor ; enactor = NULL;}
if (problem ) {delete problem ; problem = NULL;}
if (reference_labels) {delete[] reference_labels; reference_labels = NULL;}
if (h_labels ) {delete[] h_labels ; h_labels = NULL;}
if (reference_preds ) {delete[] reference_preds ; reference_preds = NULL;}
if (h_preds ) {delete[] h_preds ; h_preds = NULL;}
}
/**
* @brief RunTests entry
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
* @tparam INSTRUMENT
* @tparam DEBUG
* @tparam SIZE_CHECK
*
* @param[in] info Pointer to mObject info.
*/
template <
typename VertexId,
typename Value,
typename SizeT,
bool INSTRUMENT,
bool DEBUG,
bool SIZE_CHECK >
void RunTests_mark_predecessors(Info<VertexId, Value, SizeT> *info)
{
if (info->info["mark_predecessors"].get_bool())
{
RunTests<VertexId, Value, SizeT, INSTRUMENT,
DEBUG, SIZE_CHECK, true>(info);
}
else
{
RunTests<VertexId, Value, SizeT, INSTRUMENT,
DEBUG, SIZE_CHECK, false>(info);
}
}
/**
* @brief RunTests entry
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
* @tparam INSTRUMENT
* @tparam DEBUG
*
* @param[in] info Pointer to mObject info.
*/
template <
typename VertexId,
typename Value,
typename SizeT,
bool INSTRUMENT,
bool DEBUG >
void RunTests_size_check(Info<VertexId, Value, SizeT> *info)
{
if (info->info["size_check"].get_bool())
{
RunTests_mark_predecessors<VertexId, Value, SizeT, INSTRUMENT,
DEBUG, true>(info);
}
else
{
RunTests_mark_predecessors<VertexId, Value, SizeT, INSTRUMENT,
DEBUG, false>(info);
}
}
/**
* @brief RunTests entry
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
* @tparam INSTRUMENT
*
* @param[in] info Pointer to mObject info.
*/
template <
typename VertexId,
typename Value,
typename SizeT,
bool INSTRUMENT >
void RunTests_debug(Info<VertexId, Value, SizeT> *info)
{
if (info->info["debug_mode"].get_bool())
{
RunTests_size_check<VertexId, Value, SizeT, INSTRUMENT, true>(info);
}
else
{
RunTests_size_check<VertexId, Value, SizeT, INSTRUMENT, false>(info);
}
}
/**
* @brief RunTests entry
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
*
* @param[in] info Pointer to mObject info.
*/
template <
typename VertexId,
typename Value,
typename SizeT >
void RunTests_instrumented(Info<VertexId, Value, SizeT> *info)
{
if (info->info["instrument"].get_bool())
{
RunTests_debug<VertexId, Value, SizeT, true>(info);
}
else
{
RunTests_debug<VertexId, Value, SizeT, false>(info);
}
}
/******************************************************************************
* Main
******************************************************************************/
int main(int argc, char** argv)
{
CommandLineArgs args(argc, argv);
int graph_args = argc - args.ParsedArgc() - 1;
if (argc < 2 || graph_args < 1 || args.CheckCmdLineFlag("help"))
{
Usage();
return 1;
}
typedef int VertexId; // Use int as the vertex identifier
typedef int Value; // Use int as the value type
typedef int SizeT; // Use int as the graph size type
Csr<VertexId, Value, SizeT> csr(false); // graph we process on
Info<VertexId, Value, SizeT> *info = new Info<VertexId, Value, SizeT>;
// graph construction or generation related parameters
info->info["undirected"] = args.CheckCmdLineFlag("undirected");
info->info["edge_value"] = true; // require per edge weight values
info->Init("SSSP", args, csr); // initialize Info structure
RunTests_instrumented<VertexId, Value, SizeT>(info); // run test
return 0;
}
|
d9cfa8307c8c01c72bb24453ecd768f46843227c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <ctime>
#include <cstring> // memset
#include <cstdlib> // rand, RAND_MAX
#include <cmath> // sqrtf
#include <string>
#include <vector>
using namespace std;
float randomf(){
return (rand()+0.5)/(RAND_MAX+1.0);
}
static double get_time(){
timespec tp;
clock_gettime(CLOCK_MONOTONIC,&tp);
return tp.tv_sec+tp.tv_nsec*1e-9;
}
// input: radius (1), nsample (1), xyz1 (b,n,3), xyz2 (b,m,3)
// output: idx (b,m,nsample)
__global__ void query_ball_point_gpu(int b, int n, int m, float radius, int nsample, const float *xyz1, const float *xyz2, int *idx) {
int index = threadIdx.x;
xyz1 += n*3*index;
xyz2 += m*3*index;
idx += m*nsample*index;
for (int j=0;j<m;++j) {
int cnt = 0;
for (int k=0;k<n;++k) {
if (cnt == nsample)
break; // only pick the FIRST nsample points in the ball
float x2=xyz2[j*3+0];
float y2=xyz2[j*3+1];
float z2=xyz2[j*3+2];
float x1=xyz1[k*3+0];
float y1=xyz1[k*3+1];
float z1=xyz1[k*3+2];
float d=max(sqrtf((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)),1e-20f);
if (d<radius) {
                if (cnt==0) { // set ALL indices to k, s.t. if there are fewer points in the ball than nsample, we still have valid (repeating) indices
for (int l=0;l<nsample;++l)
idx[j*nsample+l] = k;
}
idx[j*nsample+cnt] = k;
cnt+=1;
}
}
}
}
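// A host-side reference for query_ball_point_gpu, added as an illustrative
// validation aid (it is not part of the original benchmark and is not called
// from main()). It mirrors the kernel: keep the first nsample neighbours of
// each query point that fall within `radius`, pre-filling every slot with
// the first hit so the index buffer stays valid when fewer than nsample
// points lie inside the ball. The kernel's 1e-20 clamp on d is omitted here
// because it cannot change the d < radius test for any radius above 1e-20.
static void query_ball_point_cpu(int b, int n, int m, float radius, int nsample,
                                 const float *xyz1, const float *xyz2, int *idx) {
    for (int bi = 0; bi < b; ++bi) {
        const float *p1 = xyz1 + bi * n * 3; // reference cloud of this batch
        const float *p2 = xyz2 + bi * m * 3; // query points of this batch
        int *out = idx + bi * m * nsample;
        for (int j = 0; j < m; ++j) {
            int cnt = 0;
            for (int k = 0; k < n && cnt < nsample; ++k) {
                float dx = p2[j * 3 + 0] - p1[k * 3 + 0];
                float dy = p2[j * 3 + 1] - p1[k * 3 + 1];
                float dz = p2[j * 3 + 2] - p1[k * 3 + 2];
                float d = sqrtf(dx * dx + dy * dy + dz * dz);
                if (d < radius) {
                    if (cnt == 0) { // pad every slot with the first neighbour
                        for (int l = 0; l < nsample; ++l)
                            out[j * nsample + l] = k;
                    }
                    out[j * nsample + cnt] = k;
                    ++cnt;
                }
            }
        }
    }
}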
// input: points (b,n,c), idx (b,m,nsample)
// output: out (b,m,nsample,c)
__global__ void group_point_gpu(int b, int n, int c, int m, int nsample, const float *points, const int *idx, float *out) {
int index = threadIdx.x;
points += n*c*index;
idx += m*nsample*index;
out += m*nsample*c*index;
for (int j=0;j<m;++j) {
for (int k=0;k<nsample;++k) {
int ii = idx[j*nsample+k];
for (int l=0;l<c;++l) {
out[j*nsample*c+k*c+l] = points[ii*c+l];
}
}
}
}
// input: grad_out (b,m,nsample,c), idx (b,m,nsample),
// output: grad_points (b,n,c)
__global__ void group_point_grad_gpu(int b, int n, int c, int m, int nsample, const float *grad_out, const int *idx, float *grad_points) {
int index = threadIdx.x;
idx += m*nsample*index;
grad_out += m*nsample*c*index;
grad_points += n*c*index;
for (int j=0;j<m;++j) {
for (int k=0;k<nsample;++k) {
int ii = idx[j*nsample+k];
for (int l=0;l<c;++l) {
grad_points[ii*c+l] += grad_out[j*nsample*c+k*c+l];
}
}
}
}
int main()
{
int b=32,n=512,m=128,nsample=64,c=64;
float radius=0.1;
float *xyz1, *xyz2, *points;
hipMallocManaged(&xyz1, b*n*3*sizeof(float));
hipMallocManaged(&xyz2, b*m*3*sizeof(float));
hipMallocManaged(&points, b*n*c*sizeof(float));
int *idx;
hipMallocManaged(&idx, b*m*nsample*sizeof(int));
memset(idx, 0, sizeof(int)*b*m*nsample);
float *out, *grad_out;
hipMallocManaged(&out, b*m*nsample*c*sizeof(float));
hipMallocManaged(&grad_out, b*m*nsample*c*sizeof(float));
  memset(grad_out, 0, sizeof(float)*b*m*nsample*c);
float *grad_points;
hipMallocManaged(&grad_points, b*n*c*sizeof(float));
for (int i=0;i<b*n*3;i++)
xyz1[i]=randomf();
for (int i=0;i<b*m*3;i++)
xyz2[i]=randomf();
for (int i=0;i<b*n*c;i++)
points[i]=randomf();
double t0=get_time();
hipLaunchKernelGGL(( query_ball_point_gpu), dim3(1),dim3(b), 0, 0, b,n,m,radius,nsample,xyz1,xyz2,idx);
hipDeviceSynchronize();
printf("query_ball_point gpu time %f\n",get_time()-t0);
t0=get_time();
hipLaunchKernelGGL(( group_point_gpu), dim3(1),dim3(b), 0, 0, b,n,c,m,nsample,points,idx,out);
hipDeviceSynchronize();
printf("grou_point gpu time %f\n",get_time()-t0);
t0=get_time();
hipLaunchKernelGGL(( group_point_grad_gpu), dim3(1),dim3(b), 0, 0, b,n,c,m,nsample,grad_out,idx,grad_points);
hipDeviceSynchronize();
printf("grou_point_grad gpu time %f\n",get_time()-t0);
hipFree(xyz1);
hipFree(xyz2);
hipFree(points);
hipFree(idx);
hipFree(out);
hipFree(grad_out);
hipFree(grad_points);
return 0;
}
| d9cfa8307c8c01c72bb24453ecd768f46843227c.cu | #include <cstdio>
#include <ctime>
#include <cstring> // memset
#include <cstdlib> // rand, RAND_MAX
#include <cmath> // sqrtf
#include <string>
#include <vector>
using namespace std;
float randomf(){
return (rand()+0.5)/(RAND_MAX+1.0);
}
static double get_time(){
timespec tp;
clock_gettime(CLOCK_MONOTONIC,&tp);
return tp.tv_sec+tp.tv_nsec*1e-9;
}
// input: radius (1), nsample (1), xyz1 (b,n,3), xyz2 (b,m,3)
// output: idx (b,m,nsample)
__global__ void query_ball_point_gpu(int b, int n, int m, float radius, int nsample, const float *xyz1, const float *xyz2, int *idx) {
int index = threadIdx.x;
xyz1 += n*3*index;
xyz2 += m*3*index;
idx += m*nsample*index;
for (int j=0;j<m;++j) {
int cnt = 0;
for (int k=0;k<n;++k) {
if (cnt == nsample)
break; // only pick the FIRST nsample points in the ball
float x2=xyz2[j*3+0];
float y2=xyz2[j*3+1];
float z2=xyz2[j*3+2];
float x1=xyz1[k*3+0];
float y1=xyz1[k*3+1];
float z1=xyz1[k*3+2];
float d=max(sqrtf((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)),1e-20f);
if (d<radius) {
                if (cnt==0) { // set ALL indices to k, s.t. if there are fewer points in the ball than nsample, we still have valid (repeating) indices
for (int l=0;l<nsample;++l)
idx[j*nsample+l] = k;
}
idx[j*nsample+cnt] = k;
cnt+=1;
}
}
}
}
// input: points (b,n,c), idx (b,m,nsample)
// output: out (b,m,nsample,c)
__global__ void group_point_gpu(int b, int n, int c, int m, int nsample, const float *points, const int *idx, float *out) {
int index = threadIdx.x;
points += n*c*index;
idx += m*nsample*index;
out += m*nsample*c*index;
for (int j=0;j<m;++j) {
for (int k=0;k<nsample;++k) {
int ii = idx[j*nsample+k];
for (int l=0;l<c;++l) {
out[j*nsample*c+k*c+l] = points[ii*c+l];
}
}
}
}
// input: grad_out (b,m,nsample,c), idx (b,m,nsample),
// output: grad_points (b,n,c)
__global__ void group_point_grad_gpu(int b, int n, int c, int m, int nsample, const float *grad_out, const int *idx, float *grad_points) {
int index = threadIdx.x;
idx += m*nsample*index;
grad_out += m*nsample*c*index;
grad_points += n*c*index;
for (int j=0;j<m;++j) {
for (int k=0;k<nsample;++k) {
int ii = idx[j*nsample+k];
for (int l=0;l<c;++l) {
grad_points[ii*c+l] += grad_out[j*nsample*c+k*c+l];
}
}
}
}
int main()
{
int b=32,n=512,m=128,nsample=64,c=64;
float radius=0.1;
float *xyz1, *xyz2, *points;
cudaMallocManaged(&xyz1, b*n*3*sizeof(float));
cudaMallocManaged(&xyz2, b*m*3*sizeof(float));
cudaMallocManaged(&points, b*n*c*sizeof(float));
int *idx;
cudaMallocManaged(&idx, b*m*nsample*sizeof(int));
memset(idx, 0, sizeof(int)*b*m*nsample);
float *out, *grad_out;
cudaMallocManaged(&out, b*m*nsample*c*sizeof(float));
cudaMallocManaged(&grad_out, b*m*nsample*c*sizeof(float));
  memset(grad_out, 0, sizeof(float)*b*m*nsample*c);
float *grad_points;
cudaMallocManaged(&grad_points, b*n*c*sizeof(float));
for (int i=0;i<b*n*3;i++)
xyz1[i]=randomf();
for (int i=0;i<b*m*3;i++)
xyz2[i]=randomf();
for (int i=0;i<b*n*c;i++)
points[i]=randomf();
double t0=get_time();
query_ball_point_gpu<<<1,b>>>(b,n,m,radius,nsample,xyz1,xyz2,idx);
cudaDeviceSynchronize();
printf("query_ball_point gpu time %f\n",get_time()-t0);
t0=get_time();
group_point_gpu<<<1,b>>>(b,n,c,m,nsample,points,idx,out);
cudaDeviceSynchronize();
printf("grou_point gpu time %f\n",get_time()-t0);
t0=get_time();
group_point_grad_gpu<<<1,b>>>(b,n,c,m,nsample,grad_out,idx,grad_points);
cudaDeviceSynchronize();
printf("grou_point_grad gpu time %f\n",get_time()-t0);
cudaFree(xyz1);
cudaFree(xyz2);
cudaFree(points);
cudaFree(idx);
cudaFree(out);
cudaFree(grad_out);
cudaFree(grad_points);
return 0;
}
|
35db13e0a96d248a040fbf2c29a6bbe5d1504ba1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "filters.cuh"
__global__ void set_cyan_kernel(
uchar3* frame,
const uint frame_resolution)
{
uint index = blockIdx.x * blockDim.x + threadIdx.x;
while (index < frame_resolution)
{
frame[index].x = 0;
frame[index].y = 100;
frame[index].z = 130;
index += blockDim.x * gridDim.x;
}
}
__global__ void swap_rb_kernel(const cv::cuda::PtrStepSz<uchar3> src, cv::cuda::PtrStep<uchar3> dst)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x < src.cols && y < src.rows)
{
uchar3 v = src(y, x);
dst(y, x) = make_uchar3(v.z, v.y, v.x);
}
}
__global__ void remove_b_kernel(cv::cuda::PtrStepSz<uchar3> src)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x < src.cols && y < src.rows)
{
uchar3 v = src(y, x);
        // NB: in OpenCV the channel order is BGR, so .x is the blue channel
src(y, x) = make_uchar3(0, v.y, v.z);
}
}
void swap_rb_caller(const cv::cuda::PtrStepSz<uchar3>& src, cv::cuda::PtrStep<uchar3> dst)
{
dim3 threads(16, 16);
dim3 blocks((src.cols + threads.x - 1) / threads.x, (src.rows + threads.y - 1) / threads.y);
hipLaunchKernelGGL(( swap_rb_kernel) , dim3(blocks), dim3(threads), 0, 0, src, dst);
}
void remove_b_caller(cv::cuda::PtrStepSz<uchar3> src)
{
unsigned int threads_2d = get_max_threads_2d();
dim3 threads(threads_2d, threads_2d);
dim3 blocks((src.cols + threads_2d - 1) / threads_2d, (src.rows + threads_2d - 1) / threads_2d);
hipLaunchKernelGGL(( remove_b_kernel) , dim3(blocks), dim3(threads), 0, 0, src);
}
void set_cyan_caller(
void * frame,
const uint frame_resolution)
{
uint threads = get_max_threads();
uint blocks = map_blocks_to_problem(frame_resolution, threads);
    hipLaunchKernelGGL(( set_cyan_kernel) , dim3(blocks), dim3(threads), 0, 0, static_cast<uchar3 *>(frame), frame_resolution);
}
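// A minimal usage sketch (illustrative only, not part of the original file;
// it assumes OpenCV's CUDA module with <opencv2/core/cuda.hpp> included so
// that cv::cuda::GpuMat is available, and a CV_8UC3 source image). GpuMat
// converts implicitly to PtrStepSz/PtrStep, so the callers above can be fed
// device matrices directly:
//
// void example_swap_channels(const cv::cuda::GpuMat& src, cv::cuda::GpuMat& dst)
// {
//     dst.create(src.size(), src.type()); // allocate the output on the device
//     swap_rb_caller(src, dst);           // dst = src with R and B swapped
// }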
| 35db13e0a96d248a040fbf2c29a6bbe5d1504ba1.cu | #include "filters.cuh"
__global__ void set_cyan_kernel(
uchar3* frame,
const uint frame_resolution)
{
uint index = blockIdx.x * blockDim.x + threadIdx.x;
while (index < frame_resolution)
{
frame[index].x = 0;
frame[index].y = 100;
frame[index].z = 130;
index += blockDim.x * gridDim.x;
}
}
__global__ void swap_rb_kernel(const cv::cuda::PtrStepSz<uchar3> src, cv::cuda::PtrStep<uchar3> dst)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x < src.cols && y < src.rows)
{
uchar3 v = src(y, x);
dst(y, x) = make_uchar3(v.z, v.y, v.x);
}
}
__global__ void remove_b_kernel(cv::cuda::PtrStepSz<uchar3> src)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x < src.cols && y < src.rows)
{
uchar3 v = src(y, x);
        // NB: in OpenCV the channel order is BGR, so .x is the blue channel
src(y, x) = make_uchar3(0, v.y, v.z);
}
}
void swap_rb_caller(const cv::cuda::PtrStepSz<uchar3>& src, cv::cuda::PtrStep<uchar3> dst)
{
dim3 threads(16, 16);
dim3 blocks((src.cols + threads.x - 1) / threads.x, (src.rows + threads.y - 1) / threads.y);
swap_rb_kernel <<<blocks, threads, 0, 0>>> (src, dst);
}
void remove_b_caller(cv::cuda::PtrStepSz<uchar3> src)
{
unsigned int threads_2d = get_max_threads_2d();
dim3 threads(threads_2d, threads_2d);
dim3 blocks((src.cols + threads_2d - 1) / threads_2d, (src.rows + threads_2d - 1) / threads_2d);
remove_b_kernel <<<blocks, threads, 0, 0>>> (src);
}
void set_cyan_caller(
void * frame,
const uint frame_resolution)
{
uint threads = get_max_threads();
uint blocks = map_blocks_to_problem(frame_resolution, threads);
    set_cyan_kernel <<<blocks, threads, 0, 0>>> (static_cast<uchar3 *>(frame), frame_resolution);
}
|
a8bd2b32ab81f4eb7acb0988b39889e2860f0c70.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// compile for the geforce GTX 970 (run normally like a c program)
// nvcc -arch=sm_52 hello_world.cu -o hello_world && ./hello_world
// This is the REAL "hello world" for CUDA!
// It takes the string "Hello ", prints it, then passes it to CUDA with an array
// of offsets. Then the offsets are added in parallel to produce the string "World!"
// By Ingemar Ragnemalm 2010
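// Worked example of the offset trick: "Hello " is {'H','e','l','l','o',' ','\0'},
// and adding b = {15, 10, 6, 0, -11, 1, 0} element-wise gives 'H'+15='W',
// 'e'+10='o', 'l'+6='r', 'l'+0='l', 'o'-11='d', ' '+1='!', '\0'+0='\0',
// i.e. the string "World!".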
#include <stdio.h>
const int N = 7;
const int blocksize = 7;
__global__
void hello(char *a, int *b)
{
a[threadIdx.x] += b[threadIdx.x];
}
int main()
{
char a[N] = "Hello ";
int b[N] = {15, 10, 6, 0, -11, 1, 0};
char *ad;
int *bd;
const int csize = N*sizeof(char);
const int isize = N*sizeof(int);
printf("%s", a);
hipMalloc( (void**)&ad, csize );
hipMalloc( (void**)&bd, isize );
hipMemcpy( ad, a, csize, hipMemcpyHostToDevice );
hipMemcpy( bd, b, isize, hipMemcpyHostToDevice );
dim3 dimBlock( blocksize, 1 );
dim3 dimGrid( 1, 1 );
hipLaunchKernelGGL(( hello), dim3(dimGrid), dim3(dimBlock), 0, 0, ad, bd);
hipMemcpy( a, ad, csize, hipMemcpyDeviceToHost );
	hipFree( ad );
	hipFree( bd );
printf("%s\n", a);
return EXIT_SUCCESS;
} | a8bd2b32ab81f4eb7acb0988b39889e2860f0c70.cu | // compile for the geforce GTX 970 (run normally like a c program)
// nvcc -arch=sm_52 hello_world.cu -o hello_world && ./hello_world
// This is the REAL "hello world" for CUDA!
// It takes the string "Hello ", prints it, then passes it to CUDA with an array
// of offsets. Then the offsets are added in parallel to produce the string "World!"
// By Ingemar Ragnemalm 2010
#include <stdio.h>
const int N = 7;
const int blocksize = 7;
__global__
void hello(char *a, int *b)
{
a[threadIdx.x] += b[threadIdx.x];
}
int main()
{
char a[N] = "Hello ";
int b[N] = {15, 10, 6, 0, -11, 1, 0};
char *ad;
int *bd;
const int csize = N*sizeof(char);
const int isize = N*sizeof(int);
printf("%s", a);
cudaMalloc( (void**)&ad, csize );
cudaMalloc( (void**)&bd, isize );
cudaMemcpy( ad, a, csize, cudaMemcpyHostToDevice );
cudaMemcpy( bd, b, isize, cudaMemcpyHostToDevice );
dim3 dimBlock( blocksize, 1 );
dim3 dimGrid( 1, 1 );
hello<<<dimGrid, dimBlock>>>(ad, bd);
cudaMemcpy( a, ad, csize, cudaMemcpyDeviceToHost );
	cudaFree( ad );
	cudaFree( bd );
printf("%s\n", a);
return EXIT_SUCCESS;
} |
2f18ae2efae781f82dc0a1fe35da6cbc7adb9088.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdlib.h>
#include <stdio.h>
#include "c_utils.h"
#include "des.h"
#include "des_utils.h"
#include "bit_utils.h"
#include "des_consts.h"
#include "des_kernel.h"
#include "cuda_utils.h"
void parse_args(int argc, char** argv, int *key_length);
void usage(char* name);
void parse_args(int argc, char** argv, int *key_length){
if (argc < 2){
usage(argv[0]);
}
*key_length = atoi(argv[1]);
if (*key_length <=0 || *key_length>64){
usage(argv[0]);
}
}
void usage(char* name){
printf("Usage:\n %s key_length(1-64)\n",name);
exit(EXIT_FAILURE);
}
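// Example run (an illustrative sketch; the binary name depends on how this
// file is built): "./des_crack 20" generates a random key with 20 effective
// bits, encodes the fixed block 0x0123456789ABCDEF with it, brute-forces the
// key on the GPU, and prints "<key_length>,<seconds>" as a CSV row.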
int main(int argc, char** argv) {
int key_length;
parse_args(argc,argv,&key_length);
//printf("Key length: %d \n",key_length);
uint64_t key = des_generate_key_length(key_length);
uint64_t block = 0x0123456789ABCDEF;
uint64_t encoded = full_des_encode_block(key, block);
_cudaSetDevice(1);
//printf("Real key:\n");
//bits_print_grouped(key, 8, 64);
//printf("Cracking...\n");
uint64_t cracked_key = 0;
clock_t start = clock();
run_des_crack(block, encoded, key_length, &cracked_key);
clock_t end = clock();
float seconds = (float)(end - start) / CLOCKS_PER_SEC;
printf("%d,%f\n", key_length,seconds);
//printf("Cracked key:\n");
//bits_print_grouped(cracked_key, 8, 64);
//bits_print_grouped(encoded,8,64);
//bits_print_grouped(full_des_encode_block(cracked_key,block),8,64);
return EXIT_SUCCESS;
}
| 2f18ae2efae781f82dc0a1fe35da6cbc7adb9088.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdlib.h>
#include <stdio.h>
#include "c_utils.h"
#include "des.h"
#include "des_utils.h"
#include "bit_utils.h"
#include "des_consts.h"
#include "des_kernel.h"
#include "cuda_utils.h"
void parse_args(int argc, char** argv, int *key_length);
void usage(char* name);
void parse_args(int argc, char** argv, int *key_length){
if (argc < 2){
usage(argv[0]);
}
*key_length = atoi(argv[1]);
if (*key_length <=0 || *key_length>64){
usage(argv[0]);
}
}
void usage(char* name){
printf("Usage:\n %s key_length(1-64)\n",name);
exit(EXIT_FAILURE);
}
int main(int argc, char** argv) {
int key_length;
parse_args(argc,argv,&key_length);
//printf("Key length: %d \n",key_length);
uint64_t key = des_generate_key_length(key_length);
uint64_t block = 0x0123456789ABCDEF;
uint64_t encoded = full_des_encode_block(key, block);
_cudaSetDevice(1);
//printf("Real key:\n");
//bits_print_grouped(key, 8, 64);
//printf("Cracking...\n");
uint64_t cracked_key = 0;
clock_t start = clock();
run_des_crack(block, encoded, key_length, &cracked_key);
clock_t end = clock();
float seconds = (float)(end - start) / CLOCKS_PER_SEC;
printf("%d,%f\n", key_length,seconds);
//printf("Cracked key:\n");
//bits_print_grouped(cracked_key, 8, 64);
//bits_print_grouped(encoded,8,64);
//bits_print_grouped(full_des_encode_block(cracked_key,block),8,64);
return EXIT_SUCCESS;
}
|
4daa1a753727151d1d05f6f3effb7fef92a7199c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Implements the math functions for GPU.
#include "caffe2/utils/math.h"
#include <limits>
#include <numeric>
#include <vector>
#include <hipcub/hipcub.hpp>
#include <thrust/functional.h>
#include "caffe2/core/context_gpu.h"
#include "caffe2/utils/conversions.h"
#include "caffe2/utils/fixed_divisor.h"
#include "caffe2/utils/math_utils.h"
#if THRUST_VERSION >= 100800
#define THRUST_SUPPORTS_PER_THREAD
#endif // THRUST_VERSION >= 100800
namespace caffe2 {
namespace math {
namespace {
#define DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR(Func, expr) \
template <typename T> \
struct Func##Functor { \
inline __host__ __device__ T \
operator()(const T& lhs, const T& rhs) const { \
return lhs expr rhs; \
} \
}; \
template <> \
struct Func##Functor<float16> { \
inline __host__ __device__ float16 \
operator()(const float16& lhs, const float16& rhs) const { \
return convert::To<float, float16>(convert::To<float16, float>( \
lhs) expr convert::To<float16, float>(rhs)); \
} \
};
DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR(Add, +)
DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR(Sub, -)
DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR(Mul, *)
DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR(Div, /)
#undef DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR
template <typename T>
__global__ void SinCosCUDAKernel(const int N, const T* X, T* S, T* C) {
CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 350
sincos(__ldg(X + i), S + i, C + i);
#else
sincos(X[i], S + i, C + i);
#endif
}
}
template <typename TIn, typename TOut, class BinaryOperator>
__global__ void SimpleBinaryOpCUDAKernel(
const int N,
const BinaryOperator op,
const TIn* A,
const TIn* B,
TOut* C) {
CUDA_1D_KERNEL_LOOP(i, N) {
C[i] = op(A[i], B[i]);
}
}
template <typename TIn, typename TOut, class BinaryOperator, bool broadcast_1st>
__global__ void RowwiseBinaryOpCUDAKenel(
const int size,
const FixedDivisor<int> cols,
const BinaryOperator op,
const TIn* A,
const TIn* B,
TOut* C) {
CUDA_1D_KERNEL_LOOP(C_index, size) {
const int j = cols.Mod(C_index);
const int A_index = broadcast_1st ? j : C_index;
const int B_index = broadcast_1st ? C_index : j;
C[C_index] = op(A[A_index], B[B_index]);
}
}
template <typename TIn, typename TOut, class BinaryOperator, bool broadcast_1st>
__global__ void ColwiseBinaryOpCUDAKenel(
const int size,
const FixedDivisor<int> cols,
const BinaryOperator op,
const TIn* A,
const TIn* B,
TOut* C) {
CUDA_1D_KERNEL_LOOP(C_index, size) {
const int i = cols.Div(C_index);
const int A_index = broadcast_1st ? i : C_index;
const int B_index = broadcast_1st ? C_index : i;
C[C_index] = op(A[A_index], B[B_index]);
}
}
template <typename TIn, typename TOut, class BinaryOperator, int D>
__global__ void BroadcastBinaryOpCUDAKernel(
const int size,
const SimpleArray<int, D> A_strides,
const SimpleArray<int, D> B_strides,
const SimpleArray<FixedDivisor<int>, D> C_dims,
const BinaryOperator op,
const TIn* A,
const TIn* B,
TOut* C) {
CUDA_1D_KERNEL_LOOP(C_index, size) {
int A_index = 0;
int B_index = 0;
int C_index_val = C_index;
#pragma unroll
for (int i = D - 1; i >= 0; --i) {
int d;
C_dims.data[i].DivMod(C_index_val, &C_index_val, &d);
A_index += d * A_strides.data[i];
B_index += d * B_strides.data[i];
}
C[C_index] = op(A[A_index], B[B_index]);
}
}
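// Index walk-through (an illustrative example): with C dims {2, 3} and
// C_index = 4, the loop above peels dimensions from the innermost out:
// DivMod by 3 gives remainder 1 (the column) and quotient 1; DivMod of that
// quotient by 2 gives remainder 1 (the row), i.e. coordinate (1, 1). Since
// the launcher below zeroes the strides of size-1 dimensions, a broadcast
// input simply ignores the coordinates along which it is replicated.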
template <typename TIn, typename TOut, class BinaryOperator>
void BinaryOpWith2DBroadcasting(
const int ndim,
const int* dims,
const int pivot,
const bool rowwise_broadcast,
const bool broadcast_1st,
const BinaryOperator& op,
const TIn* A,
const TIn* B,
TOut* C,
CUDAContext* context) {
const int rows =
std::accumulate(dims, dims + pivot, 1, std::multiplies<int>());
const int cols =
std::accumulate(dims + pivot, dims + ndim, 1, std::multiplies<int>());
if (rows == 0 || cols == 0) {
return;
}
const int size = rows * cols;
const FixedDivisor<int> cols_div(cols);
if (rowwise_broadcast) {
if (broadcast_1st) {
hipLaunchKernelGGL(( RowwiseBinaryOpCUDAKenel<TIn, TOut, BinaryOperator, true>)
, dim3(CAFFE_GET_BLOCKS(size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), size, cols_div, op, A, B, C);
} else {
hipLaunchKernelGGL(( RowwiseBinaryOpCUDAKenel<TIn, TOut, BinaryOperator, false>)
, dim3(CAFFE_GET_BLOCKS(size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), size, cols_div, op, A, B, C);
}
} else {
if (broadcast_1st) {
hipLaunchKernelGGL(( ColwiseBinaryOpCUDAKenel<TIn, TOut, BinaryOperator, true>)
, dim3(CAFFE_GET_BLOCKS(size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), size, cols_div, op, A, B, C);
} else {
hipLaunchKernelGGL(( ColwiseBinaryOpCUDAKenel<TIn, TOut, BinaryOperator, false>)
, dim3(CAFFE_GET_BLOCKS(size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), size, cols_div, op, A, B, C);
}
}
}
template <typename TIn, typename TOut, class BinaryOperator, int D>
void BroadcastBinaryOpImpl(
const int* A_dims,
const int* B_dims,
const int* C_dims,
const BinaryOperator& op,
const TIn* A,
const TIn* B,
TOut* C,
CUDAContext* context) {
SimpleArray<int, D> A_strides_array;
SimpleArray<int, D> B_strides_array;
SimpleArray<FixedDivisor<int>, D> C_dims_array;
int A_stride = 1;
int B_stride = 1;
for (int i = D - 1; i >= 0; --i) {
if (C_dims[i] == 0) {
return;
}
A_strides_array.data[i] = A_dims[i] == 1 ? 0 : A_stride;
B_strides_array.data[i] = B_dims[i] == 1 ? 0 : B_stride;
A_stride *= A_dims[i];
B_stride *= B_dims[i];
C_dims_array.data[i] = FixedDivisor<int>(C_dims[i]);
}
const int size =
std::accumulate(C_dims, C_dims + D, 1, std::multiplies<int>());
hipLaunchKernelGGL(( BroadcastBinaryOpCUDAKernel<TIn, TOut, BinaryOperator, D>)
, dim3(CAFFE_GET_BLOCKS(size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(),
size, A_strides_array, B_strides_array, C_dims_array, op, A, B, C);
}
template <typename TIn, typename TOut, class BinaryOperator>
void BroadcastBinaryOp(
const int A_ndim,
const int* A_dims,
const int B_ndim,
const int* B_dims,
const BinaryOperator& op,
const TIn* A,
const TIn* B,
TOut* C,
CUDAContext* context) {
const int ndim = ::max(A_ndim, B_ndim);
std::vector<int> A_dims_array(ndim);
std::vector<int> B_dims_array(ndim);
std::vector<int> C_dims_array(ndim);
utils::ComputeBroadcastBinaryOpDims(
A_ndim,
A_dims,
B_ndim,
B_dims,
A_dims_array.data(),
B_dims_array.data(),
C_dims_array.data());
if (A_dims_array == B_dims_array) {
const int size = std::accumulate(
C_dims_array.cbegin(), C_dims_array.cend(), 1, std::multiplies<int>());
hipLaunchKernelGGL(( SimpleBinaryOpCUDAKernel<TIn, TOut, BinaryOperator>)
, dim3(CAFFE_GET_BLOCKS(size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), size, op, A, B, C);
return;
}
int pivot;
bool broadcast_1st;
if (utils::IsRowwiseBroadcastBinaryOp(
ndim,
A_dims_array.data(),
B_dims_array.data(),
&pivot,
&broadcast_1st)) {
BinaryOpWith2DBroadcasting<TIn, TOut, BinaryOperator>(
ndim,
C_dims_array.data(),
pivot,
true,
broadcast_1st,
op,
A,
B,
C,
context);
return;
}
if (utils::IsColwiseBroadcastBinaryOp(
ndim,
A_dims_array.data(),
B_dims_array.data(),
&pivot,
&broadcast_1st)) {
BinaryOpWith2DBroadcasting<TIn, TOut, BinaryOperator>(
ndim,
C_dims_array.data(),
pivot,
false,
broadcast_1st,
op,
A,
B,
C,
context);
return;
}
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_3(
ndim,
BroadcastBinaryOpImpl,
TIn,
TOut,
BinaryOperator,
A_dims_array.data(),
B_dims_array.data(),
C_dims_array.data(),
op,
A,
B,
C,
context);
}
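// Dispatch example (illustrative, inferred from the helpers above): for
// A dims {2, 3, 4} and B dims {4}, B is first padded to {1, 1, 4}; this
// matches the rowwise pattern with pivot = 2, so the op runs on a 6 x 4
// view with B read per-column and reused for every row. With B dims
// {2, 3, 1} instead, the colwise pattern fires (B is reused across the 4
// columns of each of the 6 rows). Anything that fits neither pattern falls
// through to the general N-D BroadcastBinaryOpCUDAKernel via
// DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_3.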
} // namespace
#define DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(T, Func, op) \
__global__ void Func##CUDAKernel(const int N, const T* X, T* Y) { \
CUDA_1D_KERNEL_LOOP(i, N) { \
Y[i] = op(X[i]); \
} \
} \
template <> \
void Func<T, CUDAContext>( \
const int N, const T* x, T* y, CUDAContext* context) { \
hipLaunchKernelGGL(( Func##CUDAKernel), \
CAFFE_GET_BLOCKS(N), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream(), N, x, y); \
}
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Exp, expf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Log, logf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Cos, cosf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Acos, acosf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sin, sinf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Asin, asinf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Tan, tanf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Atan, atanf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Abs, fabsf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sqr, utils::Square<float>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sqrt, sqrtf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Rsqrt, rsqrtf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Cbrt, cbrtf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Cube, utils::Cube<float>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Cube, utils::Cube<double>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(
std::int32_t,
Cube,
utils::Cube<std::int32_t>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(
std::int64_t,
Cube,
utils::Cube<std::int64_t>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(bool, Not, utils::Not)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Neg, utils::Negate<float>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Neg, utils::Negate<double>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(
std::int32_t,
Neg,
utils::Negate<std::int32_t>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(
std::int64_t,
Neg,
utils::Negate<std::int64_t>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sign, utils::Sign<float>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Sign, utils::Sign<double>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(
std::int32_t,
Sign,
utils::Sign<std::int32_t>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(
std::int64_t,
Sign,
utils::Sign<std::int64_t>)
#undef DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION
#define CAFFE2_SPECIALIZED_CUDA_SINCOS(T) \
template <> \
void SinCos<T, CUDAContext>( \
const int N, const T* x, T* ys, T* yc, CUDAContext* context) { \
hipLaunchKernelGGL(( SinCosCUDAKernel), \
CAFFE_GET_BLOCKS(N), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream(), N, x, ys, yc); \
}
CAFFE2_SPECIALIZED_CUDA_SINCOS(float)
CAFFE2_SPECIALIZED_CUDA_SINCOS(double)
#undef CAFFE2_SPECIALIZED_CUDA_SINCOS
#define DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(TIn, TOut, Func, Op) \
template <> \
void Func<TIn, CUDAContext>( \
const int N, \
const TIn* A, \
const TIn* B, \
TOut* C, \
CUDAContext* context) { \
hipLaunchKernelGGL(( SimpleBinaryOpCUDAKernel<TIn, TOut, Op<TIn>>) \
, dim3(CAFFE_GET_BLOCKS(N)), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream(), N, Op<TIn>(), A, B, C); \
}
#define DEFINE_SIMPLE_CUDA_COMPARE_FUNCTION(Func, Op) \
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(std::int32_t, bool, Func, Op) \
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(std::int64_t, bool, Func, Op) \
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(float, bool, Func, Op) \
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(double, bool, Func, Op) \
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(bool, bool, Func, Op)
DEFINE_SIMPLE_CUDA_COMPARE_FUNCTION(EQ, thrust::equal_to)
DEFINE_SIMPLE_CUDA_COMPARE_FUNCTION(NE, thrust::not_equal_to)
DEFINE_SIMPLE_CUDA_COMPARE_FUNCTION(LT, thrust::less)
DEFINE_SIMPLE_CUDA_COMPARE_FUNCTION(LE, thrust::less_equal)
DEFINE_SIMPLE_CUDA_COMPARE_FUNCTION(GT, thrust::greater)
DEFINE_SIMPLE_CUDA_COMPARE_FUNCTION(GE, thrust::greater_equal)
#undef DEFINE_SIMPLE_CUDA_COMPARE_FUNCTION
#define DEFINE_SIMPLE_CUDA_BINARY_FUNCTION(Func, Op) \
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(std::int32_t, std::int32_t, Func, Op) \
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(std::int64_t, std::int64_t, Func, Op) \
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(float, float, Func, Op) \
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(double, double, Func, Op) \
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(float16, float16, Func, Op)
DEFINE_SIMPLE_CUDA_BINARY_FUNCTION(Add, AddFunctor)
DEFINE_SIMPLE_CUDA_BINARY_FUNCTION(Sub, SubFunctor)
DEFINE_SIMPLE_CUDA_BINARY_FUNCTION(Mul, MulFunctor)
DEFINE_SIMPLE_CUDA_BINARY_FUNCTION(Div, DivFunctor)
#undef DEFINE_SIMPLE_CUDA_BINARY_FUNCTION
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(bool, bool, And, thrust::logical_and)
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(bool, bool, Or, thrust::logical_or)
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(bool, bool, Xor, thrust::bit_xor)
#define DEFINE_SIMPLE_CUDA_BITWISE_BINARY_FUNCTION(Func, Op) \
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(bool, bool, Func, Op) \
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(std::int32_t, std::int32_t, Func, Op) \
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(std::int64_t, std::int64_t, Func, Op)
DEFINE_SIMPLE_CUDA_BITWISE_BINARY_FUNCTION(BitwiseAnd, thrust::bit_and)
DEFINE_SIMPLE_CUDA_BITWISE_BINARY_FUNCTION(BitwiseOr, thrust::bit_or)
DEFINE_SIMPLE_CUDA_BITWISE_BINARY_FUNCTION(BitwiseXor, thrust::bit_xor)
#undef DEFINE_SIMPLE_CUDA_BITWISE_BINARY_FUNCTION
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(
float,
float,
ElemwiseMax,
thrust::maximum);
#undef DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION
#define DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(TIn, TOut, Func, Op) \
template <> \
void Rowwise##Func<TIn, CUDAContext, true>( \
const int rows, \
const int cols, \
const TIn* A, \
const TIn* B, \
TOut* C, \
CUDAContext* context) { \
if (rows == 0 || cols == 0) { \
return; \
} \
const int size = rows * cols; \
const FixedDivisor<int> cols_div(cols); \
hipLaunchKernelGGL(( RowwiseBinaryOpCUDAKenel<TIn, TOut, Op<TIn>, true>) \
, dim3(CAFFE_GET_BLOCKS(size)), \
          dim3(CAFFE_CUDA_NUM_THREADS),                                    \
0, \
context->cuda_stream(), size, cols_div, Op<TIn>(), A, B, C); \
} \
template <> \
void Rowwise##Func<TIn, CUDAContext, false>( \
const int rows, \
const int cols, \
const TIn* A, \
const TIn* B, \
TOut* C, \
CUDAContext* context) { \
if (rows == 0 || cols == 0) { \
return; \
} \
const int size = rows * cols; \
const FixedDivisor<int> cols_div(cols); \
hipLaunchKernelGGL(( RowwiseBinaryOpCUDAKenel<TIn, TOut, Op<TIn>, false>) \
, dim3(CAFFE_GET_BLOCKS(size)), \
          dim3(CAFFE_CUDA_NUM_THREADS),                                    \
0, \
context->cuda_stream(), size, cols_div, Op<TIn>(), A, B, C); \
} \
template <> \
void Colwise##Func<TIn, CUDAContext, true>( \
const int rows, \
const int cols, \
const TIn* A, \
const TIn* B, \
TOut* C, \
CUDAContext* context) { \
if (rows == 0 || cols == 0) { \
return; \
} \
const int size = rows * cols; \
const FixedDivisor<int> cols_div(cols); \
hipLaunchKernelGGL(( ColwiseBinaryOpCUDAKenel<TIn, TOut, Op<TIn>, true>) \
, dim3(CAFFE_GET_BLOCKS(size)), \
          dim3(CAFFE_CUDA_NUM_THREADS),                                    \
0, \
context->cuda_stream(), size, cols_div, Op<TIn>(), A, B, C); \
} \
template <> \
void Colwise##Func<TIn, CUDAContext, false>( \
const int rows, \
const int cols, \
const TIn* A, \
const TIn* B, \
TOut* C, \
CUDAContext* context) { \
if (rows == 0 || cols == 0) { \
return; \
} \
const int size = rows * cols; \
const FixedDivisor<int> cols_div(cols); \
hipLaunchKernelGGL(( ColwiseBinaryOpCUDAKenel<TIn, TOut, Op<TIn>, false>) \
, dim3(CAFFE_GET_BLOCKS(size)), \
          dim3(CAFFE_CUDA_NUM_THREADS),                                    \
0, \
context->cuda_stream(), size, cols_div, Op<TIn>(), A, B, C); \
}
#define DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(std::int32_t, bool, Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(std::int64_t, bool, Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(float, bool, Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(double, bool, Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Func, Op)
DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(EQ, thrust::equal_to)
DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(NE, thrust::not_equal_to)
DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(LT, thrust::less)
DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(LE, thrust::less_equal)
DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(GT, thrust::greater)
DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(GE, thrust::greater_equal)
#undef DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION
#define DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION(Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION( \
std::int32_t, std::int32_t, Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION( \
std::int64_t, std::int64_t, Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(float, float, Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(double, double, Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(float16, float16, Func, Op)
DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION(Add, AddFunctor)
DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION(Sub, SubFunctor)
DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION(Mul, MulFunctor)
DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION(Div, DivFunctor)
#undef DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, And, thrust::logical_and)
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Or, thrust::logical_or)
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Xor, thrust::bit_xor)
#define DEFINE_2D_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION( \
std::int32_t, std::int32_t, Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION( \
std::int64_t, std::int64_t, Func, Op)
DEFINE_2D_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseAnd, thrust::bit_and)
DEFINE_2D_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseOr, thrust::bit_or)
DEFINE_2D_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseXor, thrust::bit_xor)
#undef DEFINE_2D_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION
#undef DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION
#define DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(TIn, TOut, Func, Op) \
template <> \
void Func<TIn, CUDAContext>( \
const int A_ndim, \
const int* A_dims, \
const int B_ndim, \
const int* B_dims, \
const TIn* A, \
const TIn* B, \
TOut* C, \
CUDAContext* context) { \
BroadcastBinaryOp<TIn, TOut, Op<TIn>>( \
A_ndim, A_dims, B_ndim, B_dims, Op<TIn>(), A, B, C, context); \
}
#define DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(std::int32_t, bool, Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(std::int64_t, bool, Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(float, bool, Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(double, bool, Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Func, Op)
DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(EQ, thrust::equal_to)
DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(NE, thrust::not_equal_to)
DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(LT, thrust::less)
DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(LE, thrust::less_equal)
DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(GT, thrust::greater)
DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(GE, thrust::greater_equal)
#undef DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION
#define DEFINE_BROADCAST_CUDA_BINARY_FUNCTION(Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION( \
std::int32_t, std::int32_t, Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION( \
std::int64_t, std::int64_t, Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(float, float, Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(double, double, Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(float16, float16, Func, Op)
DEFINE_BROADCAST_CUDA_BINARY_FUNCTION(Add, AddFunctor)
DEFINE_BROADCAST_CUDA_BINARY_FUNCTION(Sub, SubFunctor)
DEFINE_BROADCAST_CUDA_BINARY_FUNCTION(Mul, MulFunctor)
DEFINE_BROADCAST_CUDA_BINARY_FUNCTION(Div, DivFunctor)
#undef DEFINE_BROADCAST_CUDA_BINARY_FUNCTION
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, And, thrust::logical_and)
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Or, thrust::logical_or)
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Xor, thrust::bit_xor)
#define DEFINE_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION( \
std::int32_t, std::int32_t, Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(std::int64_t, std::int64_t, Func, Op)
DEFINE_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseAnd, thrust::bit_and)
DEFINE_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseOr, thrust::bit_or)
DEFINE_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseXor, thrust::bit_xor)
#undef DEFINE_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION
#undef DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION
#define DELEGATE_REDUCTION_FUNCTION(T, Funcname, func) \
template <> \
void Funcname<T, CUDAContext>( \
const int N, \
const T* src, \
T* dst, \
Tensor<CUDAContext>* scratch_ptr, \
CUDAContext* context) { \
size_t memRequired = 0; \
hipcub::DeviceReduce::func( \
nullptr, memRequired, src, dst, N, context->cuda_stream()); \
auto buffer_size = \
static_cast<TIndex>((memRequired + sizeof(T) - 1) / sizeof(T)); \
scratch_ptr->Resize(std::vector<TIndex>{buffer_size}); \
hipcub::DeviceReduce::func( \
static_cast<void*>(scratch_ptr->mutable_data<T>()), \
memRequired, \
src, \
dst, \
N, \
context->cuda_stream()); \
}
DELEGATE_REDUCTION_FUNCTION(float, ReduceMin, Min)
DELEGATE_REDUCTION_FUNCTION(float, ReduceMax, Max)
DELEGATE_REDUCTION_FUNCTION(int32_t, ReduceMax, Max)
DELEGATE_REDUCTION_FUNCTION(int64_t, ReduceMax, Max)
#undef DELEGATE_REDUCTION_FUNCTION
// Caffe2 gemm provides a simpler interface to the gemm functions, with the
// limitation that the data has to be contiguous in memory.
template <>
void Gemm<float, CUDAContext>(
const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB,
const int M,
const int N,
const int K,
const float alpha,
const float* A,
const float* B,
const float beta,
float* C,
CUDAContext* context,
TensorProto::DataType math_type) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
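  // Concretely: for row-major C = op(A) * op(B), we use the identity
  // C^T = op(B)^T * op(A)^T. Passing B before A and swapping M <-> N makes
  // the column-major BLAS produce C^T, whose memory layout is exactly the
  // row-major C we want; e.g. M=2, N=3, K=4 becomes an Sgemm call with
  // m=3, n=2, k=4.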
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
CUBLAS_ENFORCE(hipblasSgemm(
context->cublas_handle(),
cuTransB,
cuTransA,
N,
M,
K,
&alpha,
B,
ldb,
A,
lda,
&beta,
C,
N));
}
template <>
void Gemm<float16, CUDAContext>(
const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB,
const int M,
const int N,
const int K,
const float alpha,
const float16* A,
const float16* B,
const float beta,
float16* C,
CUDAContext* context,
TensorProto::DataType math_type) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
if (math_type == TensorProto_DataType_FLOAT) {
CUBLAS_CHECK(cublasSgemmEx(
context->cublas_handle(),
cuTransB,
cuTransA,
N,
M,
K,
&alpha,
B,
HIP_R_16F,
ldb,
A,
HIP_R_16F,
lda,
&beta,
C,
HIP_R_16F,
N));
} else if (math_type == TensorProto_DataType_FLOAT16) {
// convert alpha, beta from float -> __half
auto alpha_fp16 = convert::floatToHalf(alpha);
auto beta_fp16 = convert::floatToHalf(beta);
// call hipblasHgemm
CUBLAS_CHECK(hipblasHgemm(
context->cublas_handle(),
cuTransB,
cuTransA,
N,
M,
K,
&alpha_fp16,
(const __half*)B,
ldb,
(const __half*)A,
lda,
&beta_fp16,
(__half*)C,
N));
} else {
// fail
CAFFE_THROW("Unsupported math type");
}
}
template <>
void BiasCHW<float, CUDAContext>(
const float* bias,
const float* bias_multiplier,
const int bias_channels,
const int image_size,
float* image,
CUDAContext* context) {
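  // A rank-1 GEMM update: with K = 1, alpha = 1 and beta = 1 this computes
  // image += bias * bias_multiplier^T, i.e. bias[c] * bias_multiplier[p] is
  // added to pixel p of channel c. The caller conventionally fills
  // bias_multiplier with ones, which broadcasts bias[c] across the image.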
Gemm<float, CUDAContext>(
CblasNoTrans,
CblasNoTrans,
bias_channels,
image_size,
1,
1,
bias,
bias_multiplier,
1,
image,
context);
}
template <>
void GemmBatched<float, CUDAContext>(
const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB,
const int batch_size,
const int M,
const int N,
const int K,
const float alpha,
const float* A,
const float* B,
const float beta,
float* C,
CUDAContext* context,
Tensor<CUDAContext>* scratch,
TensorProto::DataType math_type) {
const int a_stride = M * K;
const int b_stride = K * N;
const int c_stride = M * N;
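  // cublasSgemmStridedBatched (hipblasSgemmStridedBatched here) first shipped
  // in CUDA 8, so older toolkits fall back to a plain loop of per-matrix GEMMs.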
#if __CUDACC_VER_MAJOR__ < 8
// loop over matrices in the batch
for (int i = 0; i < batch_size; ++i) {
math::Gemm<float, CUDAContext>(
TransA,
TransB,
M,
N,
K,
alpha,
A + a_stride * i,
B + b_stride * i,
beta,
C + c_stride * i,
context);
}
#else
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
const int lda = (TransA == CblasNoTrans) ? K : M;
const int ldb = (TransB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
CUBLAS_ENFORCE(hipblasSgemmStridedBatched(
context->cublas_handle(),
cuTransB,
cuTransA,
N,
M,
K,
&alpha,
B,
ldb,
b_stride,
A,
lda,
a_stride,
&beta,
C,
N,
c_stride,
batch_size));
#endif
}
namespace {
__global__ void FloatToHalfKernel(const int N, const float* X, half* Y) {
CUDA_1D_KERNEL_LOOP(i, N) {
Y[i] = __float2half(X[i]);
}
}
__global__ void HalfToFloatKernel(const int N, const half* X, float* Y) {
CUDA_1D_KERNEL_LOOP(i, N) {
Y[i] = __half2float(X[i]);
}
}
} // namespace
template <>
void GemmBatched<float16, CUDAContext>(
const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB,
const int batch_size,
const int M,
const int N,
const int K,
const float alpha,
const float16* A,
const float16* B,
const float beta,
float16* C,
CUDAContext* context,
Tensor<CUDAContext>* scratch,
TensorProto::DataType math_type) {
const int a_stride = M * K;
const int b_stride = K * N;
const int c_stride = M * N;
#if __CUDACC_VER_MAJOR__ < 8
// loop over matrices in the batch
for (int i = 0; i < batch_size; ++i) {
math::Gemm<float16, CUDAContext>(
TransA,
TransB,
M,
N,
K,
alpha,
A + a_stride * i,
B + b_stride * i,
beta,
C + c_stride * i,
context);
}
#else
  // 3 options:
  // 1) scratch != nullptr: cast A/B to fp32, SgemmStridedBatched, cast result to fp16
  // 2) math_type == FLOAT, scratch == nullptr: looped SgemmEx
  // 3) math_type == FLOAT16, scratch == nullptr: batched Hgemm
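  // Option 1 trades memory for speed: the scratch tensor must hold fp32
  // copies of A, B and C, i.e. (M*K + K*N + M*N) * batch_size floats.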
if (scratch != nullptr) {
const int A_size = a_stride * batch_size;
const int B_size = b_stride * batch_size;
// cast, hipblasSgemmStridedBatched, cast
size_t in_elems = A_size + B_size;
size_t out_elems = c_stride * batch_size;
scratch->Resize(in_elems + out_elems);
float* scratch_ptr = scratch->mutable_data<float>();
float* A_fp32 = scratch_ptr;
float* B_fp32 = scratch_ptr + A_size;
float* C_fp32 = scratch_ptr + A_size + B_size;
// cast A, B into fp32
hipLaunchKernelGGL(( HalfToFloatKernel),
dim3(CAFFE_GET_BLOCKS(A_size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), A_size, (half*)A, A_fp32);
hipLaunchKernelGGL(( HalfToFloatKernel),
dim3(CAFFE_GET_BLOCKS(B_size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), B_size, (half*)B, B_fp32);
// run fp32 batched Gemm
GemmBatched<float, CUDAContext>(
TransA,
TransB,
batch_size,
M,
N,
K,
alpha,
A_fp32,
B_fp32,
beta,
C_fp32,
context);
// cast result back to fp16
hipLaunchKernelGGL(( FloatToHalfKernel),
dim3(CAFFE_GET_BLOCKS(batch_size * M * N)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), batch_size * M * N, C_fp32, (half*)C);
} else {
if (math_type == TensorProto_DataType_FLOAT) {
// loop over matrices in the batch
for (int i = 0; i < batch_size; ++i) {
math::Gemm<float16, CUDAContext>(
TransA,
TransB,
M,
N,
K,
alpha,
A + a_stride * i,
B + b_stride * i,
beta,
C + c_stride * i,
context);
}
} else if (math_type == TensorProto_DataType_FLOAT16) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
const int lda = (TransA == CblasNoTrans) ? K : M;
const int ldb = (TransB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
// convert alpha, beta from float -> __half
auto alpha_fp16 = convert::floatToHalf(alpha);
auto beta_fp16 = convert::floatToHalf(beta);
CUBLAS_ENFORCE(hipblasHgemmStridedBatched(
context->cublas_handle(),
cuTransB,
cuTransA,
N,
M,
K,
&alpha_fp16,
(const __half*)B,
ldb,
b_stride,
(const __half*)A,
lda,
a_stride,
&beta_fp16,
(__half*)C,
N,
c_stride,
batch_size));
}
}
#endif
}
#if TORCH_HIP_VERSION >= 9000
// No behavioral change, but the explicit specialization is required; defer to
// the default CUDA engine.
template <>
void Gemm<float, CUDAContext, TensorCoreEngine>(
const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB,
const int M,
const int N,
const int K,
const float alpha,
const float* A,
const float* B,
const float beta,
float* C,
CUDAContext* context,
TensorProto::DataType math_type) {
return Gemm<float, CUDAContext>(
TransA, TransB, M, N, K, alpha, A, B, beta, C, context, math_type);
}
template <>
void Gemm<float16, CUDAContext, TensorCoreEngine>(
const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB,
const int M,
const int N,
const int K,
const float alpha,
const float16* A,
const float16* B,
const float beta,
float16* C,
CUDAContext* context,
TensorProto::DataType math_type) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
// enable TensorCore for this call on this handle
if (TensorCoreAvailable()) {
CUBLAS_ENFORCE(
cublasSetMathMode(context->cublas_handle(), CUBLAS_TENSOR_OP_MATH));
}
CUBLAS_CHECK(hipblasGemmEx(
context->cublas_handle(),
cuTransB,
cuTransA,
N,
M,
K,
&alpha,
B,
HIP_R_16F,
ldb,
A,
HIP_R_16F,
lda,
&beta,
C,
HIP_R_16F,
N,
HIP_R_32F,
CUBLAS_GEMM_DFALT_TENSOR_OP));
// Now disable TensorCore math for subsequent calls to this handle
if (TensorCoreAvailable()) {
CUBLAS_ENFORCE(
cublasSetMathMode(context->cublas_handle(), CUBLAS_DEFAULT_MATH));
}
}
template <>
void GemmBatched<float, CUDAContext, TensorCoreEngine>(
const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB,
const int batch_size,
const int M,
const int N,
const int K,
const float alpha,
const float* A,
const float* B,
const float beta,
float* C,
CUDAContext* context,
Tensor<CUDAContext>* scratch,
TensorProto::DataType math_type) {
return GemmBatched<float, CUDAContext, DefaultEngine>(
TransA,
TransB,
batch_size,
M,
N,
K,
alpha,
A,
B,
beta,
C,
context,
scratch,
math_type);
}
template <>
void GemmBatched<float16, CUDAContext, TensorCoreEngine>(
const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB,
const int batch_size,
const int M,
const int N,
const int K,
const float alpha,
const float16* A,
const float16* B,
const float beta,
float16* C,
CUDAContext* context,
Tensor<CUDAContext>* scratch,
TensorProto::DataType math_type) {
return GemmBatched<float16, CUDAContext, DefaultEngine>(
TransA,
TransB,
batch_size,
M,
N,
K,
alpha,
A,
B,
beta,
C,
context,
scratch,
math_type);
}
#endif // TORCH_HIP_VERSION >= 9000
template <>
void GemmEx<float, CUDAContext>(
const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB,
const int M,
const int N,
const int K,
const float alpha,
const float* A,
const int lda,
const float* B,
const int ldb,
const float beta,
float* C,
const int ldc,
CUDAContext* context) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
CUBLAS_ENFORCE(hipblasSgemm(
context->cublas_handle(),
cuTransB,
cuTransA,
N,
M,
K,
&alpha,
B,
ldb,
A,
lda,
&beta,
C,
ldc));
}
template <>
void Gemv<float, CUDAContext>(
const CBLAS_TRANSPOSE TransA,
const int M,
const int N,
const float alpha,
const float* A,
const float* x,
const float beta,
float* y,
CUDAContext* context,
TensorProto::DataType math_type) {
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
CUBLAS_ENFORCE(hipblasSgemv(
context->cublas_handle(),
cuTransA,
N,
M,
&alpha,
A,
N,
x,
1,
&beta,
y,
1));
}
// Batched Add variants
namespace {
template <typename T>
__global__ void AddStripedBatchKernel(
const int N,
const T* first,
T* Y,
const int stripe,
const int batch) {
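  // Accumulate through float regardless of T so the float16 instantiation
  // does not lose precision while summing across stripes.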
for (int j = 0; j < batch; j++) {
const T* x = first + j * stripe;
CUDA_1D_KERNEL_LOOP(i, N) {
float tmpY = convert::To<T, float>(Y[i]);
tmpY += convert::To<T, float>(x[i]);
Y[i] = convert::To<float, T>(tmpY);
}
}
}
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH(T) \
template <> \
void AddStripedBatch<T, CUDAContext>( \
const int N, \
const T* first, \
T* Y, \
const int stripe, \
const int batch, \
CUDAContext* context) { \
hipLaunchKernelGGL(( AddStripedBatchKernel<T>) \
, dim3(CAFFE_GET_BLOCKS(N)), \
          dim3(CAFFE_CUDA_NUM_THREADS),                                \
0, \
context->cuda_stream(), N, first, Y, stripe, batch); \
}
CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH(float);
CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH(float16);
#undef CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH
template <>
void Gemv<float16, CUDAContext>(
const CBLAS_TRANSPOSE TransA,
const int M,
const int N,
const float alpha,
const float16* A,
const float16* x,
const float beta,
float16* y,
CUDAContext* context,
TensorProto::DataType math_type) {
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
  // Work out the GEMM dimensions needed for the cublasSgemmEx / hipblasHgemm
  // call below.
int m = (cuTransA == HIPBLAS_OP_N) ? N : M;
int k = (cuTransA == HIPBLAS_OP_N) ? M : N;
int LDA = (cuTransA == HIPBLAS_OP_N) ? m : k;
int LDC = m;
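  // The GEMV is phrased as a GEMM with a single-column right-hand side:
  // y = op(A) * x becomes an (m x k) * (k x 1) product, which lets the fp16
  // GEMM entry points be reused (cublas provides no fp16 gemv).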
if (math_type == TensorProto_DataType_FLOAT) {
CUBLAS_CHECK(cublasSgemmEx(
context->cublas_handle(),
cuTransA,
HIPBLAS_OP_N,
m,
1,
k,
&alpha,
A,
HIP_R_16F,
LDA,
x,
HIP_R_16F,
k,
&beta,
y,
HIP_R_16F,
LDC));
} else if (math_type == TensorProto_DataType_FLOAT16) {
auto alpha_fp16 = convert::floatToHalf(alpha);
auto beta_fp16 = convert::floatToHalf(beta);
CUBLAS_CHECK(hipblasHgemm(
context->cublas_handle(),
cuTransA,
HIPBLAS_OP_N,
m,
1,
k,
&alpha_fp16,
(const __half*)A,
LDA,
(const __half*)x,
k,
&beta_fp16,
(__half*)y,
LDC));
} else {
// fail
CAFFE_THROW("Unsupported math type");
}
}
namespace {
template <typename T>
__global__ void SetKernel(const int N, const T alpha, T* Y) {
CUDA_1D_KERNEL_LOOP(i, N) {
Y[i] = alpha;
}
}
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_SET(T) \
template <> \
void Set<T, CUDAContext>( \
const size_t N, const T alpha, T* Y, CUDAContext* context) { \
hipLaunchKernelGGL(( SetKernel), \
        dim3(CAFFE_GET_BLOCKS(N)),                                    \
        dim3(CAFFE_CUDA_NUM_THREADS),                                 \
0, \
context->cuda_stream(), N, alpha, Y); \
}
CAFFE2_SPECIALIZED_CUDA_SET(float);
CAFFE2_SPECIALIZED_CUDA_SET(double);
CAFFE2_SPECIALIZED_CUDA_SET(bool);
CAFFE2_SPECIALIZED_CUDA_SET(int8_t);
CAFFE2_SPECIALIZED_CUDA_SET(int16_t);
CAFFE2_SPECIALIZED_CUDA_SET(float16);
CAFFE2_SPECIALIZED_CUDA_SET(int);
CAFFE2_SPECIALIZED_CUDA_SET(int64_t);
CAFFE2_SPECIALIZED_CUDA_SET(char);
CAFFE2_SPECIALIZED_CUDA_SET(uint8_t);
CAFFE2_SPECIALIZED_CUDA_SET(uint16_t);
#undef CAFFE2_SPECIALIZED_CUDA_SET
namespace {
template <typename T>
__global__ void
UniformShift(const size_t N, const float min, const float max, T* x) {
float scale = max - min;
CUDA_1D_KERNEL_LOOP(i, N) {
x[i] = convert::To<float, T>(convert::To<T, float>(x[i]) * scale + min);
}
}
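// UniformIntFit maps raw 32-bit uniform samples into [min, max] with a
// modulo; note this carries a slight bias toward smaller values whenever
// (max - min + 1) does not evenly divide 2^32.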
__global__ void
UniformIntFit(const size_t N, const int min, const int max, unsigned int* x) {
int* x_int = reinterpret_cast<int*>(x);
int range = (max - min + 1);
CUDA_1D_KERNEL_LOOP(i, N) {
x_int[i] = min + static_cast<int>(x[i] % range);
}
}
} // namespace
template <>
void RandUniform<float, CUDAContext>(
const size_t n,
const float min,
const float max,
float* r,
CUDAContext* context) {
CURAND_ENFORCE(hiprandGenerateUniform(context->curand_generator(), r, n));
hipLaunchKernelGGL(( UniformShift<float>)
, dim3(CAFFE_GET_BLOCKS(n)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), n, min, max, r);
}
template <>
void RandUniform<double, CUDAContext>(
const size_t n,
const double min,
const double max,
double* r,
CUDAContext* context) {
CURAND_ENFORCE(
hiprandGenerateUniformDouble(context->curand_generator(), r, n));
hipLaunchKernelGGL(( UniformShift<double>)
, dim3(CAFFE_GET_BLOCKS(n)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), n, min, max, r);
}
template <>
void RandUniform<int, CUDAContext>(
const size_t n,
const int min,
const int max,
int* r,
CUDAContext* context) {
CURAND_ENFORCE(hiprandGenerate(
context->curand_generator(), reinterpret_cast<unsigned int*>(r), n));
hipLaunchKernelGGL(( UniformIntFit),
dim3(CAFFE_GET_BLOCKS(n)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(),
n, min, max, reinterpret_cast<unsigned int*>(r));
}
template <typename T>
size_t HandleOddLengthRandGaussian(
const size_t n,
const T mean,
const T std,
T* r,
CUDAContext* context) {
if (n % 2 == 1) {
std::default_random_engine generator;
std::normal_distribution<T> distribution(mean, std);
const T random_value = distribution(generator);
math::Set<T, CUDAContext>(1, random_value, r + (n - 1), context);
return n - 1;
}
return n;
}
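// Caveat: the std::default_random_engine above is default-constructed (and
// thus default-seeded) on every call, so the host-generated tail value is
// the same across calls; it merely pads the buffer to an even length.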
template <>
void RandGaussian<float, CUDAContext>(
const size_t n,
const float mean,
const float std,
float* r,
CUDAContext* context) {
// If n is odd, we add a random Gaussian value at the end manually
// and generate n-1 random values using hiprandGenerateNormal.
// hiprandGenerateNormal requires n to be even.
const size_t even_n =
HandleOddLengthRandGaussian<float>(n, mean, std, r, context);
CURAND_ENFORCE(
hiprandGenerateNormal(context->curand_generator(), r, even_n, mean, std));
}
template <>
void RandGaussian<double, CUDAContext>(
const size_t n,
const double mean,
const double std,
double* r,
CUDAContext* context) {
const size_t even_n =
HandleOddLengthRandGaussian<double>(n, mean, std, r, context);
CURAND_ENFORCE(hiprandGenerateNormalDouble(
context->curand_generator(), r, even_n, mean, std));
}
template <>
void Dot<float, CUDAContext>(
const int n,
const float* a,
const float* b,
float* y,
CUDAContext* context) {
float result;
CUBLAS_ENFORCE(hipblasSdot(context->cublas_handle(), n, a, 1, b, 1, &result));
context->Copy<float, CPUContext, CUDAContext>(1, &result, y);
}
template <>
void Dot<float16, CUDAContext>(
const int n,
const float16* a,
const float16* b,
float16* y,
CUDAContext* context) {
float16 result;
// execute with 32-bit math
CUBLAS_CHECK(hipblasDotEx_v2(
context->cublas_handle(),
n,
a,
HIP_R_16F,
1,
b,
HIP_R_16F,
1,
&result,
HIP_R_16F,
HIP_R_32F));
context->Copy<float16, CPUContext, CUDAContext>(1, &result, y);
}
// A previous version of caffe2 used Thrust, but Thrust reductions perform an
// implicit scratch-space allocation and deallocation that can interfere with
// NCCL and lead to a deadlock. Hence we use a custom reduction here.
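// The SumKernel below therefore reduces within a single 128-thread block
// through shared memory, performing no device allocation at all, at the cost
// of limited parallelism for large N (see DEVICE_REDUCE_SIZE_THRESHOLD below).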
#define SUM_KERNEL_NTHREADS 128
template <typename T>
__global__ void SumKernel(const int N, const T* X, T* Y, bool square) {
const int idx = threadIdx.x;
__shared__ float reduction_buffer[SUM_KERNEL_NTHREADS];
reduction_buffer[idx] = 0;
// A multilevel reduction.
// N -> 128
if (!square) {
for (int i = idx; i < N; i += SUM_KERNEL_NTHREADS) {
reduction_buffer[idx] += convert::To<T, float>(X[i]);
}
} else {
for (int i = idx; i < N; i += SUM_KERNEL_NTHREADS) {
float Xi = convert::To<T, float>(X[i]);
reduction_buffer[idx] += Xi * Xi;
}
}
__syncthreads();
// 128 -> 32
if (idx < 32) {
reduction_buffer[idx] += reduction_buffer[idx + 32] +
reduction_buffer[idx + 64] + reduction_buffer[idx + 96];
}
__syncthreads();
// 32 -> 1
if (idx == 0) {
float tmp = 0;
for (int i = 0; i < 32; ++i) {
tmp += reduction_buffer[i];
}
*Y = convert::To<float, T>(tmp);
}
}
// According to the benchmark script
// caffe2/caffe2/experiments/python/device_reduce_sum_bench.py,
// device reduce is slower for N <= 10000.
#define DEVICE_REDUCE_SIZE_THRESHOLD 10000
namespace {
template <typename T>
__global__ void SumConvertKernel(float* sum, T* dest) {
*dest = convert::To<float, T>(*sum);
}
template <typename T, typename IterT>
void SumGenericIter(
const int N,
IterT it,
T*& dest,
CUDAContext* context,
Tensor<CUDAContext>* scratch_ptr) {
size_t memRequired = 0;
hipcub::DeviceReduce::Sum(
nullptr, memRequired, it, dest, N, context->cuda_stream());
auto buffer_size =
static_cast<TIndex>((memRequired + sizeof(T) - 1) / sizeof(T));
if (!dest) {
// allocate one more T at the end of scratch for dest
scratch_ptr->Resize(std::vector<TIndex>{buffer_size + 1});
dest = scratch_ptr->template mutable_data<T>() + buffer_size;
} else {
scratch_ptr->Resize(std::vector<TIndex>{buffer_size});
}
hipcub::DeviceReduce::Sum(
static_cast<void*>(scratch_ptr->template mutable_data<T>()),
memRequired,
it,
dest,
N,
context->cuda_stream());
}
} // namespace
template <>
void Sum<float, CUDAContext>(
const int N,
const float* x,
float* y,
CUDAContext* context,
Tensor<CUDAContext>* scratch_ptr) {
if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) {
SumGenericIter<float>(N, x, y, context, scratch_ptr);
} else {
hipLaunchKernelGGL(( SumKernel), dim3(1), dim3(SUM_KERNEL_NTHREADS), 0, context->cuda_stream(),
N, x, y, false);
}
}
template <>
void Sum<int32_t, CUDAContext>(
const int N,
const int32_t* x,
int32_t* y,
CUDAContext* context,
Tensor<CUDAContext>* scratch_ptr) {
if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) {
SumGenericIter<int32_t>(N, x, y, context, scratch_ptr);
} else {
hipLaunchKernelGGL(( SumKernel), dim3(1), dim3(SUM_KERNEL_NTHREADS), 0, context->cuda_stream(),
N, x, y, false);
}
}
namespace {
template <typename T>
struct FloatTransform {
inline __host__ __device__ float operator()(const T v) const {
return convert::To<T, float>(v);
}
};
} // namespace
#define CAFFE2_MATH_SUM_FUNC(T) \
template <> \
void Sum<T, CUDAContext>( \
const int N, \
const T* x, \
T* y, \
CUDAContext* context, \
Tensor<CUDAContext>* scratch_ptr) { \
if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) { \
FloatTransform<T> transform; \
hipcub::TransformInputIterator<float, FloatTransform<T>, const T*> it( \
x, transform); \
float* sum = nullptr; \
SumGenericIter<float>(N, it, sum, context, scratch_ptr); \
hipLaunchKernelGGL(( SumConvertKernel), dim3(1), dim3(1), 0, context->cuda_stream(), sum, y); \
} else { \
hipLaunchKernelGGL(( SumKernel), dim3(1), dim3(SUM_KERNEL_NTHREADS), 0, context->cuda_stream(), \
N, x, y, false); \
} \
}
CAFFE2_MATH_SUM_FUNC(float16)
#undef CAFFE2_MATH_SUM_FUNC
namespace {
template <typename T>
struct SqrTransform {
inline __host__ __device__ T operator()(const T v) const {
return v * v;
}
};
} // namespace
template <>
void SumSqr<float, CUDAContext>(
const int N,
const float* x,
float* y,
CUDAContext* context,
Tensor<CUDAContext>* scratch_ptr) {
if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) {
SqrTransform<float> transform;
hipcub::TransformInputIterator<float, SqrTransform<float>, const float*> it(
x, transform);
SumGenericIter<float>(N, it, y, context, scratch_ptr);
} else {
hipLaunchKernelGGL(( SumKernel), dim3(1), dim3(SUM_KERNEL_NTHREADS), 0, context->cuda_stream(),
N, x, y, true);
}
}
#define CAFFE2_MATH_SUMSQR_FUNC(T) \
template <> \
void SumSqr<T, CUDAContext>( \
const int N, \
const T* x, \
T* y, \
CUDAContext* context, \
Tensor<CUDAContext>* scratch_ptr) { \
if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) { \
FloatTransform<T> float_transform; \
hipcub::TransformInputIterator<float, FloatTransform<T>, const T*> \
float_it(x, float_transform); \
SqrTransform<float> sqr_transform; \
hipcub::TransformInputIterator< \
float, \
SqrTransform<float>, \
decltype(float_it)> \
it(float_it, sqr_transform); \
float* sum = nullptr; \
SumGenericIter<float>(N, it, sum, context, scratch_ptr); \
hipLaunchKernelGGL(( SumConvertKernel), dim3(1), dim3(1), 0, context->cuda_stream(), sum, y); \
} else { \
hipLaunchKernelGGL(( SumKernel), dim3(1), dim3(SUM_KERNEL_NTHREADS), 0, context->cuda_stream(), \
N, x, y, true); \
} \
}
CAFFE2_MATH_SUMSQR_FUNC(float16)
#undef CAFFE2_MATH_SUMSQR_FUNC
#undef DEVICE_REDUCE_SIZE_THRESHOLD
namespace {
template <typename T>
__global__ void
SelectKernel(const int N, const int D, const T* x, const int* idx, T* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = x[i * D + idx[i]];
}
}
} // namespace
template <>
void Select<float, CUDAContext>(
const int N,
const int D,
const float* x,
const int* idx,
float* y,
CUDAContext* context) {
hipLaunchKernelGGL(( SelectKernel<float>)
, dim3(CAFFE_GET_BLOCKS(N)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), N, D, x, idx, y);
}
template <>
void Select<float16, CUDAContext>(
const int N,
const int D,
const float16* x,
const int* idx,
float16* y,
CUDAContext* context) {
hipLaunchKernelGGL(( SelectKernel<float16>)
, dim3(CAFFE_GET_BLOCKS(N)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), N, D, x, idx, y);
}
namespace {
template <typename T>
__global__ void ScaleKernel(const int n, const float alpha, const T* x, T* y) {
CUDA_1D_KERNEL_LOOP(i, n) {
y[i] = convert::Get<T>(convert::Get<float>(x[i]) * alpha);
}
}
template <typename T>
__global__ void
ScaleKernelDeviceAlpha(const int n, const float* alpha, const T* x, T* y) {
CUDA_1D_KERNEL_LOOP(i, n) {
y[i] = x[i] * (*alpha);
}
}
template <typename T>
__global__ void PowKernel(const int n, const T* x, const T exponent, T* y) {
CUDA_1D_KERNEL_LOOP(i, n) {
y[i] = powf(x[i], exponent);
}
}
// fp16 specialization
template <>
__global__ void ScaleKernelDeviceAlpha(
const int n,
const float* alpha,
const float16* x,
float16* y) {
CUDA_1D_KERNEL_LOOP(i, n) {
y[i] = convert::To<float, float16>(
convert::To<float16, float>(x[i]) * (*alpha));
}
}
} // namespace
template <>
void Powx<float, CUDAContext>(
const int N,
const float* a,
const float b,
float* y,
CUDAContext* context) {
hipLaunchKernelGGL(( PowKernel),
dim3(CAFFE_GET_BLOCKS(N)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), N, a, b, y);
}
template <>
void Scale<float, CUDAContext>(
const int n,
const float alpha,
const float* x,
float* y,
CUDAContext* context) {
hipLaunchKernelGGL(( ScaleKernel<float>)
, dim3(CAFFE_GET_BLOCKS(n)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), n, alpha, x, y);
}
template <>
void Scale<float16, CUDAContext>(
const int n,
const float alpha,
const float16* x,
float16* y,
CUDAContext* context) {
hipLaunchKernelGGL(( ScaleKernel<float16>)
, dim3(CAFFE_GET_BLOCKS(n)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), n, alpha, x, y);
}
template <>
void Scale<float, CUDAContext>(
const int n,
const float* alpha,
const float* x,
float* y,
CUDAContext* context) {
hipLaunchKernelGGL(( ScaleKernelDeviceAlpha<float>)
, dim3(CAFFE_GET_BLOCKS(n)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), n, alpha, x, y);
}
template <>
void Scale<float16, CUDAContext>(
const int n,
const float* alpha,
const float16* x,
float16* y,
CUDAContext* context) {
hipLaunchKernelGGL(( ScaleKernelDeviceAlpha<float16>)
, dim3(CAFFE_GET_BLOCKS(n)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), n, alpha, x, y);
}
template <>
void Axpy<float, CUDAContext>(
const int N,
const float alpha,
const float* X,
float* Y,
CUDAContext* context) {
CUBLAS_ENFORCE(hipblasSaxpy(context->cublas_handle(), N, &alpha, X, 1, Y, 1));
}
template <>
void Axpy<double, CUDAContext>(
const int N,
const float alpha,
const double* X,
double* Y,
CUDAContext* context) {
double alpha_d{alpha};
CUBLAS_ENFORCE(
hipblasDaxpy(context->cublas_handle(), N, &alpha_d, X, 1, Y, 1));
}
template <>
void Axpy<float16, CUDAContext>(
const int N,
const float alpha,
const float16* X,
float16* Y,
CUDAContext* context) {
CUBLAS_CHECK(hipblasAxpyEx_v2(
context->cublas_handle(),
N,
&alpha,
HIP_R_16F,
X,
HIP_R_16F,
1,
Y,
HIP_R_16F,
1,
HIP_R_32F));
}
namespace {
template <typename T>
__global__ void AxpyKernel(const int n, const float* a, const T* x, T* y) {
CUDA_1D_KERNEL_LOOP(index, n) {
y[index] = convert::Get<T>(
convert::Get<float>(x[index]) * (*a) + convert::Get<float>(y[index]));
}
}
} // namespace
template <>
void Axpy<float, CUDAContext>(
const int n,
const float* alpha,
const float* X,
float* Y,
CUDAContext* context) {
hipLaunchKernelGGL(( AxpyKernel<float>)
, dim3(CAFFE_GET_BLOCKS(n)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), n, alpha, X, Y);
}
template <>
void Axpy<float16, CUDAContext>(
const int n,
const float* alpha,
const float16* X,
float16* Y,
CUDAContext* context) {
hipLaunchKernelGGL(( AxpyKernel<float16>)
, dim3(CAFFE_GET_BLOCKS(n)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), n, alpha, X, Y);
}
namespace {
template <typename T>
__global__ void
AxpbyKernel(const int n, const T a, const T* x, const T b, T* y) {
CUDA_1D_KERNEL_LOOP(index, n) {
y[index] = x[index] * a + y[index] * b;
}
}
} // namespace
template <>
void Axpby<float, CUDAContext>(
const int n,
const float a,
const float* x,
const float b,
float* y,
CUDAContext* context) {
hipLaunchKernelGGL(( AxpbyKernel<float>)
, dim3(CAFFE_GET_BLOCKS(n)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), n, a, x, b, y);
}
namespace {
template <typename T>
__global__ void Im2ColNCHWCUDAKernel(
const int n,
const int input_h,
const int input_w,
const int kernel_h,
const int kernel_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int stride_h,
const int stride_w,
const int output_h,
const int output_w,
const T* img_data,
T* col_data) {
CUDA_1D_KERNEL_LOOP(index, n) {
const int w_out = index % output_w;
const int h_index = index / output_w;
const int h_out = h_index % output_h;
const int channel_in = h_index / output_h;
const int channel_out = channel_in * kernel_h * kernel_w;
const int h_in = h_out * stride_h - pad_t;
const int w_in = w_out * stride_w - pad_l;
const int output_size = output_h * output_w;
T* col_data_ptr =
col_data + (channel_out * output_h + h_out) * output_w + w_out;
const T* img_data_ptr =
img_data + (channel_in * input_h + h_in) * input_w + w_in;
int dh = 0;
for (int i = 0; i < kernel_h; ++i) {
int dw = 0;
for (int j = 0; j < kernel_w; ++j) {
const int h = h_in + dh;
const int w = w_in + dw;
#if __CUDA_ARCH__ >= 350
*col_data_ptr = (h >= 0 && w >= 0 && h < input_h && w < input_w)
? __ldg(img_data_ptr + dh * input_w + dw)
: 0;
#else
*col_data_ptr = (h >= 0 && w >= 0 && h < input_h && w < input_w)
? img_data_ptr[dh * input_w + dw]
: 0;
#endif
col_data_ptr += output_size;
dw += dilation_w;
}
dh += dilation_h;
}
}
}
template <typename T>
__global__ void Im2ColNHWCCUDAKernel(
const int n,
const int input_h,
const int input_w,
const int kernel_h,
const int kernel_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int stride_h,
const int stride_w,
const int output_w,
const int channels,
const T* img_data,
T* col_data) {
CUDA_1D_KERNEL_LOOP(index, n) {
const int channel_in = index % channels;
const int w_out = index / channels % output_w;
const int h_out = index / channels / output_w;
const int h_in = h_out * stride_h - pad_t;
const int w_in = w_out * stride_w - pad_l;
T* col_data_ptr = col_data +
(h_out * output_w + w_out) * channels * kernel_h * kernel_w +
channel_in;
int dh = 0;
for (int i = 0; i < kernel_h; ++i) {
int dw = 0;
for (int j = 0; j < kernel_w; ++j) {
const int h = h_in + dh;
const int w = w_in + dw;
#if __CUDA_ARCH__ >= 350
*col_data_ptr = (h >= 0 && w >= 0 && h < input_h && w < input_w)
? __ldg(img_data + (h * input_w + w) * channels + channel_in)
: 0;
#else
*col_data_ptr = (h >= 0 && w >= 0 && h < input_h && w < input_w)
? img_data[(h * input_w + w) * channels + channel_in]
: 0;
#endif
col_data_ptr += channels;
dw += dilation_w;
}
dh += dilation_h;
}
}
}
template <typename T>
__global__ void Col2ImNCHWCUDAKernel(
const int n,
const int input_h,
const int input_w,
const int patch_h,
const int patch_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int stride_h,
const int stride_w,
const int output_h,
const int output_w,
const T* col_data,
T* img_data) {
const int dpatch_h = dilation_h * (patch_h - 1) + 1;
const int dpatch_w = dilation_w * (patch_w - 1) + 1;
CUDA_1D_KERNEL_LOOP(index, n) {
T val = 0;
const int w = index % input_w + pad_l;
const int h = index / input_w % input_h + pad_t;
const int c = index / (input_h * input_w);
// compute the start and end of the output
const int w_col_start = (w < dpatch_w) ? 0 : (w - dpatch_w) / stride_w + 1;
const int w_col_end = min(w / stride_w + 1, output_w);
const int h_col_start = (h < dpatch_h) ? 0 : (h - dpatch_h) / stride_h + 1;
const int h_col_end = min(h / stride_h + 1, output_h);
for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
int h_k = (h - h_col * stride_h);
int w_k = (w - w_col * stride_w);
if (h_k % dilation_h == 0 && w_k % dilation_w == 0) {
h_k /= dilation_h;
w_k /= dilation_w;
const int col_data_index =
(((c * patch_h + h_k) * patch_w + w_k) * output_h + h_col) *
output_w +
w_col;
#if __CUDA_ARCH__ >= 350
val += __ldg(col_data + col_data_index);
#else
val += col_data[col_data_index];
#endif
}
}
}
img_data[index] = val;
}
}
template <typename T>
__global__ void Col2ImNHWCCUDAKernel(
const int n,
const int input_w,
const int channels,
const int patch_h,
const int patch_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int stride_h,
const int stride_w,
const int output_h,
const int output_w,
const T* col_data,
T* img_data) {
const int dpatch_h = dilation_h * (patch_h - 1) + 1;
const int dpatch_w = dilation_w * (patch_w - 1) + 1;
CUDA_1D_KERNEL_LOOP(index, n) {
T val = 0;
const int c = index % channels;
const int w = index / channels % input_w + pad_l;
const int h = index / channels / input_w + pad_t;
// compute the start and end of the output
const int w_col_start = (w < dpatch_w) ? 0 : (w - dpatch_w) / stride_w + 1;
const int w_col_end = min(w / stride_w + 1, output_w);
const int h_col_start = (h < dpatch_h) ? 0 : (h - dpatch_h) / stride_h + 1;
const int h_col_end = min(h / stride_h + 1, output_h);
const int channels_col = patch_h * patch_w * channels;
for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
int h_k = h - h_col * stride_h;
int w_k = w - w_col * stride_w;
if (h_k % dilation_h == 0 && w_k % dilation_w == 0) {
h_k /= dilation_h;
w_k /= dilation_w;
const int c_col = (h_k * patch_w + w_k) * channels + c;
#if __CUDA_ARCH__ >= 350
val += __ldg(
col_data + (h_col * output_w + w_col) * channels_col + c_col);
#else
val += col_data[(h_col * output_w + w_col) * channels_col + c_col];
#endif
}
}
}
img_data[index] = val;
}
}
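// One kernel serves both directions via the kCol2Im flag: im2col is a pure
// gather (each column element has a single writer), while col2im must
// scatter with atomicAdd because several column entries can map to the same
// image element.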
template <typename T, int N, bool kCol2Im>
__global__ void Im2ColNdNCHWCUDAKernel(
const int outer_size,
const int inner_size,
const int kernel_size,
SimpleArray<int, N + 1> img_shape,
SimpleArray<int, N + 1> col_shape,
SimpleArray<int, N> kernel_shape,
SimpleArray<int, N> stride,
SimpleArray<int, N> dilation,
SimpleArray<int, N> pad,
const T* X_data,
T* Y_data) {
int d_offset[N];
int d_iter[N];
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
int offset_i = i;
#pragma unroll
for (int d_i = N - 1; d_i >= 0; --d_i) {
d_offset[d_i] = offset_i % kernel_shape.data[d_i];
offset_i /= kernel_shape.data[d_i];
}
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
int offset_j = j;
#pragma unroll
for (int d_i = N - 1; d_i >= 0; --d_i) {
d_iter[d_i] = offset_j % col_shape.data[d_i + 1];
offset_j /= col_shape.data[d_i + 1];
}
const int col_index = i * inner_size + j;
int img_index = i / kernel_size;
bool is_padding = false;
#pragma unroll
for (int d_i = 0; d_i < N; ++d_i) {
const int d_img = d_iter[d_i] * stride.data[d_i] - pad.data[d_i] +
d_offset[d_i] * dilation.data[d_i];
is_padding |= d_img < 0 || d_img >= img_shape.data[d_i + 1];
img_index = img_index * img_shape.data[d_i + 1] + d_img;
}
#if __CUDA_ARCH__ >= 350
if (!kCol2Im) {
Y_data[col_index] = is_padding ? 0 : __ldg(X_data + img_index);
} else if (!is_padding) {
atomicAdd(Y_data + img_index, __ldg(X_data + col_index));
}
#else
if (!kCol2Im) {
Y_data[col_index] = is_padding ? 0 : X_data[img_index];
} else if (!is_padding) {
atomicAdd(Y_data + img_index, X_data[col_index]);
}
#endif
}
}
}
template <typename T, int N>
void Im2ColNdNCHWCUDAImpl(
const int img_size,
const int col_size,
const int* img_shape,
const int* col_shape,
const int* kernel_shape,
const int* stride,
const int* dilation,
const int* pad,
const float* img_data,
float* col_data,
CUDAContext* context) {
const int outer_size = col_shape[0];
const int inner_size = col_size / outer_size;
const int kernel_size = std::accumulate(
kernel_shape, kernel_shape + N, 1, std::multiplies<int>());
SimpleArray<int, N + 1> img_shape_array;
SimpleArray<int, N + 1> col_shape_array;
SimpleArray<int, N> kernel_shape_array;
SimpleArray<int, N> stride_array;
SimpleArray<int, N> dilation_array;
SimpleArray<int, N> pad_array;
std::memcpy(img_shape_array.data, img_shape, (N + 1) * sizeof(int));
std::memcpy(col_shape_array.data, col_shape, (N + 1) * sizeof(int));
std::memcpy(kernel_shape_array.data, kernel_shape, N * sizeof(int));
std::memcpy(stride_array.data, stride, N * sizeof(int));
std::memcpy(dilation_array.data, dilation, N * sizeof(int));
std::memcpy(pad_array.data, pad, N * sizeof(int));
hipLaunchKernelGGL(( Im2ColNdNCHWCUDAKernel<T, N, false>)
, dim3(::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(),
outer_size,
inner_size,
kernel_size,
img_shape_array,
col_shape_array,
kernel_shape_array,
stride_array,
dilation_array,
pad_array,
img_data,
col_data);
}
template <typename T, int N>
void Col2ImNdNCHWCUDAImpl(
const int img_size,
const int col_size,
const int* img_shape,
const int* col_shape,
const int* kernel_shape,
const int* stride,
const int* dilation,
const int* pad,
const float* col_data,
float* img_data,
CUDAContext* context) {
const int outer_size = col_shape[0];
const int inner_size = col_size / outer_size;
const int kernel_size = std::accumulate(
kernel_shape, kernel_shape + N, 1, std::multiplies<int>());
SimpleArray<int, N + 1> img_shape_array;
SimpleArray<int, N + 1> col_shape_array;
SimpleArray<int, N> kernel_shape_array;
SimpleArray<int, N> stride_array;
SimpleArray<int, N> dilation_array;
SimpleArray<int, N> pad_array;
std::memcpy(img_shape_array.data, img_shape, (N + 1) * sizeof(int));
std::memcpy(col_shape_array.data, col_shape, (N + 1) * sizeof(int));
std::memcpy(kernel_shape_array.data, kernel_shape, N * sizeof(int));
std::memcpy(stride_array.data, stride, N * sizeof(int));
std::memcpy(dilation_array.data, dilation, N * sizeof(int));
std::memcpy(pad_array.data, pad, N * sizeof(int));
Set<T, CUDAContext>(img_size, 0, img_data, context);
hipLaunchKernelGGL(( Im2ColNdNCHWCUDAKernel<T, N, true>)
, dim3(::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(),
outer_size,
inner_size,
kernel_size,
img_shape_array,
col_shape_array,
kernel_shape_array,
stride_array,
dilation_array,
pad_array,
col_data,
img_data);
}
} // namespace
template <>
void Im2Col<float, CUDAContext, StorageOrder::NCHW>(
const int channels,
const int height,
const int width,
const int kernel_h,
const int kernel_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int pad_b,
const int pad_r,
const int stride_h,
const int stride_w,
const float* img_data,
float* col_data,
CUDAContext* context) {
const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
const int output_h = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
const int output_w = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
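  // Standard convolution geometry: dkernel_* is the dilated kernel extent
  // dilation * (kernel - 1) + 1, and output = (input + pads - dkernel) / stride + 1.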
const int num_kernels = channels * output_h * output_w;
hipLaunchKernelGGL(( Im2ColNCHWCUDAKernel<float>)
, dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(),
num_kernels,
height,
width,
kernel_h,
kernel_w,
dilation_h,
dilation_w,
pad_t,
pad_l,
stride_h,
stride_w,
output_h,
output_w,
img_data,
col_data);
}
template <>
void Im2Col<float, CUDAContext, StorageOrder::NHWC>(
const int channels,
const int height,
const int width,
const int kernel_h,
const int kernel_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int pad_b,
const int pad_r,
const int stride_h,
const int stride_w,
const float* img_data,
float* col_data,
CUDAContext* context) {
const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
const int output_h = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
const int output_w = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
const int num_kernels = output_h * output_w * channels;
hipLaunchKernelGGL(( Im2ColNHWCCUDAKernel<float>)
, dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(),
num_kernels,
height,
width,
kernel_h,
kernel_w,
dilation_h,
dilation_w,
pad_t,
pad_l,
stride_h,
stride_w,
output_w,
channels,
img_data,
col_data);
}
template <>
void Col2Im<float, CUDAContext, StorageOrder::NCHW>(
const int channels,
const int height,
const int width,
const int kernel_h,
const int kernel_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int pad_b,
const int pad_r,
const int stride_h,
const int stride_w,
const float* col_data,
float* img_data,
CUDAContext* context) {
const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
const int output_h = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
const int output_w = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
const int num_kernels = channels * height * width;
hipLaunchKernelGGL(( Col2ImNCHWCUDAKernel<float>)
, dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(),
num_kernels,
height,
width,
kernel_h,
kernel_w,
dilation_h,
dilation_w,
pad_t,
pad_l,
stride_h,
stride_w,
output_h,
output_w,
col_data,
img_data);
}
template <>
void Col2Im<float, CUDAContext, StorageOrder::NHWC>(
const int channels,
const int height,
const int width,
const int kernel_h,
const int kernel_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int pad_b,
const int pad_r,
const int stride_h,
const int stride_w,
const float* col_data,
float* img_data,
CUDAContext* context) {
const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
const int output_h = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
const int output_w = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
const int num_kernels = height * width * channels;
hipLaunchKernelGGL(( Col2ImNHWCCUDAKernel<float>)
, dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(),
num_kernels,
width,
channels,
kernel_h,
kernel_w,
dilation_h,
dilation_w,
pad_t,
pad_l,
stride_h,
stride_w,
output_h,
output_w,
col_data,
img_data);
}
template <>
void Im2ColNd<float, CUDAContext, StorageOrder::NCHW>(
const int N,
const int img_size,
const int col_size,
const int* img_shape,
const int* col_shape,
const int* kernel_shape,
const int* stride,
const int* dilation,
const int* pad,
const float* img_data,
float* col_data,
CUDAContext* context) {
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1(
N,
Im2ColNdNCHWCUDAImpl,
float,
img_size,
col_size,
img_shape,
col_shape,
kernel_shape,
stride,
dilation,
pad,
img_data,
col_data,
context);
}
template <>
void Col2ImNd<float, CUDAContext, StorageOrder::NCHW>(
const int N,
const int img_size,
const int col_size,
const int* img_shape,
const int* col_shape,
const int* kernel_shape,
const int* stride,
const int* dilation,
const int* pad,
const float* col_data,
float* img_data,
CUDAContext* context) {
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1(
N,
Col2ImNdNCHWCUDAImpl,
float,
img_size,
col_size,
img_shape,
col_shape,
kernel_shape,
stride,
dilation,
pad,
col_data,
img_data,
context);
}
template <>
void CopyMatrix<CUDAContext>(
const size_t itemsize,
const int M,
const int N,
const void* A,
const int lda,
void* B,
const int ldb,
CUDAContext* context,
TypeMeta::TypedCopy copy) {
CAFFE_ENFORCE(!copy, "Copy constructor is not supported in CUDA context");
hipMemcpy2DAsync(
B,
ldb * itemsize,
A,
lda * itemsize,
N * itemsize,
M,
hipMemcpyDeviceToDevice,
context->cuda_stream());
}
template <>
void CopyVector<float, CUDAContext>(
const int N,
const float* src,
float* dst,
CUDAContext* context) {
if (src != dst && N > 0) {
hipMemcpyAsync(
dst,
src,
sizeof(float) * N,
hipMemcpyDeviceToDevice,
context->cuda_stream());
}
}
namespace {
template <typename T>
using BlockReduce = hipcub::BlockReduce<T, CAFFE_CUDA_NUM_THREADS>;
template <typename T, class Reducer>
__global__ void RowwiseReduceKernel(
const int rows,
const int cols,
const Reducer reducer,
const T init,
const T* X,
T* Y) {
__shared__ typename BlockReduce<T>::TempStorage temp_storage;
for (int i = blockIdx.x; i < rows; i += gridDim.x) {
T val = init;
for (int j = threadIdx.x; j < cols; j += blockDim.x) {
val = reducer(X[i * cols + j], val);
}
val = BlockReduce<T>(temp_storage).Reduce(val, reducer);
if (threadIdx.x == 0) {
Y[i] = val;
}
__syncthreads();
}
}
template <typename T, class Reducer>
__global__ void ColwiseReduceKernel(
const int rows,
const int cols,
const Reducer reducer,
const T init,
const T* X,
T* Y) {
__shared__ typename BlockReduce<T>::TempStorage temp_storage;
for (int i = blockIdx.x; i < cols; i += gridDim.x) {
T val = init;
for (int j = threadIdx.x; j < rows; j += blockDim.x) {
val = reducer(X[j * cols + i], val);
}
val = BlockReduce<T>(temp_storage).Reduce(val, reducer);
if (threadIdx.x == 0) {
Y[i] = val;
}
__syncthreads();
}
}
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_ROWWISE_MAX(T) \
template <> \
void RowwiseMax<T, CUDAContext>( \
const int N, const int D, const T* x, T* y, CUDAContext* context) { \
hipLaunchKernelGGL(( RowwiseReduceKernel), \
        dim3(::min(N, CAFFE_MAXIMUM_NUM_BLOCKS)),                        \
        dim3(CAFFE_CUDA_NUM_THREADS),                                    \
0, \
context->cuda_stream(), \
N, D, hipcub::Max(), std::numeric_limits<T>::lowest(), x, y); \
}
CAFFE2_SPECIALIZED_CUDA_ROWWISE_MAX(float)
#undef CAFFE2_SPECIALIZED_CUDA_ROWWISE_MAX
#define CAFFE2_SPECIALIZED_CUDA_COLWISE_MAX(T) \
template <> \
void ColwiseMax<T, CUDAContext>( \
const int N, const int D, const T* x, T* y, CUDAContext* context) { \
hipLaunchKernelGGL(( ColwiseReduceKernel), \
        dim3(::min(D, CAFFE_MAXIMUM_NUM_BLOCKS)),                        \
        dim3(CAFFE_CUDA_NUM_THREADS),                                    \
0, \
context->cuda_stream(), \
N, D, hipcub::Max(), std::numeric_limits<T>::lowest(), x, y); \
}
CAFFE2_SPECIALIZED_CUDA_COLWISE_MAX(float)
#undef CAFFE2_SPECIALIZED_CUDA_COLWISE_MAX
namespace {
__global__ void
maximum_kernel(const int N, const float alpha, const float* x, float* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = fmaxf(x[i], alpha);
}
}
} // namespace
template <>
void Maximum(
const int N,
const float alpha,
const float* x,
float* y,
CUDAContext* context) {
hipLaunchKernelGGL(( maximum_kernel),
dim3(::min(N, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), N, alpha, x, y);
}
namespace {
template <typename T, class Reducer, int D>
__global__ void ReduceTensorCUDAKernel(
const int outer_size,
const int inner_size,
SimpleArray<int, D> X_strides,
SimpleArray<FixedDivisor<int>, D> Y_dims,
const Reducer reducer,
const T init,
const T* X,
T* Y) {
__shared__ typename BlockReduce<T>::TempStorage temp_storage;
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
T val = init;
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
int X_index = 0;
int Y_index = i * inner_size + j;
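      // Convert the linear index into Y back into an offset into X: DivMod
      // peels one (transposed) coordinate per dimension and accumulates the
      // corresponding X stride, walking innermost to outermost.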
#pragma unroll
for (int d = D - 1; d >= 0; --d) {
int r;
Y_dims.data[d].DivMod(Y_index, &Y_index, &r);
X_index += r * X_strides.data[d];
}
#if __CUDA_ARCH__ >= 350
val = reducer(val, __ldg(X + X_index));
#else
val = reducer(val, X[X_index]);
#endif
}
val = BlockReduce<T>(temp_storage).Reduce(val, reducer);
if (threadIdx.x == 0) {
Y[i] = val;
}
__syncthreads();
}
}
template <typename T, class Reducer, int D>
void ReduceTensorCUDAImpl(
const int outer_size,
const int inner_size,
const int* dims,
const int* axes,
const Reducer& reducer,
const T& init,
const T* X,
T* Y,
CUDAContext* context) {
SimpleArray<int, D> X_strides;
SimpleArray<FixedDivisor<int>, D> Y_dims;
utils::ComputeTransposedStrides(D, dims, axes, X_strides.data);
for (int i = 0; i < D; ++i) {
Y_dims.data[i] = FixedDivisor<int>(dims[axes[i]]);
}
hipLaunchKernelGGL(( ReduceTensorCUDAKernel<T, Reducer, D>)
, dim3(::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(),
outer_size, inner_size, X_strides, Y_dims, reducer, init, X, Y);
}
template <typename T, class Reducer>
void ReduceTensorCUDA(
const int num_dims,
const int* dims,
const int num_axes,
const int* axes,
const Reducer& reducer,
const T& init,
const T* X,
T* Y,
CUDAContext* context) {
CAFFE_ENFORCE_LE(num_axes, num_dims);
if (X == Y) {
return;
}
std::vector<int> transpose_axes(num_dims);
utils::ComputeTransposeAxesForReduceOp(
num_dims, num_axes, axes, transpose_axes.data());
const int pivot = num_dims - num_axes;
int outer_size = 1;
for (int i = 0; i < pivot; ++i) {
outer_size *= dims[transpose_axes[i]];
}
int inner_size = 1;
for (int i = pivot; i < num_dims; ++i) {
inner_size *= dims[transpose_axes[i]];
}
if (outer_size > 0 && inner_size > 0) {
if (transpose_axes[pivot] == pivot) {
hipLaunchKernelGGL(( RowwiseReduceKernel<T>)
, dim3(::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(),
outer_size, inner_size, reducer, init, X, Y);
return;
}
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_2(
num_dims,
ReduceTensorCUDAImpl,
T,
Reducer,
outer_size,
inner_size,
dims,
transpose_axes.data(),
reducer,
init,
X,
Y,
context);
} else if (outer_size > 0) {
math::Set<T, CUDAContext>(outer_size, init, Y, context);
}
}
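// Fast-path note (illustrative): when transpose_axes[pivot] == pivot the
// reduced axes are already the innermost ones, so each output element
// reads a contiguous run and RowwiseReduceKernel is used directly, e.g.
// reducing axis 2 of a {N, C, H} tensor is a rowwise reduction over rows
// of length H.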
template <typename T>
void ReduceMeanCUDAImpl(
const int num_dims,
const int* dims,
const int num_axes,
const int* axes,
const T* X,
T* Y,
CUDAContext* context) {
ReduceTensorCUDA(
num_dims, dims, num_axes, axes, hipcub::Sum(), T(0), X, Y, context);
const int X_size =
std::accumulate(dims, dims + num_dims, 1, std::multiplies<int>());
int scale = 1;
for (int i = 0; i < num_axes; ++i) {
scale *= dims[axes[i]];
}
const int Y_size = X_size / scale;
Scale<T, CUDAContext>(
Y_size, 1.0f / static_cast<float>(scale), Y, Y, context);
}
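// Worked example (illustrative): for dims {2, 3} reduced over axis 1, the
// Sum pass writes 2 row sums, X_size = 6, scale = 3, Y_size = 2, and the
// final Scale multiplies each row sum by 1/3 to produce the mean.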
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_REDUCE_MIN(T) \
template <> \
void ReduceMin<T, CUDAContext>( \
const int num_dims, \
const int* dims, \
const int num_axes, \
const int* axes, \
const T* X, \
T* Y, \
CUDAContext* context) { \
ReduceTensorCUDA( \
num_dims, \
dims, \
num_axes, \
axes, \
hipcub::Min(), \
std::numeric_limits<T>::max(), \
X, \
Y, \
context); \
}
CAFFE2_SPECIALIZED_CUDA_REDUCE_MIN(std::int32_t)
CAFFE2_SPECIALIZED_CUDA_REDUCE_MIN(std::int64_t)
CAFFE2_SPECIALIZED_CUDA_REDUCE_MIN(float)
CAFFE2_SPECIALIZED_CUDA_REDUCE_MIN(double)
#undef CAFFE2_SPECIALIZED_CUDA_REDUCE_MIN
#define CAFFE2_SPECIALIZED_CUDA_REDUCE_MAX(T) \
template <> \
void ReduceMax<T, CUDAContext>( \
const int num_dims, \
const int* dims, \
const int num_axes, \
const int* axes, \
const T* X, \
T* Y, \
CUDAContext* context) { \
ReduceTensorCUDA( \
num_dims, \
dims, \
num_axes, \
axes, \
hipcub::Max(), \
std::numeric_limits<T>::lowest(), \
X, \
Y, \
context); \
}
CAFFE2_SPECIALIZED_CUDA_REDUCE_MAX(std::int32_t)
CAFFE2_SPECIALIZED_CUDA_REDUCE_MAX(std::int64_t)
CAFFE2_SPECIALIZED_CUDA_REDUCE_MAX(float)
CAFFE2_SPECIALIZED_CUDA_REDUCE_MAX(double)
#undef CAFFE2_SPECIALIZED_CUDA_REDUCE_MAX
#define CAFFE2_SPECIALIZED_CUDA_REDUCE_SUM(T) \
template <> \
void ReduceSum<T, CUDAContext>( \
const int num_dims, \
const int* dims, \
const int num_axes, \
const int* axes, \
const T* X, \
T* Y, \
CUDAContext* context) { \
ReduceTensorCUDA( \
num_dims, dims, num_axes, axes, hipcub::Sum(), T(0), X, Y, context); \
}
CAFFE2_SPECIALIZED_CUDA_REDUCE_SUM(std::int32_t)
CAFFE2_SPECIALIZED_CUDA_REDUCE_SUM(std::int64_t)
CAFFE2_SPECIALIZED_CUDA_REDUCE_SUM(float)
CAFFE2_SPECIALIZED_CUDA_REDUCE_SUM(double)
#undef CAFFE2_SPECIALIZED_CUDA_REDUCE_SUM
#define CAFFE2_SPECIALIZED_CUDA_REDUCE_MEAN(T) \
template <> \
void ReduceMean<T, CUDAContext>( \
const int num_dims, \
const int* dims, \
const int num_axes, \
const int* axes, \
const T* X, \
T* Y, \
CUDAContext* context) { \
ReduceMeanCUDAImpl<T>(num_dims, dims, num_axes, axes, X, Y, context); \
}
CAFFE2_SPECIALIZED_CUDA_REDUCE_MEAN(float)
#undef CAFFE2_SPECIALIZED_CUDA_REDUCE_MEAN
namespace {
template <typename T, int D>
__global__ void BroadcastCUDAKernel(
const int Y_size,
const SimpleArray<int, D> X_strides,
const SimpleArray<int, D> Y_dims,
const T* X,
T* Y) {
CUDA_1D_KERNEL_LOOP(Y_index, Y_size) {
int X_index = 0;
int Y_index_val = Y_index;
#pragma unroll
for (int i = D - 1; i >= 0; --i) {
X_index += X_strides.data[i] == 0
? 0
: (Y_index_val % Y_dims.data[i]) * X_strides.data[i];
Y_index_val /= Y_dims.data[i];
}
#if __CUDA_ARCH__ >= 350
Y[Y_index] = __ldg(X + X_index);
#else
Y[Y_index] = X[X_index];
#endif
}
}
template <typename T, int D>
void BroadcastCUDAImpl(
const int X_ndim,
const int* X_dims,
const int* Y_dims,
const T* X,
T* Y,
CUDAContext* context) {
SimpleArray<int, D> X_strides_array;
SimpleArray<int, D> Y_dims_array;
const int d = D - X_ndim;
std::fill(X_strides_array.data, X_strides_array.data + d, 0);
int cur_stride = 1;
for (int i = D - 1; i >= d; --i) {
CAFFE_ENFORCE(X_dims[i - d] == 1 || X_dims[i - d] == Y_dims[i]);
X_strides_array.data[i] = X_dims[i - d] == 1 ? 0 : cur_stride;
cur_stride *= X_dims[i - d];
}
std::copy_n(Y_dims, D, Y_dims_array.data);
const int Y_size =
std::accumulate(Y_dims, Y_dims + D, 1, std::multiplies<int>());
hipLaunchKernelGGL(( BroadcastCUDAKernel<T, D>)
, dim3(CAFFE_GET_BLOCKS(Y_size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), Y_size, X_strides_array, Y_dims_array, X, Y);
}
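// Broadcasting sketch (illustrative): a size-1 input dimension is given
// stride 0, so every output coordinate along that dimension reads the
// same X element. E.g. X_dims {3, 1} against Y_dims {3, 4} yields X
// strides {1, 0}, i.e. Y(i, j) = X(i, 0) for all j.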
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_BROADCAST(T) \
template <> \
void Broadcast<T, CUDAContext>( \
const int X_ndim, \
const int* X_dims, \
const int Y_ndim, \
const int* Y_dims, \
const T* X, \
T* Y, \
CUDAContext* context) { \
CAFFE_ENFORCE_LE(X_ndim, Y_ndim); \
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1( \
Y_ndim, BroadcastCUDAImpl, T, X_ndim, X_dims, Y_dims, X, Y, context); \
}
CAFFE2_SPECIALIZED_CUDA_BROADCAST(std::int32_t)
CAFFE2_SPECIALIZED_CUDA_BROADCAST(std::int64_t)
CAFFE2_SPECIALIZED_CUDA_BROADCAST(float)
CAFFE2_SPECIALIZED_CUDA_BROADCAST(double)
#undef CAFFE2_SPECIALIZED_CUDA_BROADCAST
namespace {
template <typename T>
__global__ void RowwiseMomentsCUDAKernel(
const int rows,
const int cols,
const T* X,
T* mean,
T* variance) {
__shared__ typename BlockReduce<T>::TempStorage m_storage;
__shared__ typename BlockReduce<T>::TempStorage v_storage;
for (int i = blockIdx.x; i < rows; i += gridDim.x) {
T m_val = 0;
T v_val = 0;
for (int j = threadIdx.x; j < cols; j += blockDim.x) {
const int X_index = i * cols + j;
#if __CUDA_ARCH__ >= 350
m_val += __ldg(X + X_index);
v_val += __ldg(X + X_index) * __ldg(X + X_index);
#else
m_val += X[X_index];
v_val += X[X_index] * X[X_index];
#endif
}
m_val = BlockReduce<T>(m_storage).Reduce(m_val, hipcub::Sum());
v_val = BlockReduce<T>(v_storage).Reduce(v_val, hipcub::Sum());
if (threadIdx.x == 0) {
mean[i] = m_val / static_cast<T>(cols);
variance[i] = v_val / static_cast<T>(cols) - mean[i] * mean[i];
}
__syncthreads();
}
}
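// The kernel above uses the one-pass identity Var[x] = E[x^2] - E[x]^2;
// illustrative check: x = {1, 2, 3} gives E[x] = 2, E[x^2] = 14/3, so
// Var[x] = 14/3 - 4 = 2/3. This is cheap but can lose precision when the
// mean is large relative to the variance.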
template <typename T, int D>
__global__ void MomentsCUDAKernel(
const int outer_size,
const int inner_size,
SimpleArray<int, D> X_strides,
SimpleArray<FixedDivisor<int>, D> Y_dims,
const T* X,
T* mean,
T* variance) {
__shared__ typename BlockReduce<T>::TempStorage m_storage;
__shared__ typename BlockReduce<T>::TempStorage v_storage;
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
T m_val = 0;
T v_val = 0;
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
int X_index = 0;
int Y_index = i * inner_size + j;
#pragma unroll
for (int d = D - 1; d >= 0; --d) {
int r;
Y_dims.data[d].DivMod(Y_index, &Y_index, &r);
X_index += r * X_strides.data[d];
}
#if __CUDA_ARCH__ >= 350
m_val += __ldg(X + X_index);
v_val += __ldg(X + X_index) * __ldg(X + X_index);
#else
m_val += X[X_index];
v_val += X[X_index] * X[X_index];
#endif
}
m_val = BlockReduce<T>(m_storage).Reduce(m_val, hipcub::Sum());
v_val = BlockReduce<T>(v_storage).Reduce(v_val, hipcub::Sum());
if (threadIdx.x == 0) {
mean[i] = m_val / static_cast<T>(inner_size);
variance[i] = v_val / static_cast<T>(inner_size) - mean[i] * mean[i];
}
__syncthreads();
}
}
template <typename T, int D>
void MomentsCUDAImpl(
const int outer_size,
const int inner_size,
const int* dims,
const int* axes,
const T* X,
T* mean,
T* variance,
CUDAContext* context) {
SimpleArray<int, D> X_strides;
SimpleArray<FixedDivisor<int>, D> Y_dims;
utils::ComputeTransposedStrides(D, dims, axes, X_strides.data);
for (int i = 0; i < D; ++i) {
Y_dims.data[i] = FixedDivisor<int>(dims[axes[i]]);
}
hipLaunchKernelGGL(( MomentsCUDAKernel<T, D>)
, dim3(::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(),
outer_size, inner_size, X_strides, Y_dims, X, mean, variance);
}
template <typename T>
void MomentsCUDA(
const int num_dims,
const int* dims,
const int num_axes,
const int* axes,
const T* X,
T* mean,
T* variance,
CUDAContext* context) {
CAFFE_ENFORCE_LE(num_axes, num_dims);
std::vector<int> transpose_axes(num_dims);
utils::ComputeTransposeAxesForReduceOp(
num_dims, num_axes, axes, transpose_axes.data());
const int pivot = num_dims - num_axes;
int outer_size = 1;
for (int i = 0; i < pivot; ++i) {
outer_size *= dims[transpose_axes[i]];
}
int inner_size = 1;
for (int i = pivot; i < num_dims; ++i) {
inner_size *= dims[transpose_axes[i]];
}
if (outer_size > 0 && inner_size > 0) {
if (transpose_axes[pivot] == pivot) {
hipLaunchKernelGGL(( RowwiseMomentsCUDAKernel<T>)
, dim3(::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(),
outer_size, inner_size, X, mean, variance);
return;
}
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1(
num_dims,
MomentsCUDAImpl,
T,
outer_size,
inner_size,
dims,
transpose_axes.data(),
X,
mean,
variance,
context);
}
}
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_MOMENTS(T) \
template <> \
void Moments<T, CUDAContext>( \
const int num_dims, \
const int* dims, \
const int num_axes, \
const int* axes, \
const T* X, \
T* mean, \
T* variance, \
CUDAContext* context) { \
MomentsCUDA<T>( \
num_dims, dims, num_axes, axes, X, mean, variance, context); \
}
CAFFE2_SPECIALIZED_CUDA_MOMENTS(float)
#undef CAFFE2_SPECIALIZED_CUDA_MOMENTS
namespace {
template <typename T, int D>
__global__ void TransposeCUDAKernel(
const int size,
const SimpleArray<int, D> X_strides,
const SimpleArray<FixedDivisor<int>, D> Y_dims,
const T* X,
T* Y) {
CUDA_1D_KERNEL_LOOP(Y_index, size) {
int X_index = 0;
int Y_index_val = Y_index;
#pragma unroll
for (int i = D - 1; i >= 0; --i) {
int d;
Y_dims.data[i].DivMod(Y_index_val, &Y_index_val, &d);
X_index += d * X_strides.data[i];
}
#if __CUDA_ARCH__ >= 350
Y[Y_index] = __ldg(X + X_index);
#else
Y[Y_index] = X[X_index];
#endif
}
}
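// Index-mapping example (illustrative): transposing a {2, 3} matrix with
// axes {1, 0} gives Y dims {3, 2} and X_strides {1, 3}; linear Y_index 4
// decodes to Y(2, 0), which reads X_index = 2*1 + 0*3 = 2, i.e. X(0, 2).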
template <typename T, int D>
void TransposeCUDAImpl(
const int* dims,
const int* axes,
const T* X,
T* Y,
CUDAContext* context) {
SimpleArray<int, D> X_strides;
SimpleArray<FixedDivisor<int>, D> Y_dims;
utils::ComputeTransposedStrides(D, dims, axes, X_strides.data);
int size = 1;
for (int i = 0; i < D; ++i) {
Y_dims.data[i] = FixedDivisor<int>(dims[axes[i]]);
size *= dims[i];
}
hipLaunchKernelGGL(( TransposeCUDAKernel<T, D>)
, dim3(CAFFE_GET_BLOCKS(size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), size, X_strides, Y_dims, X, Y);
}
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(T) \
template <> \
void Transpose<T, CUDAContext>( \
const int ndim, \
const int* dims, \
const int* axes, \
const T* X, \
T* Y, \
CUDAContext* context) { \
if (utils::IsIdentityPermutation(ndim, axes)) { \
const int size = \
std::accumulate(dims, dims + ndim, 1, std::multiplies<int>()); \
context->template Copy<T, CUDAContext, CUDAContext>(size, X, Y); \
return; \
} \
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1( \
ndim, TransposeCUDAImpl, T, dims, axes, X, Y, context); \
}
CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(float)
CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(double)
CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(int)
CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(TIndex)
#undef CAFFE2_SPECIALIZED_CUDA_TRANSPOSE
} // namespace math
} // namespace caffe2
| 4daa1a753727151d1d05f6f3effb7fef92a7199c.cu | // Implements the math functions for GPU.
#include "caffe2/utils/math.h"
#include <limits>
#include <numeric>
#include <vector>
#include <cub/block/block_reduce.cuh>
#include <cub/cub.cuh>
#include <thrust/functional.h>
#include "caffe2/core/context_gpu.h"
#include "caffe2/utils/conversions.h"
#include "caffe2/utils/fixed_divisor.h"
#include "caffe2/utils/math_utils.h"
#if THRUST_VERSION >= 100800
#define THRUST_SUPPORTS_PER_THREAD
#endif // THRUST_VERSION >= 100800
namespace caffe2 {
namespace math {
namespace {
#define DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR(Func, expr) \
template <typename T> \
struct Func##Functor { \
inline __host__ __device__ T \
operator()(const T& lhs, const T& rhs) const { \
return lhs expr rhs; \
} \
}; \
template <> \
struct Func##Functor<float16> { \
inline __host__ __device__ float16 \
operator()(const float16& lhs, const float16& rhs) const { \
return convert::To<float, float16>(convert::To<float16, float>( \
lhs) expr convert::To<float16, float>(rhs)); \
} \
};
DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR(Add, +)
DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR(Sub, -)
DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR(Mul, *)
DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR(Div, /)
#undef DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR
template <typename T>
__global__ void SinCosCUDAKernel(const int N, const T* X, T* S, T* C) {
CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 350
sincos(__ldg(X + i), S + i, C + i);
#else
sincos(X[i], S + i, C + i);
#endif
}
}
template <typename TIn, typename TOut, class BinaryOperator>
__global__ void SimpleBinaryOpCUDAKernel(
const int N,
const BinaryOperator op,
const TIn* A,
const TIn* B,
TOut* C) {
CUDA_1D_KERNEL_LOOP(i, N) {
C[i] = op(A[i], B[i]);
}
}
template <typename TIn, typename TOut, class BinaryOperator, bool broadcast_1st>
__global__ void RowwiseBinaryOpCUDAKernel(
const int size,
const FixedDivisor<int> cols,
const BinaryOperator op,
const TIn* A,
const TIn* B,
TOut* C) {
CUDA_1D_KERNEL_LOOP(C_index, size) {
const int j = cols.Mod(C_index);
const int A_index = broadcast_1st ? j : C_index;
const int B_index = broadcast_1st ? C_index : j;
C[C_index] = op(A[A_index], B[B_index]);
}
}
template <typename TIn, typename TOut, class BinaryOperator, bool broadcast_1st>
__global__ void ColwiseBinaryOpCUDAKernel(
const int size,
const FixedDivisor<int> cols,
const BinaryOperator op,
const TIn* A,
const TIn* B,
TOut* C) {
CUDA_1D_KERNEL_LOOP(C_index, size) {
const int i = cols.Div(C_index);
const int A_index = broadcast_1st ? i : C_index;
const int B_index = broadcast_1st ? C_index : i;
C[C_index] = op(A[A_index], B[B_index]);
}
}
template <typename TIn, typename TOut, class BinaryOperator, int D>
__global__ void BroadcastBinaryOpCUDAKernel(
const int size,
const SimpleArray<int, D> A_strides,
const SimpleArray<int, D> B_strides,
const SimpleArray<FixedDivisor<int>, D> C_dims,
const BinaryOperator op,
const TIn* A,
const TIn* B,
TOut* C) {
CUDA_1D_KERNEL_LOOP(C_index, size) {
int A_index = 0;
int B_index = 0;
int C_index_val = C_index;
#pragma unroll
for (int i = D - 1; i >= 0; --i) {
int d;
C_dims.data[i].DivMod(C_index_val, &C_index_val, &d);
A_index += d * A_strides.data[i];
B_index += d * B_strides.data[i];
}
C[C_index] = op(A[A_index], B[B_index]);
}
}
template <typename TIn, typename TOut, class BinaryOperator>
void BinaryOpWith2DBroadcasting(
const int ndim,
const int* dims,
const int pivot,
const bool rowwise_broadcast,
const bool broadcast_1st,
const BinaryOperator& op,
const TIn* A,
const TIn* B,
TOut* C,
CUDAContext* context) {
const int rows =
std::accumulate(dims, dims + pivot, 1, std::multiplies<int>());
const int cols =
std::accumulate(dims + pivot, dims + ndim, 1, std::multiplies<int>());
if (rows == 0 || cols == 0) {
return;
}
const int size = rows * cols;
const FixedDivisor<int> cols_div(cols);
if (rowwise_broadcast) {
if (broadcast_1st) {
      RowwiseBinaryOpCUDAKernel<TIn, TOut, BinaryOperator, true>
<<<CAFFE_GET_BLOCKS(size),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(size, cols_div, op, A, B, C);
} else {
      RowwiseBinaryOpCUDAKernel<TIn, TOut, BinaryOperator, false>
<<<CAFFE_GET_BLOCKS(size),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(size, cols_div, op, A, B, C);
}
} else {
if (broadcast_1st) {
      ColwiseBinaryOpCUDAKernel<TIn, TOut, BinaryOperator, true>
<<<CAFFE_GET_BLOCKS(size),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(size, cols_div, op, A, B, C);
} else {
      ColwiseBinaryOpCUDAKernel<TIn, TOut, BinaryOperator, false>
<<<CAFFE_GET_BLOCKS(size),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(size, cols_div, op, A, B, C);
}
}
}
template <typename TIn, typename TOut, class BinaryOperator, int D>
void BroadcastBinaryOpImpl(
const int* A_dims,
const int* B_dims,
const int* C_dims,
const BinaryOperator& op,
const TIn* A,
const TIn* B,
TOut* C,
CUDAContext* context) {
SimpleArray<int, D> A_strides_array;
SimpleArray<int, D> B_strides_array;
SimpleArray<FixedDivisor<int>, D> C_dims_array;
int A_stride = 1;
int B_stride = 1;
for (int i = D - 1; i >= 0; --i) {
if (C_dims[i] == 0) {
return;
}
A_strides_array.data[i] = A_dims[i] == 1 ? 0 : A_stride;
B_strides_array.data[i] = B_dims[i] == 1 ? 0 : B_stride;
A_stride *= A_dims[i];
B_stride *= B_dims[i];
C_dims_array.data[i] = FixedDivisor<int>(C_dims[i]);
}
const int size =
std::accumulate(C_dims, C_dims + D, 1, std::multiplies<int>());
BroadcastBinaryOpCUDAKernel<TIn, TOut, BinaryOperator, D>
<<<CAFFE_GET_BLOCKS(size),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(
size, A_strides_array, B_strides_array, C_dims_array, op, A, B, C);
}
template <typename TIn, typename TOut, class BinaryOperator>
void BroadcastBinaryOp(
const int A_ndim,
const int* A_dims,
const int B_ndim,
const int* B_dims,
const BinaryOperator& op,
const TIn* A,
const TIn* B,
TOut* C,
CUDAContext* context) {
const int ndim = std::max(A_ndim, B_ndim);
std::vector<int> A_dims_array(ndim);
std::vector<int> B_dims_array(ndim);
std::vector<int> C_dims_array(ndim);
utils::ComputeBroadcastBinaryOpDims(
A_ndim,
A_dims,
B_ndim,
B_dims,
A_dims_array.data(),
B_dims_array.data(),
C_dims_array.data());
if (A_dims_array == B_dims_array) {
const int size = std::accumulate(
C_dims_array.cbegin(), C_dims_array.cend(), 1, std::multiplies<int>());
SimpleBinaryOpCUDAKernel<TIn, TOut, BinaryOperator>
<<<CAFFE_GET_BLOCKS(size),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(size, op, A, B, C);
return;
}
int pivot;
bool broadcast_1st;
if (utils::IsRowwiseBroadcastBinaryOp(
ndim,
A_dims_array.data(),
B_dims_array.data(),
&pivot,
&broadcast_1st)) {
BinaryOpWith2DBroadcasting<TIn, TOut, BinaryOperator>(
ndim,
C_dims_array.data(),
pivot,
true,
broadcast_1st,
op,
A,
B,
C,
context);
return;
}
if (utils::IsColwiseBroadcastBinaryOp(
ndim,
A_dims_array.data(),
B_dims_array.data(),
&pivot,
&broadcast_1st)) {
BinaryOpWith2DBroadcasting<TIn, TOut, BinaryOperator>(
ndim,
C_dims_array.data(),
pivot,
false,
broadcast_1st,
op,
A,
B,
C,
context);
return;
}
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_3(
ndim,
BroadcastBinaryOpImpl,
TIn,
TOut,
BinaryOperator,
A_dims_array.data(),
B_dims_array.data(),
C_dims_array.data(),
op,
A,
B,
C,
context);
}
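// Dispatch order of BroadcastBinaryOp (illustrative): identical padded
// shapes take the plain elementwise kernel; broadcasts that collapse to a
// 2D row- or column-wise pattern take the FixedDivisor fast paths; only
// irregular patterns fall through to the fully strided D-dim kernel.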
} // namespace
#define DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(T, Func, op) \
__global__ void Func##CUDAKernel(const int N, const T* X, T* Y) { \
CUDA_1D_KERNEL_LOOP(i, N) { \
Y[i] = op(X[i]); \
} \
} \
template <> \
void Func<T, CUDAContext>( \
const int N, const T* x, T* y, CUDAContext* context) { \
Func##CUDAKernel<<< \
CAFFE_GET_BLOCKS(N), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>(N, x, y); \
}
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Exp, expf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Log, logf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Cos, cosf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Acos, acosf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sin, sinf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Asin, asinf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Tan, tanf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Atan, atanf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Abs, fabsf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sqr, utils::Square<float>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sqrt, sqrtf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Rsqrt, rsqrtf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Cbrt, cbrtf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Cube, utils::Cube<float>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Cube, utils::Cube<double>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(
std::int32_t,
Cube,
utils::Cube<std::int32_t>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(
std::int64_t,
Cube,
utils::Cube<std::int64_t>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(bool, Not, utils::Not)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Neg, utils::Negate<float>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Neg, utils::Negate<double>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(
std::int32_t,
Neg,
utils::Negate<std::int32_t>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(
std::int64_t,
Neg,
utils::Negate<std::int64_t>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sign, utils::Sign<float>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Sign, utils::Sign<double>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(
std::int32_t,
Sign,
utils::Sign<std::int32_t>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(
std::int64_t,
Sign,
utils::Sign<std::int64_t>)
#undef DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION
#define CAFFE2_SPECIALIZED_CUDA_SINCOS(T) \
template <> \
void SinCos<T, CUDAContext>( \
const int N, const T* x, T* ys, T* yc, CUDAContext* context) { \
SinCosCUDAKernel<<< \
CAFFE_GET_BLOCKS(N), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>(N, x, ys, yc); \
}
CAFFE2_SPECIALIZED_CUDA_SINCOS(float)
CAFFE2_SPECIALIZED_CUDA_SINCOS(double)
#undef CAFFE2_SPECIALIZED_CUDA_SINCOS
#define DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(TIn, TOut, Func, Op) \
template <> \
void Func<TIn, CUDAContext>( \
const int N, \
const TIn* A, \
const TIn* B, \
TOut* C, \
CUDAContext* context) { \
SimpleBinaryOpCUDAKernel<TIn, TOut, Op<TIn>> \
<<<CAFFE_GET_BLOCKS(N), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>(N, Op<TIn>(), A, B, C); \
}
#define DEFINE_SIMPLE_CUDA_COMPARE_FUNCTION(Func, Op) \
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(std::int32_t, bool, Func, Op) \
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(std::int64_t, bool, Func, Op) \
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(float, bool, Func, Op) \
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(double, bool, Func, Op) \
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(bool, bool, Func, Op)
DEFINE_SIMPLE_CUDA_COMPARE_FUNCTION(EQ, thrust::equal_to)
DEFINE_SIMPLE_CUDA_COMPARE_FUNCTION(NE, thrust::not_equal_to)
DEFINE_SIMPLE_CUDA_COMPARE_FUNCTION(LT, thrust::less)
DEFINE_SIMPLE_CUDA_COMPARE_FUNCTION(LE, thrust::less_equal)
DEFINE_SIMPLE_CUDA_COMPARE_FUNCTION(GT, thrust::greater)
DEFINE_SIMPLE_CUDA_COMPARE_FUNCTION(GE, thrust::greater_equal)
#undef DEFINE_SIMPLE_CUDA_COMPARE_FUNCTION
#define DEFINE_SIMPLE_CUDA_BINARY_FUNCTION(Func, Op) \
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(std::int32_t, std::int32_t, Func, Op) \
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(std::int64_t, std::int64_t, Func, Op) \
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(float, float, Func, Op) \
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(double, double, Func, Op) \
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(float16, float16, Func, Op)
DEFINE_SIMPLE_CUDA_BINARY_FUNCTION(Add, AddFunctor)
DEFINE_SIMPLE_CUDA_BINARY_FUNCTION(Sub, SubFunctor)
DEFINE_SIMPLE_CUDA_BINARY_FUNCTION(Mul, MulFunctor)
DEFINE_SIMPLE_CUDA_BINARY_FUNCTION(Div, DivFunctor)
#undef DEFINE_SIMPLE_CUDA_BINARY_FUNCTION
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(bool, bool, And, thrust::logical_and)
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(bool, bool, Or, thrust::logical_or)
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(bool, bool, Xor, thrust::bit_xor)
#define DEFINE_SIMPLE_CUDA_BITWISE_BINARY_FUNCTION(Func, Op) \
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(bool, bool, Func, Op) \
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(std::int32_t, std::int32_t, Func, Op) \
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(std::int64_t, std::int64_t, Func, Op)
DEFINE_SIMPLE_CUDA_BITWISE_BINARY_FUNCTION(BitwiseAnd, thrust::bit_and)
DEFINE_SIMPLE_CUDA_BITWISE_BINARY_FUNCTION(BitwiseOr, thrust::bit_or)
DEFINE_SIMPLE_CUDA_BITWISE_BINARY_FUNCTION(BitwiseXor, thrust::bit_xor)
#undef DEFINE_SIMPLE_CUDA_BITWISE_BINARY_FUNCTION
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(
float,
float,
ElemwiseMax,
thrust::maximum);
#undef DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION
#define DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(TIn, TOut, Func, Op) \
template <> \
void Rowwise##Func<TIn, CUDAContext, true>( \
const int rows, \
const int cols, \
const TIn* A, \
const TIn* B, \
TOut* C, \
CUDAContext* context) { \
if (rows == 0 || cols == 0) { \
return; \
} \
const int size = rows * cols; \
const FixedDivisor<int> cols_div(cols); \
    RowwiseBinaryOpCUDAKernel<TIn, TOut, Op<TIn>, true>                 \
<<<CAFFE_GET_BLOCKS(size), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>(size, cols_div, Op<TIn>(), A, B, C); \
} \
template <> \
void Rowwise##Func<TIn, CUDAContext, false>( \
const int rows, \
const int cols, \
const TIn* A, \
const TIn* B, \
TOut* C, \
CUDAContext* context) { \
if (rows == 0 || cols == 0) { \
return; \
} \
const int size = rows * cols; \
const FixedDivisor<int> cols_div(cols); \
    RowwiseBinaryOpCUDAKernel<TIn, TOut, Op<TIn>, false>                \
<<<CAFFE_GET_BLOCKS(size), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>(size, cols_div, Op<TIn>(), A, B, C); \
} \
template <> \
void Colwise##Func<TIn, CUDAContext, true>( \
const int rows, \
const int cols, \
const TIn* A, \
const TIn* B, \
TOut* C, \
CUDAContext* context) { \
if (rows == 0 || cols == 0) { \
return; \
} \
const int size = rows * cols; \
const FixedDivisor<int> cols_div(cols); \
    ColwiseBinaryOpCUDAKernel<TIn, TOut, Op<TIn>, true>                 \
<<<CAFFE_GET_BLOCKS(size), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>(size, cols_div, Op<TIn>(), A, B, C); \
} \
template <> \
void Colwise##Func<TIn, CUDAContext, false>( \
const int rows, \
const int cols, \
const TIn* A, \
const TIn* B, \
TOut* C, \
CUDAContext* context) { \
if (rows == 0 || cols == 0) { \
return; \
} \
const int size = rows * cols; \
const FixedDivisor<int> cols_div(cols); \
    ColwiseBinaryOpCUDAKernel<TIn, TOut, Op<TIn>, false>                \
<<<CAFFE_GET_BLOCKS(size), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>(size, cols_div, Op<TIn>(), A, B, C); \
}
#define DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(std::int32_t, bool, Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(std::int64_t, bool, Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(float, bool, Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(double, bool, Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Func, Op)
DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(EQ, thrust::equal_to)
DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(NE, thrust::not_equal_to)
DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(LT, thrust::less)
DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(LE, thrust::less_equal)
DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(GT, thrust::greater)
DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(GE, thrust::greater_equal)
#undef DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION
#define DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION(Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION( \
std::int32_t, std::int32_t, Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION( \
std::int64_t, std::int64_t, Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(float, float, Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(double, double, Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(float16, float16, Func, Op)
DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION(Add, AddFunctor)
DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION(Sub, SubFunctor)
DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION(Mul, MulFunctor)
DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION(Div, DivFunctor)
#undef DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, And, thrust::logical_and)
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Or, thrust::logical_or)
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Xor, thrust::bit_xor)
#define DEFINE_2D_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION( \
std::int32_t, std::int32_t, Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION( \
std::int64_t, std::int64_t, Func, Op)
DEFINE_2D_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseAnd, thrust::bit_and)
DEFINE_2D_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseOr, thrust::bit_or)
DEFINE_2D_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseXor, thrust::bit_xor)
#undef DEFINE_2D_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION
#undef DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION
#define DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(TIn, TOut, Func, Op) \
template <> \
void Func<TIn, CUDAContext>( \
const int A_ndim, \
const int* A_dims, \
const int B_ndim, \
const int* B_dims, \
const TIn* A, \
const TIn* B, \
TOut* C, \
CUDAContext* context) { \
BroadcastBinaryOp<TIn, TOut, Op<TIn>>( \
A_ndim, A_dims, B_ndim, B_dims, Op<TIn>(), A, B, C, context); \
}
#define DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(std::int32_t, bool, Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(std::int64_t, bool, Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(float, bool, Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(double, bool, Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Func, Op)
DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(EQ, thrust::equal_to)
DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(NE, thrust::not_equal_to)
DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(LT, thrust::less)
DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(LE, thrust::less_equal)
DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(GT, thrust::greater)
DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(GE, thrust::greater_equal)
#undef DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION
#define DEFINE_BROADCAST_CUDA_BINARY_FUNCTION(Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION( \
std::int32_t, std::int32_t, Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION( \
std::int64_t, std::int64_t, Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(float, float, Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(double, double, Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(float16, float16, Func, Op)
DEFINE_BROADCAST_CUDA_BINARY_FUNCTION(Add, AddFunctor)
DEFINE_BROADCAST_CUDA_BINARY_FUNCTION(Sub, SubFunctor)
DEFINE_BROADCAST_CUDA_BINARY_FUNCTION(Mul, MulFunctor)
DEFINE_BROADCAST_CUDA_BINARY_FUNCTION(Div, DivFunctor)
#undef DEFINE_BROADCAST_CUDA_BINARY_FUNCTION
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, And, thrust::logical_and)
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Or, thrust::logical_or)
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Xor, thrust::bit_xor)
#define DEFINE_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION( \
std::int32_t, std::int32_t, Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(std::int64_t, std::int64_t, Func, Op)
DEFINE_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseAnd, thrust::bit_and)
DEFINE_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseOr, thrust::bit_or)
DEFINE_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseXor, thrust::bit_xor)
#undef DEFINE_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION
#undef DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION
#define DELEGATE_REDUCTION_FUNCTION(T, Funcname, func) \
template <> \
void Funcname<T, CUDAContext>( \
const int N, \
const T* src, \
T* dst, \
Tensor<CUDAContext>* scratch_ptr, \
CUDAContext* context) { \
size_t memRequired = 0; \
cub::DeviceReduce::func( \
nullptr, memRequired, src, dst, N, context->cuda_stream()); \
auto buffer_size = \
static_cast<TIndex>((memRequired + sizeof(T) - 1) / sizeof(T)); \
scratch_ptr->Resize(std::vector<TIndex>{buffer_size}); \
cub::DeviceReduce::func( \
static_cast<void*>(scratch_ptr->mutable_data<T>()), \
memRequired, \
src, \
dst, \
N, \
context->cuda_stream()); \
}
DELEGATE_REDUCTION_FUNCTION(float, ReduceMin, Min)
DELEGATE_REDUCTION_FUNCTION(float, ReduceMax, Max)
DELEGATE_REDUCTION_FUNCTION(int32_t, ReduceMax, Max)
DELEGATE_REDUCTION_FUNCTION(int64_t, ReduceMax, Max)
#undef DELEGATE_REDUCTION_FUNCTION
// Caffe2 gemm provides a simpler interface to the gemm functions, with the
// limitation that the data has to be contiguous in memory.
template <>
void Gemm<float, CUDAContext>(
const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB,
const int M,
const int N,
const int K,
const float alpha,
const float* A,
const float* B,
const float beta,
float* C,
CUDAContext* context,
TensorProto::DataType math_type) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
CUBLAS_ENFORCE(cublasSgemm(
context->cublas_handle(),
cuTransB,
cuTransA,
N,
M,
K,
&alpha,
B,
ldb,
A,
lda,
&beta,
C,
N));
}
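// Layout note with a usage sketch (illustrative, not part of the original
// file): since cuBLAS is column-major, the call above computes
// C^T = B^T * A^T, which in row-major storage is exactly C = A * B. A
// caller wanting row-major C(M x N) = A(M x K) * B(K x N) just writes:
//
//   math::Gemm<float, CUDAContext>(
//       CblasNoTrans, CblasNoTrans, M, N, K,
//       1.0f, d_A, d_B, 0.0f, d_C, &context);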
template <>
void Gemm<float16, CUDAContext>(
const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB,
const int M,
const int N,
const int K,
const float alpha,
const float16* A,
const float16* B,
const float beta,
float16* C,
CUDAContext* context,
TensorProto::DataType math_type) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
if (math_type == TensorProto_DataType_FLOAT) {
CUBLAS_CHECK(cublasSgemmEx(
context->cublas_handle(),
cuTransB,
cuTransA,
N,
M,
K,
&alpha,
B,
CUDA_R_16F,
ldb,
A,
CUDA_R_16F,
lda,
&beta,
C,
CUDA_R_16F,
N));
} else if (math_type == TensorProto_DataType_FLOAT16) {
// convert alpha, beta from float -> __half
auto alpha_fp16 = convert::floatToHalf(alpha);
auto beta_fp16 = convert::floatToHalf(beta);
// call cublasHgemm
CUBLAS_CHECK(cublasHgemm(
context->cublas_handle(),
cuTransB,
cuTransA,
N,
M,
K,
&alpha_fp16,
(const __half*)B,
ldb,
(const __half*)A,
lda,
&beta_fp16,
(__half*)C,
N));
} else {
// fail
CAFFE_THROW("Unsupported math type");
}
}
template <>
void BiasCHW<float, CUDAContext>(
const float* bias,
const float* bias_multiplier,
const int bias_channels,
const int image_size,
float* image,
CUDAContext* context) {
Gemm<float, CUDAContext>(
CblasNoTrans,
CblasNoTrans,
bias_channels,
image_size,
1,
1,
bias,
bias_multiplier,
1,
image,
context);
}
template <>
void GemmBatched<float, CUDAContext>(
const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB,
const int batch_size,
const int M,
const int N,
const int K,
const float alpha,
const float* A,
const float* B,
const float beta,
float* C,
CUDAContext* context,
Tensor<CUDAContext>* scratch,
TensorProto::DataType math_type) {
const int a_stride = M * K;
const int b_stride = K * N;
const int c_stride = M * N;
#if __CUDACC_VER_MAJOR__ < 8
// loop over matrices in the batch
for (int i = 0; i < batch_size; ++i) {
math::Gemm<float, CUDAContext>(
TransA,
TransB,
M,
N,
K,
alpha,
A + a_stride * i,
B + b_stride * i,
beta,
C + c_stride * i,
context);
}
#else
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
const int lda = (TransA == CblasNoTrans) ? K : M;
const int ldb = (TransB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
CUBLAS_ENFORCE(cublasSgemmStridedBatched(
context->cublas_handle(),
cuTransB,
cuTransA,
N,
M,
K,
&alpha,
B,
ldb,
b_stride,
A,
lda,
a_stride,
&beta,
C,
N,
c_stride,
batch_size));
#endif
}
namespace {
__global__ void FloatToHalfKernel(const int N, const float* X, half* Y) {
CUDA_1D_KERNEL_LOOP(i, N) {
Y[i] = __float2half(X[i]);
}
}
__global__ void HalfToFloatKernel(const int N, const half* X, float* Y) {
CUDA_1D_KERNEL_LOOP(i, N) {
Y[i] = __half2float(X[i]);
}
}
} // namespace
template <>
void GemmBatched<float16, CUDAContext>(
const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB,
const int batch_size,
const int M,
const int N,
const int K,
const float alpha,
const float16* A,
const float16* B,
const float beta,
float16* C,
CUDAContext* context,
Tensor<CUDAContext>* scratch,
TensorProto::DataType math_type) {
const int a_stride = M * K;
const int b_stride = K * N;
const int c_stride = M * N;
#if __CUDACC_VER_MAJOR__ < 8
// loop over matrices in the batch
for (int i = 0; i < batch_size; ++i) {
math::Gemm<float16, CUDAContext>(
TransA,
TransB,
M,
N,
K,
alpha,
A + a_stride * i,
B + b_stride * i,
beta,
C + c_stride * i,
context);
}
#else
// 3 options:
// 1) scratch != null = cast to fp32, SgemmStridedBatched, cast result to fp16
// 2) math_type == FLOAT, scratch == nullptr = looped SgemmEx
// 3) math_type == FLOAT16, scratch == nullptr = batched Hgemm
if (scratch != nullptr) {
const int A_size = a_stride * batch_size;
const int B_size = b_stride * batch_size;
// cast, cublasSgemmStridedBatched, cast
size_t in_elems = A_size + B_size;
size_t out_elems = c_stride * batch_size;
scratch->Resize(in_elems + out_elems);
float* scratch_ptr = scratch->mutable_data<float>();
float* A_fp32 = scratch_ptr;
float* B_fp32 = scratch_ptr + A_size;
float* C_fp32 = scratch_ptr + A_size + B_size;
// cast A, B into fp32
HalfToFloatKernel<<<
CAFFE_GET_BLOCKS(A_size),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(A_size, (half*)A, A_fp32);
HalfToFloatKernel<<<
CAFFE_GET_BLOCKS(B_size),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(B_size, (half*)B, B_fp32);
// run fp32 batched Gemm
GemmBatched<float, CUDAContext>(
TransA,
TransB,
batch_size,
M,
N,
K,
alpha,
A_fp32,
B_fp32,
beta,
C_fp32,
context);
// cast result back to fp16
FloatToHalfKernel<<<
CAFFE_GET_BLOCKS(batch_size * M * N),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(batch_size * M * N, C_fp32, (half*)C);
} else {
if (math_type == TensorProto_DataType_FLOAT) {
// loop over matrices in the batch
for (int i = 0; i < batch_size; ++i) {
math::Gemm<float16, CUDAContext>(
TransA,
TransB,
M,
N,
K,
alpha,
A + a_stride * i,
B + b_stride * i,
beta,
C + c_stride * i,
context);
}
} else if (math_type == TensorProto_DataType_FLOAT16) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
const int lda = (TransA == CblasNoTrans) ? K : M;
const int ldb = (TransB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
// convert alpha, beta from float -> __half
auto alpha_fp16 = convert::floatToHalf(alpha);
auto beta_fp16 = convert::floatToHalf(beta);
CUBLAS_ENFORCE(cublasHgemmStridedBatched(
context->cublas_handle(),
cuTransB,
cuTransA,
N,
M,
K,
&alpha_fp16,
(const __half*)B,
ldb,
b_stride,
(const __half*)A,
lda,
a_stride,
&beta_fp16,
(__half*)C,
N,
c_stride,
batch_size));
}
}
#endif
}
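// Dispatch summary for the fp16 batched GEMM above (illustrative):
//   scratch != nullptr               -> cast to fp32, one
//                                       cublasSgemmStridedBatched, cast back
//   scratch == nullptr, FLOAT math   -> per-batch fp16 Gemm loop
//   scratch == nullptr, FLOAT16 math -> single cublasHgemmStridedBatched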
#if CUDA_VERSION >= 9000
// No behavioral change, but the specialization is required; defer to the
// default CUDA engine.
template <>
void Gemm<float, CUDAContext, TensorCoreEngine>(
const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB,
const int M,
const int N,
const int K,
const float alpha,
const float* A,
const float* B,
const float beta,
float* C,
CUDAContext* context,
TensorProto::DataType math_type) {
return Gemm<float, CUDAContext>(
TransA, TransB, M, N, K, alpha, A, B, beta, C, context, math_type);
}
template <>
void Gemm<float16, CUDAContext, TensorCoreEngine>(
const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB,
const int M,
const int N,
const int K,
const float alpha,
const float16* A,
const float16* B,
const float beta,
float16* C,
CUDAContext* context,
TensorProto::DataType math_type) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
// enable TensorCore for this call on this handle
if (TensorCoreAvailable()) {
CUBLAS_ENFORCE(
cublasSetMathMode(context->cublas_handle(), CUBLAS_TENSOR_OP_MATH));
}
CUBLAS_CHECK(cublasGemmEx(
context->cublas_handle(),
cuTransB,
cuTransA,
N,
M,
K,
&alpha,
B,
CUDA_R_16F,
ldb,
A,
CUDA_R_16F,
lda,
&beta,
C,
CUDA_R_16F,
N,
CUDA_R_32F,
CUBLAS_GEMM_DFALT_TENSOR_OP));
// Now disable TensorCore math for subsequent calls to this handle
if (TensorCoreAvailable()) {
CUBLAS_ENFORCE(
cublasSetMathMode(context->cublas_handle(), CUBLAS_DEFAULT_MATH));
}
}
template <>
void GemmBatched<float, CUDAContext, TensorCoreEngine>(
const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB,
const int batch_size,
const int M,
const int N,
const int K,
const float alpha,
const float* A,
const float* B,
const float beta,
float* C,
CUDAContext* context,
Tensor<CUDAContext>* scratch,
TensorProto::DataType math_type) {
return GemmBatched<float, CUDAContext, DefaultEngine>(
TransA,
TransB,
batch_size,
M,
N,
K,
alpha,
A,
B,
beta,
C,
context,
scratch,
math_type);
}
template <>
void GemmBatched<float16, CUDAContext, TensorCoreEngine>(
const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB,
const int batch_size,
const int M,
const int N,
const int K,
const float alpha,
const float16* A,
const float16* B,
const float beta,
float16* C,
CUDAContext* context,
Tensor<CUDAContext>* scratch,
TensorProto::DataType math_type) {
return GemmBatched<float16, CUDAContext, DefaultEngine>(
TransA,
TransB,
batch_size,
M,
N,
K,
alpha,
A,
B,
beta,
C,
context,
scratch,
math_type);
}
#endif // CUDA_VERSION >= 9000
template <>
void GemmEx<float, CUDAContext>(
const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB,
const int M,
const int N,
const int K,
const float alpha,
const float* A,
const int lda,
const float* B,
const int ldb,
const float beta,
float* C,
const int ldc,
CUDAContext* context) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
CUBLAS_ENFORCE(cublasSgemm(
context->cublas_handle(),
cuTransB,
cuTransA,
N,
M,
K,
&alpha,
B,
ldb,
A,
lda,
&beta,
C,
ldc));
}
template <>
void Gemv<float, CUDAContext>(
const CBLAS_TRANSPOSE TransA,
const int M,
const int N,
const float alpha,
const float* A,
const float* x,
const float beta,
float* y,
CUDAContext* context,
TensorProto::DataType math_type) {
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
CUBLAS_ENFORCE(cublasSgemv(
context->cublas_handle(),
cuTransA,
N,
M,
&alpha,
A,
N,
x,
1,
&beta,
y,
1));
}
// Batched Add variants
namespace {
template <typename T>
__global__ void AddStripedBatchKernel(
const int N,
const T* first,
T* Y,
const int stripe,
const int batch) {
for (int j = 0; j < batch; j++) {
const T* x = first + j * stripe;
CUDA_1D_KERNEL_LOOP(i, N) {
float tmpY = convert::To<T, float>(Y[i]);
tmpY += convert::To<T, float>(x[i]);
Y[i] = convert::To<float, T>(tmpY);
}
}
}
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH(T) \
template <> \
void AddStripedBatch<T, CUDAContext>( \
const int N, \
const T* first, \
T* Y, \
const int stripe, \
const int batch, \
CUDAContext* context) { \
AddStripedBatchKernel<T> \
<<<CAFFE_GET_BLOCKS(N), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>(N, first, Y, stripe, batch); \
}
CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH(float);
CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH(float16);
#undef CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH
template <>
void Gemv<float16, CUDAContext>(
const CBLAS_TRANSPOSE TransA,
const int M,
const int N,
const float alpha,
const float16* A,
const float16* x,
const float beta,
float16* y,
CUDAContext* context,
TensorProto::DataType math_type) {
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
// sort out what we need to call cublasSgemmEx / cublasHgemm
int m = (cuTransA == CUBLAS_OP_N) ? N : M;
int k = (cuTransA == CUBLAS_OP_N) ? M : N;
int LDA = (cuTransA == CUBLAS_OP_N) ? m : k;
int LDC = m;
if (math_type == TensorProto_DataType_FLOAT) {
CUBLAS_CHECK(cublasSgemmEx(
context->cublas_handle(),
cuTransA,
CUBLAS_OP_N,
m,
1,
k,
&alpha,
A,
CUDA_R_16F,
LDA,
x,
CUDA_R_16F,
k,
&beta,
y,
CUDA_R_16F,
LDC));
} else if (math_type == TensorProto_DataType_FLOAT16) {
auto alpha_fp16 = convert::floatToHalf(alpha);
auto beta_fp16 = convert::floatToHalf(beta);
CUBLAS_CHECK(cublasHgemm(
context->cublas_handle(),
cuTransA,
CUBLAS_OP_N,
m,
1,
k,
&alpha_fp16,
(const __half*)A,
LDA,
(const __half*)x,
k,
&beta_fp16,
(__half*)y,
LDC));
} else {
// fail
CAFFE_THROW("Unsupported math type");
}
}
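// Shape bookkeeping for the fp16 GEMV above (illustrative): the row-major
// A(M x N) is a column-major N x M matrix to cuBLAS, so CblasNoTrans maps
// to CUBLAS_OP_T. The product is issued as a degenerate GEMM with a single
// column: m = M, k = N for CblasNoTrans and m = N, k = M for CblasTrans,
// producing y of length m.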
namespace {
template <typename T>
__global__ void SetKernel(const int N, const T alpha, T* Y) {
CUDA_1D_KERNEL_LOOP(i, N) {
Y[i] = alpha;
}
}
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_SET(T) \
template <> \
void Set<T, CUDAContext>( \
const size_t N, const T alpha, T* Y, CUDAContext* context) { \
SetKernel<<< \
CAFFE_GET_BLOCKS(N), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>(N, alpha, Y); \
}
CAFFE2_SPECIALIZED_CUDA_SET(float);
CAFFE2_SPECIALIZED_CUDA_SET(double);
CAFFE2_SPECIALIZED_CUDA_SET(bool);
CAFFE2_SPECIALIZED_CUDA_SET(int8_t);
CAFFE2_SPECIALIZED_CUDA_SET(int16_t);
CAFFE2_SPECIALIZED_CUDA_SET(float16);
CAFFE2_SPECIALIZED_CUDA_SET(int);
CAFFE2_SPECIALIZED_CUDA_SET(int64_t);
CAFFE2_SPECIALIZED_CUDA_SET(char);
CAFFE2_SPECIALIZED_CUDA_SET(uint8_t);
CAFFE2_SPECIALIZED_CUDA_SET(uint16_t);
#undef CAFFE2_SPECIALIZED_CUDA_SET
namespace {
template <typename T>
__global__ void
UniformShift(const size_t N, const float min, const float max, T* x) {
float scale = max - min;
CUDA_1D_KERNEL_LOOP(i, N) {
x[i] = convert::To<float, T>(convert::To<T, float>(x[i]) * scale + min);
}
}
__global__ void
UniformIntFit(const size_t N, const int min, const int max, unsigned int* x) {
int* x_int = reinterpret_cast<int*>(x);
int range = (max - min + 1);
CUDA_1D_KERNEL_LOOP(i, N) {
x_int[i] = min + static_cast<int>(x[i] % range);
}
}
} // namespace
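// Range mapping used below (illustrative): curand fills the buffer with
// uniform values in (0, 1], and UniformShift rescales them to the
// requested interval via x * (max - min) + min. UniformIntFit maps raw
// 32-bit draws onto [min, max] with a modulo, which carries a slight bias
// whenever (max - min + 1) does not divide 2^32.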
template <>
void RandUniform<float, CUDAContext>(
const size_t n,
const float min,
const float max,
float* r,
CUDAContext* context) {
CURAND_ENFORCE(curandGenerateUniform(context->curand_generator(), r, n));
UniformShift<float>
<<<CAFFE_GET_BLOCKS(n),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(n, min, max, r);
}
template <>
void RandUniform<double, CUDAContext>(
const size_t n,
const double min,
const double max,
double* r,
CUDAContext* context) {
CURAND_ENFORCE(
curandGenerateUniformDouble(context->curand_generator(), r, n));
UniformShift<double>
<<<CAFFE_GET_BLOCKS(n),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(n, min, max, r);
}
template <>
void RandUniform<int, CUDAContext>(
const size_t n,
const int min,
const int max,
int* r,
CUDAContext* context) {
CURAND_ENFORCE(curandGenerate(
context->curand_generator(), reinterpret_cast<unsigned int*>(r), n));
UniformIntFit<<<
CAFFE_GET_BLOCKS(n),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(
n, min, max, reinterpret_cast<unsigned int*>(r));
}
template <typename T>
size_t HandleOddLengthRandGaussian(
const size_t n,
const T mean,
const T std,
T* r,
CUDAContext* context) {
if (n % 2 == 1) {
std::default_random_engine generator;
std::normal_distribution<T> distribution(mean, std);
const T random_value = distribution(generator);
math::Set<T, CUDAContext>(1, random_value, r + (n - 1), context);
return n - 1;
}
return n;
}
template <>
void RandGaussian<float, CUDAContext>(
const size_t n,
const float mean,
const float std,
float* r,
CUDAContext* context) {
// If n is odd, we add a random Gaussian value at the end manually
// and generate n-1 random values using curandGenerateNormal.
// curandGenerateNormal requires n to be even.
const size_t even_n =
HandleOddLengthRandGaussian<float>(n, mean, std, r, context);
CURAND_ENFORCE(
curandGenerateNormal(context->curand_generator(), r, even_n, mean, std));
}
template <>
void RandGaussian<double, CUDAContext>(
const size_t n,
const double mean,
const double std,
double* r,
CUDAContext* context) {
const size_t even_n =
HandleOddLengthRandGaussian<double>(n, mean, std, r, context);
CURAND_ENFORCE(curandGenerateNormalDouble(
context->curand_generator(), r, even_n, mean, std));
}
template <>
void Dot<float, CUDAContext>(
const int n,
const float* a,
const float* b,
float* y,
CUDAContext* context) {
float result;
CUBLAS_ENFORCE(cublasSdot(context->cublas_handle(), n, a, 1, b, 1, &result));
context->Copy<float, CPUContext, CUDAContext>(1, &result, y);
}
template <>
void Dot<float16, CUDAContext>(
const int n,
const float16* a,
const float16* b,
float16* y,
CUDAContext* context) {
float16 result;
// execute with 32-bit math
CUBLAS_CHECK(cublasDotEx(
context->cublas_handle(),
n,
a,
CUDA_R_16F,
1,
b,
CUDA_R_16F,
1,
&result,
CUDA_R_16F,
CUDA_R_32F));
context->Copy<float16, CPUContext, CUDAContext>(1, &result, y);
}
// A previous version of caffe2 used Thrust but it turns out that thrust
// reduction has an implicit scratch space allocation and deallocation, which
// may interfere with NCCL and create a deadlock. Hence we are using a custom
// reduction here.
#define SUM_KERNEL_NTHREADS 128
template <typename T>
__global__ void SumKernel(const int N, const T* X, T* Y, bool square) {
const int idx = threadIdx.x;
__shared__ float reduction_buffer[SUM_KERNEL_NTHREADS];
reduction_buffer[idx] = 0;
// A multilevel reduction.
// N -> 128
if (!square) {
for (int i = idx; i < N; i += SUM_KERNEL_NTHREADS) {
reduction_buffer[idx] += convert::To<T, float>(X[i]);
}
} else {
for (int i = idx; i < N; i += SUM_KERNEL_NTHREADS) {
float Xi = convert::To<T, float>(X[i]);
reduction_buffer[idx] += Xi * Xi;
}
}
__syncthreads();
// 128 -> 32
if (idx < 32) {
reduction_buffer[idx] += reduction_buffer[idx + 32] +
reduction_buffer[idx + 64] + reduction_buffer[idx + 96];
}
__syncthreads();
// 32 -> 1
if (idx == 0) {
float tmp = 0;
for (int i = 0; i < 32; ++i) {
tmp += reduction_buffer[i];
}
*Y = convert::To<float, T>(tmp);
}
}
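// Reduction shape of the kernel above (illustrative): each of the 128
// threads accumulates a strided slice of X in float, lanes 0..31 then
// fold in lanes +32/+64/+96 (128 -> 32), and lane 0 serially sums the
// remaining 32 partials (32 -> 1) before converting back to T.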
// According to the benchmarks script
// caffe2/caffe2/experiments/python/device_reduce_sum_bench.py,
// device reduce is slower for N <= 10000.
#define DEVICE_REDUCE_SIZE_THRESHOLD 10000
namespace {
template <typename T>
__global__ void SumConvertKernel(float* sum, T* dest) {
*dest = convert::To<float, T>(*sum);
}
template <typename T, typename IterT>
void SumGenericIter(
const int N,
IterT it,
T*& dest,
CUDAContext* context,
Tensor<CUDAContext>* scratch_ptr) {
size_t memRequired = 0;
cub::DeviceReduce::Sum(
nullptr, memRequired, it, dest, N, context->cuda_stream());
auto buffer_size =
static_cast<TIndex>((memRequired + sizeof(T) - 1) / sizeof(T));
if (!dest) {
// allocate one more T at the end of scratch for dest
scratch_ptr->Resize(std::vector<TIndex>{buffer_size + 1});
dest = scratch_ptr->template mutable_data<T>() + buffer_size;
} else {
scratch_ptr->Resize(std::vector<TIndex>{buffer_size});
}
cub::DeviceReduce::Sum(
static_cast<void*>(scratch_ptr->template mutable_data<T>()),
memRequired,
it,
dest,
N,
context->cuda_stream());
}
} // namespace
template <>
void Sum<float, CUDAContext>(
const int N,
const float* x,
float* y,
CUDAContext* context,
Tensor<CUDAContext>* scratch_ptr) {
if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) {
SumGenericIter<float>(N, x, y, context, scratch_ptr);
} else {
SumKernel<<<1, SUM_KERNEL_NTHREADS, 0, context->cuda_stream()>>>(
N, x, y, false);
}
}
template <>
void Sum<int32_t, CUDAContext>(
const int N,
const int32_t* x,
int32_t* y,
CUDAContext* context,
Tensor<CUDAContext>* scratch_ptr) {
if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) {
SumGenericIter<int32_t>(N, x, y, context, scratch_ptr);
} else {
SumKernel<<<1, SUM_KERNEL_NTHREADS, 0, context->cuda_stream()>>>(
N, x, y, false);
}
}
namespace {
template <typename T>
struct FloatTransform {
inline __host__ __device__ float operator()(const T v) const {
return convert::To<T, float>(v);
}
};
} // namespace
#define CAFFE2_MATH_SUM_FUNC(T) \
template <> \
void Sum<T, CUDAContext>( \
const int N, \
const T* x, \
T* y, \
CUDAContext* context, \
Tensor<CUDAContext>* scratch_ptr) { \
if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) { \
FloatTransform<T> transform; \
cub::TransformInputIterator<float, FloatTransform<T>, const T*> it( \
x, transform); \
float* sum = nullptr; \
SumGenericIter<float>(N, it, sum, context, scratch_ptr); \
SumConvertKernel<<<1, 1, 0, context->cuda_stream()>>>(sum, y); \
} else { \
SumKernel<<<1, SUM_KERNEL_NTHREADS, 0, context->cuda_stream()>>>( \
N, x, y, false); \
} \
}
CAFFE2_MATH_SUM_FUNC(float16)
#undef CAFFE2_MATH_SUM_FUNC
namespace {
template <typename T>
struct SqrTransform {
inline __host__ __device__ T operator()(const T v) const {
return v * v;
}
};
} // namespace
template <>
void SumSqr<float, CUDAContext>(
const int N,
const float* x,
float* y,
CUDAContext* context,
Tensor<CUDAContext>* scratch_ptr) {
if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) {
SqrTransform<float> transform;
cub::TransformInputIterator<float, SqrTransform<float>, const float*> it(
x, transform);
SumGenericIter<float>(N, it, y, context, scratch_ptr);
} else {
SumKernel<<<1, SUM_KERNEL_NTHREADS, 0, context->cuda_stream()>>>(
N, x, y, true);
}
}
#define CAFFE2_MATH_SUMSQR_FUNC(T) \
template <> \
void SumSqr<T, CUDAContext>( \
const int N, \
const T* x, \
T* y, \
CUDAContext* context, \
Tensor<CUDAContext>* scratch_ptr) { \
if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) { \
FloatTransform<T> float_transform; \
cub::TransformInputIterator<float, FloatTransform<T>, const T*> \
float_it(x, float_transform); \
SqrTransform<float> sqr_transform; \
cub::TransformInputIterator< \
float, \
SqrTransform<float>, \
decltype(float_it)> \
it(float_it, sqr_transform); \
float* sum = nullptr; \
SumGenericIter<float>(N, it, sum, context, scratch_ptr); \
SumConvertKernel<<<1, 1, 0, context->cuda_stream()>>>(sum, y); \
} else { \
SumKernel<<<1, SUM_KERNEL_NTHREADS, 0, context->cuda_stream()>>>( \
N, x, y, true); \
} \
}
CAFFE2_MATH_SUMSQR_FUNC(float16)
#undef CAFFE2_MATH_SUMSQR_FUNC
#undef DEVICE_REDUCE_SIZE_THRESHOLD
namespace {
template <typename T>
__global__ void
SelectKernel(const int N, const int D, const T* x, const int* idx, T* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = x[i * D + idx[i]];
}
}
} // namespace
template <>
void Select<float, CUDAContext>(
const int N,
const int D,
const float* x,
const int* idx,
float* y,
CUDAContext* context) {
SelectKernel<float>
<<<CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(N, D, x, idx, y);
}
template <>
void Select<float16, CUDAContext>(
const int N,
const int D,
const float16* x,
const int* idx,
float16* y,
CUDAContext* context) {
SelectKernel<float16>
<<<CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(N, D, x, idx, y);
}
namespace {
template <typename T>
__global__ void ScaleKernel(const int n, const float alpha, const T* x, T* y) {
CUDA_1D_KERNEL_LOOP(i, n) {
// y[i] = convert::To<float,T>(convert::To<T, float>(x[i]) * alpha);
y[i] = convert::Get<T>(convert::Get<float>(x[i]) * alpha);
}
}
template <typename T>
__global__ void
ScaleKernelDeviceAlpha(const int n, const float* alpha, const T* x, T* y) {
CUDA_1D_KERNEL_LOOP(i, n) {
y[i] = x[i] * (*alpha);
}
}
template <typename T>
__global__ void PowKernel(const int n, const T* x, const T exponent, T* y) {
CUDA_1D_KERNEL_LOOP(i, n) {
y[i] = powf(x[i], exponent);
}
}
// fp16 specialization
template <>
__global__ void ScaleKernelDeviceAlpha(
const int n,
const float* alpha,
const float16* x,
float16* y) {
CUDA_1D_KERNEL_LOOP(i, n) {
y[i] = convert::To<float, float16>(
convert::To<float16, float>(x[i]) * (*alpha));
}
}
} // namespace
template <>
void Powx<float, CUDAContext>(
const int N,
const float* a,
const float b,
float* y,
CUDAContext* context) {
PowKernel<<<
CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(N, a, b, y);
}
template <>
void Scale<float, CUDAContext>(
const int n,
const float alpha,
const float* x,
float* y,
CUDAContext* context) {
ScaleKernel<float>
<<<CAFFE_GET_BLOCKS(n),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(n, alpha, x, y);
}
template <>
void Scale<float16, CUDAContext>(
const int n,
const float alpha,
const float16* x,
float16* y,
CUDAContext* context) {
ScaleKernel<float16>
<<<CAFFE_GET_BLOCKS(n),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(n, alpha, x, y);
}
template <>
void Scale<float, CUDAContext>(
const int n,
const float* alpha,
const float* x,
float* y,
CUDAContext* context) {
ScaleKernelDeviceAlpha<float>
<<<CAFFE_GET_BLOCKS(n),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(n, alpha, x, y);
}
template <>
void Scale<float16, CUDAContext>(
const int n,
const float* alpha,
const float16* x,
float16* y,
CUDAContext* context) {
ScaleKernelDeviceAlpha<float16>
<<<CAFFE_GET_BLOCKS(n),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(n, alpha, x, y);
}
template <>
void Axpy<float, CUDAContext>(
const int N,
const float alpha,
const float* X,
float* Y,
CUDAContext* context) {
CUBLAS_ENFORCE(cublasSaxpy(context->cublas_handle(), N, &alpha, X, 1, Y, 1));
}
template <>
void Axpy<double, CUDAContext>(
const int N,
const float alpha,
const double* X,
double* Y,
CUDAContext* context) {
double alpha_d{alpha};
CUBLAS_ENFORCE(
cublasDaxpy(context->cublas_handle(), N, &alpha_d, X, 1, Y, 1));
}
template <>
void Axpy<float16, CUDAContext>(
const int N,
const float alpha,
const float16* X,
float16* Y,
CUDAContext* context) {
CUBLAS_CHECK(cublasAxpyEx(
context->cublas_handle(),
N,
&alpha,
CUDA_R_16F,
X,
CUDA_R_16F,
1,
Y,
CUDA_R_16F,
1,
CUDA_R_32F));
}
namespace {
template <typename T>
__global__ void AxpyKernel(const int n, const float* a, const T* x, T* y) {
CUDA_1D_KERNEL_LOOP(index, n) {
y[index] = convert::Get<T>(
convert::Get<float>(x[index]) * (*a) + convert::Get<float>(y[index]));
}
}
} // namespace
template <>
void Axpy<float, CUDAContext>(
const int n,
const float* alpha,
const float* X,
float* Y,
CUDAContext* context) {
AxpyKernel<float>
<<<CAFFE_GET_BLOCKS(n),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(n, alpha, X, Y);
}
template <>
void Axpy<float16, CUDAContext>(
const int n,
const float* alpha,
const float16* X,
float16* Y,
CUDAContext* context) {
AxpyKernel<float16>
<<<CAFFE_GET_BLOCKS(n),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(n, alpha, X, Y);
}
namespace {
template <typename T>
__global__ void
AxpbyKernel(const int n, const T a, const T* x, const T b, T* y) {
CUDA_1D_KERNEL_LOOP(index, n) {
y[index] = x[index] * a + y[index] * b;
}
}
} // namespace
template <>
void Axpby<float, CUDAContext>(
const int n,
const float a,
const float* x,
const float b,
float* y,
CUDAContext* context) {
AxpbyKernel<float>
<<<CAFFE_GET_BLOCKS(n),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(n, a, x, b, y);
}
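// Axpby computes y = a * x + b * y elementwise. Illustrative example: with
// a = 2, b = 0.5, x[i] = 3 and y[i] = 4, the kernel stores 2*3 + 0.5*4 = 8.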
namespace {
template <typename T>
__global__ void Im2ColNCHWCUDAKernel(
const int n,
const int input_h,
const int input_w,
const int kernel_h,
const int kernel_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int stride_h,
const int stride_w,
const int output_h,
const int output_w,
const T* img_data,
T* col_data) {
CUDA_1D_KERNEL_LOOP(index, n) {
const int w_out = index % output_w;
const int h_index = index / output_w;
const int h_out = h_index % output_h;
const int channel_in = h_index / output_h;
const int channel_out = channel_in * kernel_h * kernel_w;
const int h_in = h_out * stride_h - pad_t;
const int w_in = w_out * stride_w - pad_l;
const int output_size = output_h * output_w;
T* col_data_ptr =
col_data + (channel_out * output_h + h_out) * output_w + w_out;
const T* img_data_ptr =
img_data + (channel_in * input_h + h_in) * input_w + w_in;
int dh = 0;
for (int i = 0; i < kernel_h; ++i) {
int dw = 0;
for (int j = 0; j < kernel_w; ++j) {
const int h = h_in + dh;
const int w = w_in + dw;
#if __CUDA_ARCH__ >= 350
*col_data_ptr = (h >= 0 && w >= 0 && h < input_h && w < input_w)
? __ldg(img_data_ptr + dh * input_w + dw)
: 0;
#else
*col_data_ptr = (h >= 0 && w >= 0 && h < input_h && w < input_w)
? img_data_ptr[dh * input_w + dw]
: 0;
#endif
col_data_ptr += output_size;
dw += dilation_w;
}
dh += dilation_h;
}
}
}
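// Explanatory note: each of the n = channels * output_h * output_w threads
// above owns one output position (channel_in, h_out, w_out) and writes that
// position's kernel_h * kernel_w patch values, stepping col_data_ptr by
// output_size so successive patch elements land in successive rows of the
// column matrix.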
template <typename T>
__global__ void Im2ColNHWCCUDAKernel(
const int n,
const int input_h,
const int input_w,
const int kernel_h,
const int kernel_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int stride_h,
const int stride_w,
const int output_w,
const int channels,
const T* img_data,
T* col_data) {
CUDA_1D_KERNEL_LOOP(index, n) {
const int channel_in = index % channels;
const int w_out = index / channels % output_w;
const int h_out = index / channels / output_w;
const int h_in = h_out * stride_h - pad_t;
const int w_in = w_out * stride_w - pad_l;
T* col_data_ptr = col_data +
(h_out * output_w + w_out) * channels * kernel_h * kernel_w +
channel_in;
int dh = 0;
for (int i = 0; i < kernel_h; ++i) {
int dw = 0;
for (int j = 0; j < kernel_w; ++j) {
const int h = h_in + dh;
const int w = w_in + dw;
#if __CUDA_ARCH__ >= 350
*col_data_ptr = (h >= 0 && w >= 0 && h < input_h && w < input_w)
? __ldg(img_data + (h * input_w + w) * channels + channel_in)
: 0;
#else
*col_data_ptr = (h >= 0 && w >= 0 && h < input_h && w < input_w)
? img_data[(h * input_w + w) * channels + channel_in]
: 0;
#endif
col_data_ptr += channels;
dw += dilation_w;
}
dh += dilation_h;
}
}
}
template <typename T>
__global__ void Col2ImNCHWCUDAKernel(
const int n,
const int input_h,
const int input_w,
const int patch_h,
const int patch_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int stride_h,
const int stride_w,
const int output_h,
const int output_w,
const T* col_data,
T* img_data) {
const int dpatch_h = dilation_h * (patch_h - 1) + 1;
const int dpatch_w = dilation_w * (patch_w - 1) + 1;
CUDA_1D_KERNEL_LOOP(index, n) {
T val = 0;
const int w = index % input_w + pad_l;
const int h = index / input_w % input_h + pad_t;
const int c = index / (input_h * input_w);
// compute the start and end of the output
const int w_col_start = (w < dpatch_w) ? 0 : (w - dpatch_w) / stride_w + 1;
const int w_col_end = min(w / stride_w + 1, output_w);
const int h_col_start = (h < dpatch_h) ? 0 : (h - dpatch_h) / stride_h + 1;
const int h_col_end = min(h / stride_h + 1, output_h);
for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
int h_k = (h - h_col * stride_h);
int w_k = (w - w_col * stride_w);
if (h_k % dilation_h == 0 && w_k % dilation_w == 0) {
h_k /= dilation_h;
w_k /= dilation_w;
const int col_data_index =
(((c * patch_h + h_k) * patch_w + w_k) * output_h + h_col) *
output_w +
w_col;
#if __CUDA_ARCH__ >= 350
val += __ldg(col_data + col_data_index);
#else
val += col_data[col_data_index];
#endif
}
}
}
img_data[index] = val;
}
}
template <typename T>
__global__ void Col2ImNHWCCUDAKernel(
const int n,
const int input_w,
const int channels,
const int patch_h,
const int patch_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int stride_h,
const int stride_w,
const int output_h,
const int output_w,
const T* col_data,
T* img_data) {
const int dpatch_h = dilation_h * (patch_h - 1) + 1;
const int dpatch_w = dilation_w * (patch_w - 1) + 1;
CUDA_1D_KERNEL_LOOP(index, n) {
T val = 0;
const int c = index % channels;
const int w = index / channels % input_w + pad_l;
const int h = index / channels / input_w + pad_t;
// compute the start and end of the output
const int w_col_start = (w < dpatch_w) ? 0 : (w - dpatch_w) / stride_w + 1;
const int w_col_end = min(w / stride_w + 1, output_w);
const int h_col_start = (h < dpatch_h) ? 0 : (h - dpatch_h) / stride_h + 1;
const int h_col_end = min(h / stride_h + 1, output_h);
const int channels_col = patch_h * patch_w * channels;
for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
int h_k = h - h_col * stride_h;
int w_k = w - w_col * stride_w;
if (h_k % dilation_h == 0 && w_k % dilation_w == 0) {
h_k /= dilation_h;
w_k /= dilation_w;
const int c_col = (h_k * patch_w + w_k) * channels + c;
#if __CUDA_ARCH__ >= 350
val += __ldg(
col_data + (h_col * output_w + w_col) * channels_col + c_col);
#else
val += col_data[(h_col * output_w + w_col) * channels_col + c_col];
#endif
}
}
}
img_data[index] = val;
}
}
template <typename T, int N, bool kCol2Im>
__global__ void Im2ColNdNCHWCUDAKernel(
const int outer_size,
const int inner_size,
const int kernel_size,
SimpleArray<int, N + 1> img_shape,
SimpleArray<int, N + 1> col_shape,
SimpleArray<int, N> kernel_shape,
SimpleArray<int, N> stride,
SimpleArray<int, N> dilation,
SimpleArray<int, N> pad,
const T* X_data,
T* Y_data) {
int d_offset[N];
int d_iter[N];
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
int offset_i = i;
#pragma unroll
for (int d_i = N - 1; d_i >= 0; --d_i) {
d_offset[d_i] = offset_i % kernel_shape.data[d_i];
offset_i /= kernel_shape.data[d_i];
}
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
int offset_j = j;
#pragma unroll
for (int d_i = N - 1; d_i >= 0; --d_i) {
d_iter[d_i] = offset_j % col_shape.data[d_i + 1];
offset_j /= col_shape.data[d_i + 1];
}
const int col_index = i * inner_size + j;
int img_index = i / kernel_size;
bool is_padding = false;
#pragma unroll
for (int d_i = 0; d_i < N; ++d_i) {
const int d_img = d_iter[d_i] * stride.data[d_i] - pad.data[d_i] +
d_offset[d_i] * dilation.data[d_i];
is_padding |= d_img < 0 || d_img >= img_shape.data[d_i + 1];
img_index = img_index * img_shape.data[d_i + 1] + d_img;
}
#if __CUDA_ARCH__ >= 350
if (!kCol2Im) {
Y_data[col_index] = is_padding ? 0 : __ldg(X_data + img_index);
} else if (!is_padding) {
atomicAdd(Y_data + img_index, __ldg(X_data + col_index));
}
#else
if (!kCol2Im) {
Y_data[col_index] = is_padding ? 0 : X_data[img_index];
} else if (!is_padding) {
atomicAdd(Y_data + img_index, X_data[col_index]);
}
#endif
}
}
}
template <typename T, int N>
void Im2ColNdNCHWCUDAImpl(
const int img_size,
const int col_size,
const int* img_shape,
const int* col_shape,
const int* kernel_shape,
const int* stride,
const int* dilation,
const int* pad,
const float* img_data,
float* col_data,
CUDAContext* context) {
const int outer_size = col_shape[0];
const int inner_size = col_size / outer_size;
const int kernel_size = std::accumulate(
kernel_shape, kernel_shape + N, 1, std::multiplies<int>());
SimpleArray<int, N + 1> img_shape_array;
SimpleArray<int, N + 1> col_shape_array;
SimpleArray<int, N> kernel_shape_array;
SimpleArray<int, N> stride_array;
SimpleArray<int, N> dilation_array;
SimpleArray<int, N> pad_array;
std::memcpy(img_shape_array.data, img_shape, (N + 1) * sizeof(int));
std::memcpy(col_shape_array.data, col_shape, (N + 1) * sizeof(int));
std::memcpy(kernel_shape_array.data, kernel_shape, N * sizeof(int));
std::memcpy(stride_array.data, stride, N * sizeof(int));
std::memcpy(dilation_array.data, dilation, N * sizeof(int));
std::memcpy(pad_array.data, pad, N * sizeof(int));
Im2ColNdNCHWCUDAKernel<T, N, false>
<<<std::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(
outer_size,
inner_size,
kernel_size,
img_shape_array,
col_shape_array,
kernel_shape_array,
stride_array,
dilation_array,
pad_array,
img_data,
col_data);
}
template <typename T, int N>
void Col2ImNdNCHWCUDAImpl(
const int img_size,
const int col_size,
const int* img_shape,
const int* col_shape,
const int* kernel_shape,
const int* stride,
const int* dilation,
const int* pad,
const float* col_data,
float* img_data,
CUDAContext* context) {
const int outer_size = col_shape[0];
const int inner_size = col_size / outer_size;
const int kernel_size = std::accumulate(
kernel_shape, kernel_shape + N, 1, std::multiplies<int>());
SimpleArray<int, N + 1> img_shape_array;
SimpleArray<int, N + 1> col_shape_array;
SimpleArray<int, N> kernel_shape_array;
SimpleArray<int, N> stride_array;
SimpleArray<int, N> dilation_array;
SimpleArray<int, N> pad_array;
std::memcpy(img_shape_array.data, img_shape, (N + 1) * sizeof(int));
std::memcpy(col_shape_array.data, col_shape, (N + 1) * sizeof(int));
std::memcpy(kernel_shape_array.data, kernel_shape, N * sizeof(int));
std::memcpy(stride_array.data, stride, N * sizeof(int));
std::memcpy(dilation_array.data, dilation, N * sizeof(int));
std::memcpy(pad_array.data, pad, N * sizeof(int));
Set<T, CUDAContext>(img_size, 0, img_data, context);
Im2ColNdNCHWCUDAKernel<T, N, true>
<<<std::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(
outer_size,
inner_size,
kernel_size,
img_shape_array,
col_shape_array,
kernel_shape_array,
stride_array,
dilation_array,
pad_array,
col_data,
img_data);
}
} // namespace
template <>
void Im2Col<float, CUDAContext, StorageOrder::NCHW>(
const int channels,
const int height,
const int width,
const int kernel_h,
const int kernel_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int pad_b,
const int pad_r,
const int stride_h,
const int stride_w,
const float* img_data,
float* col_data,
CUDAContext* context) {
const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
const int output_h = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
const int output_w = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
const int num_kernels = channels * output_h * output_w;
Im2ColNCHWCUDAKernel<float>
<<<CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(
num_kernels,
height,
width,
kernel_h,
kernel_w,
dilation_h,
dilation_w,
pad_t,
pad_l,
stride_h,
stride_w,
output_h,
output_w,
img_data,
col_data);
}
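// Worked example for the size formulas above (illustrative numbers):
// height = width = 5, kernel_h = kernel_w = 3, all pads = 1, strides = 2,
// dilations = 1 give dkernel = 3 and
//   output_h = output_w = (5 + 1 + 1 - 3) / 2 + 1 = 3,
// so num_kernels = channels * 3 * 3 threads are launched.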
template <>
void Im2Col<float, CUDAContext, StorageOrder::NHWC>(
const int channels,
const int height,
const int width,
const int kernel_h,
const int kernel_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int pad_b,
const int pad_r,
const int stride_h,
const int stride_w,
const float* img_data,
float* col_data,
CUDAContext* context) {
const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
const int output_h = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
const int output_w = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
const int num_kernels = output_h * output_w * channels;
Im2ColNHWCCUDAKernel<float>
<<<CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(
num_kernels,
height,
width,
kernel_h,
kernel_w,
dilation_h,
dilation_w,
pad_t,
pad_l,
stride_h,
stride_w,
output_w,
channels,
img_data,
col_data);
}
template <>
void Col2Im<float, CUDAContext, StorageOrder::NCHW>(
const int channels,
const int height,
const int width,
const int kernel_h,
const int kernel_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int pad_b,
const int pad_r,
const int stride_h,
const int stride_w,
const float* col_data,
float* img_data,
CUDAContext* context) {
const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
const int output_h = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
const int output_w = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
const int num_kernels = channels * height * width;
Col2ImNCHWCUDAKernel<float>
<<<CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(
num_kernels,
height,
width,
kernel_h,
kernel_w,
dilation_h,
dilation_w,
pad_t,
pad_l,
stride_h,
stride_w,
output_h,
output_w,
col_data,
img_data);
}
template <>
void Col2Im<float, CUDAContext, StorageOrder::NHWC>(
const int channels,
const int height,
const int width,
const int kernel_h,
const int kernel_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int pad_b,
const int pad_r,
const int stride_h,
const int stride_w,
const float* col_data,
float* img_data,
CUDAContext* context) {
const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
const int output_h = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
const int output_w = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
const int num_kernels = height * width * channels;
Col2ImNHWCCUDAKernel<float>
<<<CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(
num_kernels,
width,
channels,
kernel_h,
kernel_w,
dilation_h,
dilation_w,
pad_t,
pad_l,
stride_h,
stride_w,
output_h,
output_w,
col_data,
img_data);
}
template <>
void Im2ColNd<float, CUDAContext, StorageOrder::NCHW>(
const int N,
const int img_size,
const int col_size,
const int* img_shape,
const int* col_shape,
const int* kernel_shape,
const int* stride,
const int* dilation,
const int* pad,
const float* img_data,
float* col_data,
CUDAContext* context) {
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1(
N,
Im2ColNdNCHWCUDAImpl,
float,
img_size,
col_size,
img_shape,
col_shape,
kernel_shape,
stride,
dilation,
pad,
img_data,
col_data,
context);
}
template <>
void Col2ImNd<float, CUDAContext, StorageOrder::NCHW>(
const int N,
const int img_size,
const int col_size,
const int* img_shape,
const int* col_shape,
const int* kernel_shape,
const int* stride,
const int* dilation,
const int* pad,
const float* col_data,
float* img_data,
CUDAContext* context) {
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1(
N,
Col2ImNdNCHWCUDAImpl,
float,
img_size,
col_size,
img_shape,
col_shape,
kernel_shape,
stride,
dilation,
pad,
col_data,
img_data,
context);
}
template <>
void CopyMatrix<CUDAContext>(
const size_t itemsize,
const int M,
const int N,
const void* A,
const int lda,
void* B,
const int ldb,
CUDAContext* context,
TypeMeta::TypedCopy copy) {
CAFFE_ENFORCE(!copy, "Copy constructor is not supported in CUDA context");
cudaMemcpy2DAsync(
B,
ldb * itemsize,
A,
lda * itemsize,
N * itemsize,
M,
cudaMemcpyDeviceToDevice,
context->cuda_stream());
}
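// Explanatory note on the cudaMemcpy2DAsync call above: the pitch arguments
// are row strides in bytes (ldb * itemsize and lda * itemsize), the copied
// width is N * itemsize bytes, and M is the row count. E.g. copying an
// M x N = 3 x 4 float block out of a matrix with lda = 10 reads 16 bytes
// per row at 40-byte source strides.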
template <>
void CopyVector<float, CUDAContext>(
const int N,
const float* src,
float* dst,
CUDAContext* context) {
if (src != dst && N > 0) {
cudaMemcpyAsync(
dst,
src,
sizeof(float) * N,
cudaMemcpyDeviceToDevice,
context->cuda_stream());
}
}
namespace {
template <typename T>
using BlockReduce = cub::BlockReduce<T, CAFFE_CUDA_NUM_THREADS>;
template <typename T, class Reducer>
__global__ void RowwiseReduceKernel(
const int rows,
const int cols,
const Reducer reducer,
const T init,
const T* X,
T* Y) {
__shared__ typename BlockReduce<T>::TempStorage temp_storage;
for (int i = blockIdx.x; i < rows; i += gridDim.x) {
T val = init;
for (int j = threadIdx.x; j < cols; j += blockDim.x) {
val = reducer(X[i * cols + j], val);
}
val = BlockReduce<T>(temp_storage).Reduce(val, reducer);
if (threadIdx.x == 0) {
Y[i] = val;
}
__syncthreads();
}
}
template <typename T, class Reducer>
__global__ void ColwiseReduceKernel(
const int rows,
const int cols,
const Reducer reducer,
const T init,
const T* X,
T* Y) {
__shared__ typename BlockReduce<T>::TempStorage temp_storage;
for (int i = blockIdx.x; i < cols; i += gridDim.x) {
T val = init;
for (int j = threadIdx.x; j < rows; j += blockDim.x) {
val = reducer(X[j * cols + i], val);
}
val = BlockReduce<T>(temp_storage).Reduce(val, reducer);
if (threadIdx.x == 0) {
Y[i] = val;
}
__syncthreads();
}
}
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_ROWWISE_MAX(T) \
template <> \
void RowwiseMax<T, CUDAContext>( \
const int N, const int D, const T* x, T* y, CUDAContext* context) { \
RowwiseReduceKernel<<< \
std::min(N, CAFFE_MAXIMUM_NUM_BLOCKS), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>( \
N, D, cub::Max(), std::numeric_limits<T>::lowest(), x, y); \
}
CAFFE2_SPECIALIZED_CUDA_ROWWISE_MAX(float)
#undef CAFFE2_SPECIALIZED_CUDA_ROWWISE_MAX
#define CAFFE2_SPECIALIZED_CUDA_COLWISE_MAX(T) \
template <> \
void ColwiseMax<T, CUDAContext>( \
const int N, const int D, const T* x, T* y, CUDAContext* context) { \
ColwiseReduceKernel<<< \
std::min(D, CAFFE_MAXIMUM_NUM_BLOCKS), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>( \
N, D, cub::Max(), std::numeric_limits<T>::lowest(), x, y); \
}
CAFFE2_SPECIALIZED_CUDA_COLWISE_MAX(float)
#undef CAFFE2_SPECIALIZED_CUDA_COLWISE_MAX
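// Access-pattern note for the two reductions above: in RowwiseReduceKernel
// the threads of a block stride across one row's columns, so global loads
// are contiguous; in ColwiseReduceKernel each thread walks down a column
// (X[j * cols + i]), so neighboring threads load elements `cols` apart. In
// both kernels one block produces one output via cub::BlockReduce, with
// blocks grid-striding over rows (respectively columns).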
namespace {
__global__ void
maximum_kernel(const int N, const float alpha, const float* x, float* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = fmaxf(x[i], alpha);
}
}
} // namespace
template <>
void Maximum(
const int N,
const float alpha,
const float* x,
float* y,
CUDAContext* context) {
maximum_kernel<<<
std::min(N, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(N, alpha, x, y);
}
namespace {
template <typename T, class Reducer, int D>
__global__ void ReduceTensorCUDAKernel(
const int outer_size,
const int inner_size,
SimpleArray<int, D> X_strides,
SimpleArray<FixedDivisor<int>, D> Y_dims,
const Reducer reducer,
const T init,
const T* X,
T* Y) {
__shared__ typename BlockReduce<T>::TempStorage temp_storage;
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
T val = init;
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
int X_index = 0;
int Y_index = i * inner_size + j;
#pragma unroll
for (int d = D - 1; d >= 0; --d) {
int r;
Y_dims.data[d].DivMod(Y_index, &Y_index, &r);
X_index += r * X_strides.data[d];
}
#if __CUDA_ARCH__ >= 350
val = reducer(val, __ldg(X + X_index));
#else
val = reducer(val, X[X_index]);
#endif
}
val = BlockReduce<T>(temp_storage).Reduce(val, reducer);
if (threadIdx.x == 0) {
Y[i] = val;
}
__syncthreads();
}
}
template <typename T, class Reducer, int D>
void ReduceTensorCUDAImpl(
const int outer_size,
const int inner_size,
const int* dims,
const int* axes,
const Reducer& reducer,
const T& init,
const T* X,
T* Y,
CUDAContext* context) {
SimpleArray<int, D> X_strides;
SimpleArray<FixedDivisor<int>, D> Y_dims;
utils::ComputeTransposedStrides(D, dims, axes, X_strides.data);
for (int i = 0; i < D; ++i) {
Y_dims.data[i] = FixedDivisor<int>(dims[axes[i]]);
}
ReduceTensorCUDAKernel<T, Reducer, D>
<<<std::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(
outer_size, inner_size, X_strides, Y_dims, reducer, init, X, Y);
}
template <typename T, class Reducer>
void ReduceTensorCUDA(
const int num_dims,
const int* dims,
const int num_axes,
const int* axes,
const Reducer& reducer,
const T& init,
const T* X,
T* Y,
CUDAContext* context) {
CAFFE_ENFORCE_LE(num_axes, num_dims);
if (X == Y) {
return;
}
std::vector<int> transpose_axes(num_dims);
utils::ComputeTransposeAxesForReduceOp(
num_dims, num_axes, axes, transpose_axes.data());
const int pivot = num_dims - num_axes;
int outer_size = 1;
for (int i = 0; i < pivot; ++i) {
outer_size *= dims[transpose_axes[i]];
}
int inner_size = 1;
for (int i = pivot; i < num_dims; ++i) {
inner_size *= dims[transpose_axes[i]];
}
if (outer_size > 0 && inner_size > 0) {
if (transpose_axes[pivot] == pivot) {
RowwiseReduceKernel<T>
<<<std::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(
outer_size, inner_size, reducer, init, X, Y);
return;
}
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_2(
num_dims,
ReduceTensorCUDAImpl,
T,
Reducer,
outer_size,
inner_size,
dims,
transpose_axes.data(),
reducer,
init,
X,
Y,
context);
} else if (outer_size > 0) {
math::Set<T, CUDAContext>(outer_size, init, Y, context);
}
}
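// Shape bookkeeping in ReduceTensorCUDA (explanatory note): the reduced axes
// are permuted to the innermost positions, so the output has outer_size
// elements, each reducing over inner_size inputs; when X == Y the function
// silently does nothing. Illustrative example: dims = {2, 3, 4} with
// axes = {1} gives outer_size = 2 * 4 = 8 and inner_size = 3, and takes the
// strided ReduceTensorCUDAKernel path; axes = {2} leaves an identity
// permutation and hits the contiguous RowwiseReduceKernel fast path.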
template <typename T>
void ReduceMeanCUDAImpl(
const int num_dims,
const int* dims,
const int num_axes,
const int* axes,
const T* X,
T* Y,
CUDAContext* context) {
ReduceTensorCUDA(
num_dims, dims, num_axes, axes, cub::Sum(), T(0), X, Y, context);
const int X_size =
std::accumulate(dims, dims + num_dims, 1, std::multiplies<int>());
int scale = 1;
for (int i = 0; i < num_axes; ++i) {
scale *= dims[axes[i]];
}
const int Y_size = X_size / scale;
Scale<T, CUDAContext>(
Y_size, 1.0f / static_cast<float>(scale), Y, Y, context);
}
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_REDUCE_MIN(T) \
template <> \
void ReduceMin<T, CUDAContext>( \
const int num_dims, \
const int* dims, \
const int num_axes, \
const int* axes, \
const T* X, \
T* Y, \
CUDAContext* context) { \
ReduceTensorCUDA( \
num_dims, \
dims, \
num_axes, \
axes, \
cub::Min(), \
std::numeric_limits<T>::max(), \
X, \
Y, \
context); \
}
CAFFE2_SPECIALIZED_CUDA_REDUCE_MIN(std::int32_t)
CAFFE2_SPECIALIZED_CUDA_REDUCE_MIN(std::int64_t)
CAFFE2_SPECIALIZED_CUDA_REDUCE_MIN(float)
CAFFE2_SPECIALIZED_CUDA_REDUCE_MIN(double)
#undef CAFFE2_SPECIALIZED_CUDA_REDUCE_MIN
#define CAFFE2_SPECIALIZED_CUDA_REDUCE_MAX(T) \
template <> \
void ReduceMax<T, CUDAContext>( \
const int num_dims, \
const int* dims, \
const int num_axes, \
const int* axes, \
const T* X, \
T* Y, \
CUDAContext* context) { \
ReduceTensorCUDA( \
num_dims, \
dims, \
num_axes, \
axes, \
cub::Max(), \
std::numeric_limits<T>::lowest(), \
X, \
Y, \
context); \
}
CAFFE2_SPECIALIZED_CUDA_REDUCE_MAX(std::int32_t)
CAFFE2_SPECIALIZED_CUDA_REDUCE_MAX(std::int64_t)
CAFFE2_SPECIALIZED_CUDA_REDUCE_MAX(float)
CAFFE2_SPECIALIZED_CUDA_REDUCE_MAX(double)
#undef CAFFE2_SPECIALIZED_CUDA_REDUCE_MAX
#define CAFFE2_SPECIALIZED_CUDA_REDUCE_SUM(T) \
template <> \
void ReduceSum<T, CUDAContext>( \
const int num_dims, \
const int* dims, \
const int num_axes, \
const int* axes, \
const T* X, \
T* Y, \
CUDAContext* context) { \
ReduceTensorCUDA( \
num_dims, dims, num_axes, axes, cub::Sum(), T(0), X, Y, context); \
}
CAFFE2_SPECIALIZED_CUDA_REDUCE_SUM(std::int32_t)
CAFFE2_SPECIALIZED_CUDA_REDUCE_SUM(std::int64_t)
CAFFE2_SPECIALIZED_CUDA_REDUCE_SUM(float)
CAFFE2_SPECIALIZED_CUDA_REDUCE_SUM(double)
#undef CAFFE2_SPECIALIZED_CUDA_REDUCE_SUM
#define CAFFE2_SPECIALIZED_CUDA_REDUCE_MEAN(T) \
template <> \
void ReduceMean<T, CUDAContext>( \
const int num_dims, \
const int* dims, \
const int num_axes, \
const int* axes, \
const T* X, \
T* Y, \
CUDAContext* context) { \
ReduceMeanCUDAImpl<T>(num_dims, dims, num_axes, axes, X, Y, context); \
}
CAFFE2_SPECIALIZED_CUDA_REDUCE_MEAN(float)
#undef CAFFE2_SPECIALIZED_CUDA_REDUCE_MEAN
namespace {
template <typename T, int D>
__global__ void BroadcastCUDAKernel(
const int Y_size,
const SimpleArray<int, D> X_strides,
const SimpleArray<int, D> Y_dims,
const T* X,
T* Y) {
CUDA_1D_KERNEL_LOOP(Y_index, Y_size) {
int X_index = 0;
int Y_index_val = Y_index;
#pragma unroll
for (int i = D - 1; i >= 0; --i) {
X_index += X_strides.data[i] == 0
? 0
: (Y_index_val % Y_dims.data[i]) * X_strides.data[i];
Y_index_val /= Y_dims.data[i];
}
#if __CUDA_ARCH__ >= 350
Y[Y_index] = __ldg(X + X_index);
#else
Y[Y_index] = X[X_index];
#endif
}
}
template <typename T, int D>
void BroadcastCUDAImpl(
const int X_ndim,
const int* X_dims,
const int* Y_dims,
const T* X,
T* Y,
CUDAContext* context) {
SimpleArray<int, D> X_strides_array;
SimpleArray<int, D> Y_dims_array;
const int d = D - X_ndim;
std::fill(X_strides_array.data, X_strides_array.data + d, 0);
int cur_stride = 1;
for (int i = D - 1; i >= d; --i) {
CAFFE_ENFORCE(X_dims[i - d] == 1 || X_dims[i - d] == Y_dims[i]);
X_strides_array.data[i] = X_dims[i - d] == 1 ? 0 : cur_stride;
cur_stride *= X_dims[i - d];
}
std::copy_n(Y_dims, D, Y_dims_array.data);
const int Y_size =
std::accumulate(Y_dims, Y_dims + D, 1, std::multiplies<int>());
BroadcastCUDAKernel<T, D>
<<<CAFFE_GET_BLOCKS(Y_size),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(Y_size, X_strides_array, Y_dims_array, X, Y);
}
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_BROADCAST(T) \
template <> \
void Broadcast<T, CUDAContext>( \
const int X_ndim, \
const int* X_dims, \
const int Y_ndim, \
const int* Y_dims, \
const T* X, \
T* Y, \
CUDAContext* context) { \
CAFFE_ENFORCE_LE(X_ndim, Y_ndim); \
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1( \
Y_ndim, BroadcastCUDAImpl, T, X_ndim, X_dims, Y_dims, X, Y, context); \
}
CAFFE2_SPECIALIZED_CUDA_BROADCAST(std::int32_t)
CAFFE2_SPECIALIZED_CUDA_BROADCAST(std::int64_t)
CAFFE2_SPECIALIZED_CUDA_BROADCAST(float)
CAFFE2_SPECIALIZED_CUDA_BROADCAST(double)
#undef CAFFE2_SPECIALIZED_CUDA_BROADCAST
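// Broadcasting trick used above (explanatory note): any X dimension of size
// 1 gets stride 0, so every Y index along that dimension maps back to the
// same X element. E.g. X dims = {3, 1} against Y dims = {3, 4} yields
// X_strides = {1, 0}: each row of X is replicated across Y's four columns.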
namespace {
template <typename T>
__global__ void RowwiseMomentsCUDAKernel(
const int rows,
const int cols,
const T* X,
T* mean,
T* variance) {
__shared__ typename BlockReduce<T>::TempStorage m_storage;
__shared__ typename BlockReduce<T>::TempStorage v_storage;
for (int i = blockIdx.x; i < rows; i += gridDim.x) {
T m_val = 0;
T v_val = 0;
for (int j = threadIdx.x; j < cols; j += blockDim.x) {
const int X_index = i * cols + j;
#if __CUDA_ARCH__ >= 350
m_val += __ldg(X + X_index);
v_val += __ldg(X + X_index) * __ldg(X + X_index);
#else
m_val += X[X_index];
v_val += X[X_index] * X[X_index];
#endif
}
m_val = BlockReduce<T>(m_storage).Reduce(m_val, cub::Sum());
v_val = BlockReduce<T>(v_storage).Reduce(v_val, cub::Sum());
if (threadIdx.x == 0) {
mean[i] = m_val / static_cast<T>(cols);
variance[i] = v_val / static_cast<T>(cols) - mean[i] * mean[i];
}
__syncthreads();
}
}
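// Note on the moments computation above: mean and variance come from a
// single pass via Var[x] = E[x^2] - E[x]^2. This is cheap but can lose
// precision to cancellation when |mean| is large relative to the spread;
// the same caveat applies to MomentsCUDAKernel below.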
template <typename T, int D>
__global__ void MomentsCUDAKernel(
const int outer_size,
const int inner_size,
SimpleArray<int, D> X_strides,
SimpleArray<FixedDivisor<int>, D> Y_dims,
const T* X,
T* mean,
T* variance) {
__shared__ typename BlockReduce<T>::TempStorage m_storage;
__shared__ typename BlockReduce<T>::TempStorage v_storage;
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
T m_val = 0;
T v_val = 0;
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
int X_index = 0;
int Y_index = i * inner_size + j;
#pragma unroll
for (int d = D - 1; d >= 0; --d) {
int r;
Y_dims.data[d].DivMod(Y_index, &Y_index, &r);
X_index += r * X_strides.data[d];
}
#if __CUDA_ARCH__ >= 350
m_val += __ldg(X + X_index);
v_val += __ldg(X + X_index) * __ldg(X + X_index);
#else
m_val += X[X_index];
v_val += X[X_index] * X[X_index];
#endif
}
m_val = BlockReduce<T>(m_storage).Reduce(m_val, cub::Sum());
v_val = BlockReduce<T>(v_storage).Reduce(v_val, cub::Sum());
if (threadIdx.x == 0) {
mean[i] = m_val / static_cast<T>(inner_size);
variance[i] = v_val / static_cast<T>(inner_size) - mean[i] * mean[i];
}
__syncthreads();
}
}
template <typename T, int D>
void MomentsCUDAImpl(
const int outer_size,
const int inner_size,
const int* dims,
const int* axes,
const T* X,
T* mean,
T* variance,
CUDAContext* context) {
SimpleArray<int, D> X_strides;
SimpleArray<FixedDivisor<int>, D> Y_dims;
utils::ComputeTransposedStrides(D, dims, axes, X_strides.data);
for (int i = 0; i < D; ++i) {
Y_dims.data[i] = FixedDivisor<int>(dims[axes[i]]);
}
MomentsCUDAKernel<T, D>
<<<std::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(
outer_size, inner_size, X_strides, Y_dims, X, mean, variance);
}
template <typename T>
void MomentsCUDA(
const int num_dims,
const int* dims,
const int num_axes,
const int* axes,
const T* X,
T* mean,
T* variance,
CUDAContext* context) {
CAFFE_ENFORCE_LE(num_axes, num_dims);
std::vector<int> transpose_axes(num_dims);
utils::ComputeTransposeAxesForReduceOp(
num_dims, num_axes, axes, transpose_axes.data());
const int pivot = num_dims - num_axes;
int outer_size = 1;
for (int i = 0; i < pivot; ++i) {
outer_size *= dims[transpose_axes[i]];
}
int inner_size = 1;
for (int i = pivot; i < num_dims; ++i) {
inner_size *= dims[transpose_axes[i]];
}
if (outer_size > 0 && inner_size > 0) {
if (transpose_axes[pivot] == pivot) {
RowwiseMomentsCUDAKernel<T>
<<<std::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(
outer_size, inner_size, X, mean, variance);
return;
}
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1(
num_dims,
MomentsCUDAImpl,
T,
outer_size,
inner_size,
dims,
transpose_axes.data(),
X,
mean,
variance,
context);
}
}
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_MOMENTS(T) \
template <> \
void Moments<T, CUDAContext>( \
const int num_dims, \
const int* dims, \
const int num_axes, \
const int* axes, \
const T* X, \
T* mean, \
T* variance, \
CUDAContext* context) { \
MomentsCUDA<T>( \
num_dims, dims, num_axes, axes, X, mean, variance, context); \
}
CAFFE2_SPECIALIZED_CUDA_MOMENTS(float)
#undef CAFFE2_SPECIALIZED_CUDA_MOMENTS
namespace {
template <typename T, int D>
__global__ void TransposeCUDAKernel(
const int size,
const SimpleArray<int, D> X_strides,
const SimpleArray<FixedDivisor<int>, D> Y_dims,
const T* X,
T* Y) {
CUDA_1D_KERNEL_LOOP(Y_index, size) {
int X_index = 0;
int Y_index_val = Y_index;
#pragma unroll
for (int i = D - 1; i >= 0; --i) {
int d;
Y_dims.data[i].DivMod(Y_index_val, &Y_index_val, &d);
X_index += d * X_strides.data[i];
}
#if __CUDA_ARCH__ >= 350
Y[Y_index] = __ldg(X + X_index);
#else
Y[Y_index] = X[X_index];
#endif
}
}
template <typename T, int D>
void TransposeCUDAImpl(
const int* dims,
const int* axes,
const T* X,
T* Y,
CUDAContext* context) {
SimpleArray<int, D> X_strides;
SimpleArray<FixedDivisor<int>, D> Y_dims;
utils::ComputeTransposedStrides(D, dims, axes, X_strides.data);
int size = 1;
for (int i = 0; i < D; ++i) {
Y_dims.data[i] = FixedDivisor<int>(dims[axes[i]]);
size *= dims[i];
}
TransposeCUDAKernel<T, D>
<<<CAFFE_GET_BLOCKS(size),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(size, X_strides, Y_dims, X, Y);
}
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(T) \
template <> \
void Transpose<T, CUDAContext>( \
const int ndim, \
const int* dims, \
const int* axes, \
const T* X, \
T* Y, \
CUDAContext* context) { \
if (utils::IsIdentityPermutation(ndim, axes)) { \
const int size = \
std::accumulate(dims, dims + ndim, 1, std::multiplies<int>()); \
context->template Copy<T, CUDAContext, CUDAContext>(size, X, Y); \
return; \
} \
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1( \
ndim, TransposeCUDAImpl, T, dims, axes, X, Y, context); \
}
CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(float)
CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(double)
CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(int)
CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(TIndex)
#undef CAFFE2_SPECIALIZED_CUDA_TRANSPOSE
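// Transpose index math (explanatory note): for each output index the kernel
// peels off output coordinates innermost-first with FixedDivisor::DivMod and
// accumulates the matching transposed input strides. Illustrative 2-D trace:
// dims = {2, 3}, axes = {1, 0} give X_strides = {1, 3}; output index 4
// decodes to coordinates (2, 0), so Y[4] = X[2 * 1 + 0 * 3] = X[2],
// i.e. Y[2][0] = X[0][2].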
} // namespace math
} // namespace caffe2
|
382941b2a4a76027fd517b00a6834736d2ea8c82.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Mark Sutherland, Josh San Miguel
// - Univ of Toronto
// Calls fast GPU implementations to create the requested GPU images (blur, resize, and sobel X/Y).
#include <stdio.h>
#include <stdlib.h>
#include <ctype.h>
#include <fcntl.h>
#include <unistd.h>
#include "sdvbs_common.h"
#include "../kernels/imageBlur_kernel.h"
#include "../kernels/imageBlur_kernel_stage2.h"
#include "../kernels/imageResize_kernel.h"
#include "../kernels/imageResize_kernel_st2.h"
#include "../kernels/calcSobel_dX_kernel.h"
#include "../kernels/calcSobel_dY_kernel.h"
ImagePyramid* createImgPyramid(I2D* imageIn, hipStream_t d_stream)
{
int rows, cols;
rows = imageIn->height;
cols = imageIn->width;
// setup kernels, thread objects, and GPU memory
int weightedKernel[5] = {1,4,6,4,1};
int sobelKernel_1[3] = {1,2,1};
int sobelKernel_2[3] = {1,0,-1};
//dim3 nblocks(4,3);
dim3 threadsPerBlock(32,32);
// dynamically calculate how many thread blocks to launch
int rowsIn = floor((rows+1)/4);
int colsIn = floor((cols+1)/4);
int resizedRows = floor((rows+1)/2);
int resizedCols = floor((cols+1)/2);
int nBlocksWide = colsIn/32;
if (colsIn % 32) nBlocksWide++;
int nBlocksTall = rowsIn/32;
if (rowsIn % 32) nBlocksTall++;
dim3 nblocks(nBlocksWide,nBlocksTall);
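// Note: the two "% 32" checks above are a ceiling division -- nBlocksWide is
// (colsIn + 31) / 32, and likewise nBlocksTall -- and floor() is applied to
// results of integer division, so it is already a no-op there.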
//printf("Calculated block dimensions as: %d x %d\n",nBlocksWide,nBlocksTall);
int* d_inputPixels;
float* d_outputPixels;
float* d_intermediate;
int* d_weightedKernel,*sobel_kern_1,*sobel_kern_2;
float* resizeInt, *dxInt, *dyInt, *dyInt_small, *dxInt_small;
float* resizeOutput, *dxOutput, *dyOutput, *dxOutput_small, *dyOutput_small;
//Pin the host memory arrays so host-to-device transfers run at full speed.
HANDLE_ERROR( hipHostRegister(&(imageIn->data[0]),rows*cols*sizeof(int),hipHostRegisterPortable) );
HANDLE_ERROR( hipHostRegister(&weightedKernel[0],5*sizeof(int),hipHostRegisterPortable) ) ;
HANDLE_ERROR( hipHostRegister(&sobelKernel_1[0],3*sizeof(int),hipHostRegisterPortable) );
HANDLE_ERROR( hipHostRegister(&sobelKernel_2[0],3*sizeof(int),hipHostRegisterPortable) );
// SET UP MEMORY - local data
hipMalloc((void**)&(imageIn->d_weightedKernel),5*sizeof(int));
hipMalloc((void**)&(imageIn->sobel_kern_1),3*sizeof(int));
hipMalloc((void**)&(imageIn->sobel_kern_2),3*sizeof(int));
d_weightedKernel = imageIn->d_weightedKernel;
sobel_kern_1 = imageIn->sobel_kern_1;
sobel_kern_2 = imageIn->sobel_kern_2;
hipMemcpyAsync(d_weightedKernel,&(weightedKernel[0]),5*sizeof(int),hipMemcpyHostToDevice,d_stream);
hipMemcpyAsync(sobel_kern_1,&(sobelKernel_1[0]),3*sizeof(int),hipMemcpyHostToDevice,d_stream);
hipMemcpyAsync(sobel_kern_2,&(sobelKernel_2[0]),3*sizeof(int),hipMemcpyHostToDevice,d_stream);
hipStreamSynchronize(d_stream);
// SET UP MEMORY
hipMalloc((void**)&(imageIn->d_inputPixels),rows*cols*sizeof(int));
hipMalloc((void**)&(imageIn->d_outputPixels),rows*cols*sizeof(float));
hipMalloc((void**)&(imageIn->d_intermediate),rows*cols*sizeof(float));
hipMalloc((void**)&(imageIn->resizeInt),rows*resizedCols*sizeof(float));
hipMalloc((void**)&(imageIn->dxInt),rows*cols*sizeof(float));
hipMalloc((void**)&(imageIn->dyInt),rows*cols*sizeof(float));
hipMalloc((void**)&(imageIn->resizeOutput),resizedRows*resizedCols*sizeof(float));
hipMalloc((void**)&(imageIn->dxOutput),rows*cols*sizeof(float));
hipMalloc((void**)&(imageIn->dyOutput),rows*cols*sizeof(float));
hipMalloc((void**)&(imageIn->dxOutput_small),resizedRows*resizedCols*sizeof(float));
hipMalloc((void**)&(imageIn->dyOutput_small),resizedRows*resizedCols*sizeof(float));
hipMalloc((void**)&(imageIn->dxInt_small),resizedRows*resizedCols*sizeof(float));
hipMalloc((void**)&(imageIn->dyInt_small),resizedRows*resizedCols*sizeof(float));
d_inputPixels = imageIn->d_inputPixels;
d_outputPixels = imageIn->d_outputPixels;
d_intermediate = imageIn->d_intermediate;
resizeInt = imageIn->resizeInt;
dxInt = imageIn->dxInt;
dyInt = imageIn->dyInt;
dyInt_small = imageIn->dyInt_small;
dxInt_small = imageIn->dxInt_small;
resizeOutput = imageIn->resizeOutput;
dxOutput = imageIn->dxOutput;
dyOutput = imageIn->dyOutput;
dxOutput_small = imageIn->dxOutput_small;
dyOutput_small = imageIn->dyOutput_small;
// Copy in input data and input kernels.
hipMemcpyAsync(d_inputPixels,&(imageIn->data[0]),rows*cols*sizeof(int),hipMemcpyHostToDevice,d_stream);
// clear outputs since we only access some of these pixels, others must be blank
hipMemsetAsync(d_outputPixels,0,rows*cols*sizeof(float),d_stream);
hipMemsetAsync(d_intermediate,0,rows*cols*sizeof(float),d_stream);
hipMemsetAsync(resizeOutput,0,resizedRows*resizedCols*sizeof(float),d_stream);
hipMemsetAsync(resizeInt,0,rows*resizedCols*sizeof(float),d_stream);
hipMemsetAsync(dxOutput,0,rows*cols*sizeof(float),d_stream);
hipMemsetAsync(dyOutput,0,rows*cols*sizeof(float),d_stream);
hipMemsetAsync(dxInt,0,rows*cols*sizeof(float),d_stream);
hipMemsetAsync(dyInt,0,rows*cols*sizeof(float),d_stream);
hipMemsetAsync(dxOutput_small,0,resizedRows*resizedCols*sizeof(float),d_stream);
hipMemsetAsync(dyOutput_small,0,resizedRows*resizedCols*sizeof(float),d_stream);
hipMemsetAsync(dxInt_small,0,resizedRows*resizedCols*sizeof(float),d_stream);
hipMemsetAsync(dyInt_small,0,resizedRows*resizedCols*sizeof(float),d_stream);
/* Kernel call */
hipLaunchKernelGGL(( blurKernel_st1), dim3(nblocks),dim3(threadsPerBlock),0,d_stream, d_inputPixels,d_intermediate,d_weightedKernel,cols,rows);
hipLaunchKernelGGL(( blurKernel_st2), dim3(nblocks),dim3(threadsPerBlock),0,d_stream, d_outputPixels,d_intermediate,d_weightedKernel,cols,rows);
/* Launch the remaining kernels on the same stream, which serializes them: the resize and full-resolution Sobel kernels read their input from d_outputPixels, while the level-2 Sobel kernels consume the resizeOutput written by the resize stage. */
hipLaunchKernelGGL(( resizeKernel_st1), dim3(nblocks),dim3(threadsPerBlock),0,d_stream, d_outputPixels,resizeInt,d_weightedKernel,rows,cols,resizedRows,resizedCols);
hipLaunchKernelGGL(( resizeKernel_st2), dim3(nblocks),dim3(threadsPerBlock),0,d_stream, resizeOutput,resizeInt,d_weightedKernel,rows,cols,resizedRows,resizedCols);
/* Calc dX Sobel filter */
hipLaunchKernelGGL(( calcSobel_dX_k1), dim3(nblocks),dim3(threadsPerBlock),0,d_stream, d_outputPixels,dxInt,sobel_kern_1,sobel_kern_2,cols,rows);
hipLaunchKernelGGL(( calcSobel_dX_k2), dim3(nblocks),dim3(threadsPerBlock),0,d_stream, dxInt,dxOutput,sobel_kern_1,sobel_kern_2,cols,rows);
hipLaunchKernelGGL(( calcSobel_dY_k1), dim3(nblocks),dim3(threadsPerBlock),0,d_stream, d_outputPixels,dyInt,sobel_kern_1,sobel_kern_2,cols,rows);
hipLaunchKernelGGL(( calcSobel_dY_k2), dim3(nblocks),dim3(threadsPerBlock),0,d_stream, dyInt,dyOutput,sobel_kern_1,sobel_kern_2,cols,rows);
/* Calc level 2 sobel filter (on resized images) */
hipLaunchKernelGGL(( calcSobel_dX_k1), dim3(nblocks),dim3(threadsPerBlock),0,d_stream, resizeOutput,dxInt_small,sobel_kern_1,sobel_kern_2,resizedCols,resizedRows);
hipLaunchKernelGGL(( calcSobel_dX_k2), dim3(nblocks),dim3(threadsPerBlock),0,d_stream, dxInt_small,dxOutput_small,sobel_kern_1,sobel_kern_2,resizedCols,resizedRows);
hipLaunchKernelGGL(( calcSobel_dY_k1), dim3(nblocks),dim3(threadsPerBlock),0,d_stream, resizeOutput,dyInt_small,sobel_kern_1,sobel_kern_2,resizedCols,resizedRows);
hipLaunchKernelGGL(( calcSobel_dY_k2), dim3(nblocks),dim3(threadsPerBlock),0,d_stream, dyInt_small,dyOutput_small,sobel_kern_1,sobel_kern_2,resizedCols,resizedRows);
// deep copy into the destination F2D structures
ImagePyramid* retStruct = (ImagePyramid*)malloc(sizeof(ImagePyramid));
// allocate the result sub-arrays, then pin them below so the async device-to-host copies are truly asynchronous
retStruct->blurredImg = fSetArray(rows,cols,0);
retStruct->resizedImg = fSetArray(resizedRows,resizedCols,0);
retStruct->horizEdge = fSetArray(rows,cols,0);
retStruct->vertEdge = fSetArray(rows,cols,0);
retStruct->horizEdge_small = fSetArray(resizedRows,resizedCols,0);
retStruct->vertEdge_small = fSetArray(resizedRows,resizedCols,0);
retStruct->tmp = fSetArray(rows,cols,0);
HANDLE_ERROR( hipHostRegister(&(retStruct->blurredImg->data[0]),rows*cols*sizeof(float),hipHostRegisterPortable) );
HANDLE_ERROR( hipHostRegister(&(retStruct->resizedImg->data[0]),resizedRows*resizedCols*sizeof(float),hipHostRegisterPortable) );
HANDLE_ERROR( hipHostRegister(&(retStruct->horizEdge->data[0]),rows*cols*sizeof(float),hipHostRegisterPortable) );
HANDLE_ERROR( hipHostRegister(&(retStruct->vertEdge->data[0]),rows*cols*sizeof(float),hipHostRegisterPortable) );
HANDLE_ERROR( hipHostRegister(&(retStruct->horizEdge_small->data[0]),resizedRows*resizedCols*sizeof(float),hipHostRegisterPortable) );
HANDLE_ERROR( hipHostRegister(&(retStruct->vertEdge_small->data[0]),resizedRows*resizedCols*sizeof(float),hipHostRegisterPortable) );
hipMemcpyAsync((void*)&(retStruct->blurredImg->data[0]),d_outputPixels,rows*cols*sizeof(float),hipMemcpyDeviceToHost,d_stream);
hipMemcpyAsync((void*)&(retStruct->resizedImg->data[0]),resizeOutput,resizedRows*resizedCols*sizeof(float),hipMemcpyDeviceToHost,d_stream);
hipMemcpyAsync((void*)&(retStruct->vertEdge->data[0]),dxOutput,rows*cols*sizeof(float),hipMemcpyDeviceToHost,d_stream);
hipMemcpyAsync((void*)&(retStruct->horizEdge->data[0]),dyOutput,rows*cols*sizeof(float),hipMemcpyDeviceToHost,d_stream);
hipMemcpyAsync((void*)&(retStruct->vertEdge_small->data[0]),dxOutput_small,resizedRows*resizedCols*sizeof(float),hipMemcpyDeviceToHost,d_stream);
hipMemcpyAsync((void*)&(retStruct->horizEdge_small->data[0]),dyOutput_small,resizedRows*resizedCols*sizeof(float),hipMemcpyDeviceToHost,d_stream);
// UNSET Host memory pinning - local data
hipHostUnregister(&weightedKernel[0]);
hipHostUnregister(&sobelKernel_1[0]);
hipHostUnregister(&sobelKernel_2[0]);
return retStruct;
}
void destroyImgPyramid(I2D* imageIn, ImagePyramid *retStruct)
{
// UNSET Host memory pinning.
hipHostUnregister(&(imageIn->data[0]));
hipHostUnregister(&(retStruct->blurredImg->data[0]));
hipHostUnregister(&(retStruct->resizedImg->data[0]));
hipHostUnregister(&(retStruct->horizEdge->data[0]));
hipHostUnregister(&(retStruct->vertEdge->data[0]));
hipHostUnregister(&(retStruct->horizEdge_small->data[0]));
hipHostUnregister(&(retStruct->vertEdge_small->data[0]));
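// Note: retStruct->tmp->data is never registered in createImgPyramid above,
// so the unregister on the next line appears to return an (ignored) error
// rather than unpin anything.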
hipHostUnregister(&(retStruct->tmp->data[0]));
hipFree(imageIn->d_weightedKernel);
hipFree(imageIn->sobel_kern_1);
hipFree(imageIn->sobel_kern_2);
hipFree(imageIn->resizeInt);
hipFree(imageIn->dxInt);
hipFree(imageIn->dyInt);
hipFree(imageIn->resizeOutput);
hipFree(imageIn->dxOutput);
hipFree(imageIn->dyOutput);
hipFree(imageIn->d_inputPixels);
hipFree(imageIn->d_outputPixels);
hipFree(imageIn->d_intermediate);
hipFree(imageIn->dxInt_small);
hipFree(imageIn->dyInt_small);
hipFree(imageIn->dxOutput_small);
hipFree(imageIn->dyOutput_small);
}
| 382941b2a4a76027fd517b00a6834736d2ea8c82.cu | // Mark Sutherland, Josh San Miguel
// - Univ of Toronto
// Calls fast GPU implementations to create the requested GPU images (blur, resize, and sobel X/Y).
#include <stdio.h>
#include <stdlib.h>
#include <ctype.h>
#include <fcntl.h>
#include <unistd.h>
#include "sdvbs_common.h"
#include "../kernels/imageBlur_kernel.h"
#include "../kernels/imageBlur_kernel_stage2.h"
#include "../kernels/imageResize_kernel.h"
#include "../kernels/imageResize_kernel_st2.h"
#include "../kernels/calcSobel_dX_kernel.h"
#include "../kernels/calcSobel_dY_kernel.h"
ImagePyramid* createImgPyramid(I2D* imageIn, cudaStream_t d_stream)
{
int rows, cols;
rows = imageIn->height;
cols = imageIn->width;
// setup kernels, thread objects, and GPU memory
int weightedKernel[5] = {1,4,6,4,1};
int sobelKernel_1[3] = {1,2,1};
int sobelKernel_2[3] = {1,0,-1};
//dim3 nblocks(4,3);
dim3 threadsPerBlock(32,32);
// dynamically calculate how many thread blocks to launch
int rowsIn = floor((rows+1)/4);
int colsIn = floor((cols+1)/4);
int resizedRows = floor((rows+1)/2);
int resizedCols = floor((cols+1)/2);
int nBlocksWide = colsIn/32;
if (colsIn % 32) nBlocksWide++;
int nBlocksTall = rowsIn/32;
if (rowsIn % 32) nBlocksTall++;
dim3 nblocks(nBlocksWide,nBlocksTall);
//printf("Calculated block dimensions as: %d x %d\n",nBlocksWide,nBlocksTall);
int* d_inputPixels;
float* d_outputPixels;
float* d_intermediate;
int* d_weightedKernel,*sobel_kern_1,*sobel_kern_2;
float* resizeInt, *dxInt, *dyInt, *dyInt_small, *dxInt_small;
float* resizeOutput, *dxOutput, *dyOutput, *dxOutput_small, *dyOutput_small;
//Pin the host memory arrays so host-to-device transfers run at full speed.
HANDLE_ERROR( cudaHostRegister(&(imageIn->data[0]),rows*cols*sizeof(int),cudaHostRegisterPortable) );
HANDLE_ERROR( cudaHostRegister(&weightedKernel[0],5*sizeof(int),cudaHostRegisterPortable) ) ;
HANDLE_ERROR( cudaHostRegister(&sobelKernel_1[0],3*sizeof(int),cudaHostRegisterPortable) );
HANDLE_ERROR( cudaHostRegister(&sobelKernel_2[0],3*sizeof(int),cudaHostRegisterPortable) );
// SET UP MEMORY - local data
cudaMalloc((void**)&(imageIn->d_weightedKernel),5*sizeof(int));
cudaMalloc((void**)&(imageIn->sobel_kern_1),3*sizeof(int));
cudaMalloc((void**)&(imageIn->sobel_kern_2),3*sizeof(int));
d_weightedKernel = imageIn->d_weightedKernel;
sobel_kern_1 = imageIn->sobel_kern_1;
sobel_kern_2 = imageIn->sobel_kern_2;
cudaMemcpyAsync(d_weightedKernel,&(weightedKernel[0]),5*sizeof(int),cudaMemcpyHostToDevice,d_stream);
cudaMemcpyAsync(sobel_kern_1,&(sobelKernel_1[0]),3*sizeof(int),cudaMemcpyHostToDevice,d_stream);
cudaMemcpyAsync(sobel_kern_2,&(sobelKernel_2[0]),3*sizeof(int),cudaMemcpyHostToDevice,d_stream);
cudaStreamSynchronize(d_stream);
// SET UP MEMORY
cudaMalloc((void**)&(imageIn->d_inputPixels),rows*cols*sizeof(int));
cudaMalloc((void**)&(imageIn->d_outputPixels),rows*cols*sizeof(float));
cudaMalloc((void**)&(imageIn->d_intermediate),rows*cols*sizeof(float));
cudaMalloc((void**)&(imageIn->resizeInt),rows*resizedCols*sizeof(float));
cudaMalloc((void**)&(imageIn->dxInt),rows*cols*sizeof(float));
cudaMalloc((void**)&(imageIn->dyInt),rows*cols*sizeof(float));
cudaMalloc((void**)&(imageIn->resizeOutput),resizedRows*resizedCols*sizeof(float));
cudaMalloc((void**)&(imageIn->dxOutput),rows*cols*sizeof(float));
cudaMalloc((void**)&(imageIn->dyOutput),rows*cols*sizeof(float));
cudaMalloc((void**)&(imageIn->dxOutput_small),resizedRows*resizedCols*sizeof(float));
cudaMalloc((void**)&(imageIn->dyOutput_small),resizedRows*resizedCols*sizeof(float));
cudaMalloc((void**)&(imageIn->dxInt_small),resizedRows*resizedCols*sizeof(float));
cudaMalloc((void**)&(imageIn->dyInt_small),resizedRows*resizedCols*sizeof(float));
d_inputPixels = imageIn->d_inputPixels;
d_outputPixels = imageIn->d_outputPixels;
d_intermediate = imageIn->d_intermediate;
resizeInt = imageIn->resizeInt;
dxInt = imageIn->dxInt;
dyInt = imageIn->dyInt;
dyInt_small = imageIn->dyInt_small;
dxInt_small = imageIn->dxInt_small;
resizeOutput = imageIn->resizeOutput;
dxOutput = imageIn->dxOutput;
dyOutput = imageIn->dyOutput;
dxOutput_small = imageIn->dxOutput_small;
dyOutput_small = imageIn->dyOutput_small;
// Copy in input data and input kernels.
cudaMemcpyAsync(d_inputPixels,&(imageIn->data[0]),rows*cols*sizeof(int),cudaMemcpyHostToDevice,d_stream);
// clear outputs since we only access some of these pixels, others must be blank
cudaMemsetAsync(d_outputPixels,0,rows*cols*sizeof(float),d_stream);
cudaMemsetAsync(d_intermediate,0,rows*cols*sizeof(float),d_stream);
cudaMemsetAsync(resizeOutput,0,resizedRows*resizedCols*sizeof(float),d_stream);
cudaMemsetAsync(resizeInt,0,rows*resizedCols*sizeof(float),d_stream);
cudaMemsetAsync(dxOutput,0,rows*cols*sizeof(float),d_stream);
cudaMemsetAsync(dyOutput,0,rows*cols*sizeof(float),d_stream);
cudaMemsetAsync(dxInt,0,rows*cols*sizeof(float),d_stream);
cudaMemsetAsync(dyInt,0,rows*cols*sizeof(float),d_stream);
cudaMemsetAsync(dxOutput_small,0,resizedRows*resizedCols*sizeof(float),d_stream);
cudaMemsetAsync(dyOutput_small,0,resizedRows*resizedCols*sizeof(float),d_stream);
cudaMemsetAsync(dxInt_small,0,resizedRows*resizedCols*sizeof(float),d_stream);
cudaMemsetAsync(dyInt_small,0,resizedRows*resizedCols*sizeof(float),d_stream);
/* Kernel call */
blurKernel_st1<<<nblocks,threadsPerBlock,0,d_stream>>>(d_inputPixels,d_intermediate,d_weightedKernel,cols,rows);
blurKernel_st2<<<nblocks,threadsPerBlock,0,d_stream>>>(d_outputPixels,d_intermediate,d_weightedKernel,cols,rows);
/* Launch the remaining kernels on the same stream, which serializes them: the resize and full-resolution Sobel kernels read their input from d_outputPixels, while the level-2 Sobel kernels consume the resizeOutput written by the resize stage. */
resizeKernel_st1<<<nblocks,threadsPerBlock,0,d_stream>>>(d_outputPixels,resizeInt,d_weightedKernel,rows,cols,resizedRows,resizedCols);
resizeKernel_st2<<<nblocks,threadsPerBlock,0,d_stream>>>(resizeOutput,resizeInt,d_weightedKernel,rows,cols,resizedRows,resizedCols);
/* Calc dX Sobel filter */
calcSobel_dX_k1<<<nblocks,threadsPerBlock,0,d_stream>>>(d_outputPixels,dxInt,sobel_kern_1,sobel_kern_2,cols,rows);
calcSobel_dX_k2<<<nblocks,threadsPerBlock,0,d_stream>>>(dxInt,dxOutput,sobel_kern_1,sobel_kern_2,cols,rows);
calcSobel_dY_k1<<<nblocks,threadsPerBlock,0,d_stream>>>(d_outputPixels,dyInt,sobel_kern_1,sobel_kern_2,cols,rows);
calcSobel_dY_k2<<<nblocks,threadsPerBlock,0,d_stream>>>(dyInt,dyOutput,sobel_kern_1,sobel_kern_2,cols,rows);
/* Calc level 2 sobel filter (on resized images) */
calcSobel_dX_k1<<<nblocks,threadsPerBlock,0,d_stream>>>(resizeOutput,dxInt_small,sobel_kern_1,sobel_kern_2,resizedCols,resizedRows);
calcSobel_dX_k2<<<nblocks,threadsPerBlock,0,d_stream>>>(dxInt_small,dxOutput_small,sobel_kern_1,sobel_kern_2,resizedCols,resizedRows);
calcSobel_dY_k1<<<nblocks,threadsPerBlock,0,d_stream>>>(resizeOutput,dyInt_small,sobel_kern_1,sobel_kern_2,resizedCols,resizedRows);
calcSobel_dY_k2<<<nblocks,threadsPerBlock,0,d_stream>>>(dyInt_small,dyOutput_small,sobel_kern_1,sobel_kern_2,resizedCols,resizedRows);
// deep copy into the destination F2D structures
ImagePyramid* retStruct = (ImagePyramid*)malloc(sizeof(ImagePyramid));
// allocate the result sub-arrays, then pin them below so the async device-to-host copies are truly asynchronous
retStruct->blurredImg = fSetArray(rows,cols,0);
retStruct->resizedImg = fSetArray(resizedRows,resizedCols,0);
retStruct->horizEdge = fSetArray(rows,cols,0);
retStruct->vertEdge = fSetArray(rows,cols,0);
retStruct->horizEdge_small = fSetArray(resizedRows,resizedCols,0);
retStruct->vertEdge_small = fSetArray(resizedRows,resizedCols,0);
retStruct->tmp = fSetArray(rows,cols,0);
HANDLE_ERROR( cudaHostRegister(&(retStruct->blurredImg->data[0]),rows*cols*sizeof(float),cudaHostRegisterPortable) );
HANDLE_ERROR( cudaHostRegister(&(retStruct->resizedImg->data[0]),resizedRows*resizedCols*sizeof(float),cudaHostRegisterPortable) );
HANDLE_ERROR( cudaHostRegister(&(retStruct->horizEdge->data[0]),rows*cols*sizeof(float),cudaHostRegisterPortable) );
HANDLE_ERROR( cudaHostRegister(&(retStruct->vertEdge->data[0]),rows*cols*sizeof(float),cudaHostRegisterPortable) );
HANDLE_ERROR( cudaHostRegister(&(retStruct->horizEdge_small->data[0]),resizedRows*resizedCols*sizeof(float),cudaHostRegisterPortable) );
HANDLE_ERROR( cudaHostRegister(&(retStruct->vertEdge_small->data[0]),resizedRows*resizedCols*sizeof(float),cudaHostRegisterPortable) );
cudaMemcpyAsync((void*)&(retStruct->blurredImg->data[0]),d_outputPixels,rows*cols*sizeof(float),cudaMemcpyDeviceToHost,d_stream);
cudaMemcpyAsync((void*)&(retStruct->resizedImg->data[0]),resizeOutput,resizedRows*resizedCols*sizeof(float),cudaMemcpyDeviceToHost,d_stream);
cudaMemcpyAsync((void*)&(retStruct->vertEdge->data[0]),dxOutput,rows*cols*sizeof(float),cudaMemcpyDeviceToHost,d_stream);
cudaMemcpyAsync((void*)&(retStruct->horizEdge->data[0]),dyOutput,rows*cols*sizeof(float),cudaMemcpyDeviceToHost,d_stream);
cudaMemcpyAsync((void*)&(retStruct->vertEdge_small->data[0]),dxOutput_small,resizedRows*resizedCols*sizeof(float),cudaMemcpyDeviceToHost,d_stream);
cudaMemcpyAsync((void*)&(retStruct->horizEdge_small->data[0]),dyOutput_small,resizedRows*resizedCols*sizeof(float),cudaMemcpyDeviceToHost,d_stream);
// UNSET Host memory pinning - local data
cudaHostUnregister(&weightedKernel[0]);
cudaHostUnregister(&sobelKernel_1[0]);
cudaHostUnregister(&sobelKernel_2[0]);
return retStruct;
}
void destroyImgPyramid(I2D* imageIn, ImagePyramid *retStruct)
{
// UNSET Host memory pinning.
cudaHostUnregister(&(imageIn->data[0]));
cudaHostUnregister(&(retStruct->blurredImg->data[0]));
cudaHostUnregister(&(retStruct->resizedImg->data[0]));
cudaHostUnregister(&(retStruct->horizEdge->data[0]));
cudaHostUnregister(&(retStruct->vertEdge->data[0]));
cudaHostUnregister(&(retStruct->horizEdge_small->data[0]));
cudaHostUnregister(&(retStruct->vertEdge_small->data[0]));
cudaHostUnregister(&(retStruct->tmp->data[0]));
cudaFree(imageIn->d_weightedKernel);
cudaFree(imageIn->sobel_kern_1);
cudaFree(imageIn->sobel_kern_2);
cudaFree(imageIn->resizeInt);
cudaFree(imageIn->dxInt);
cudaFree(imageIn->dyInt);
cudaFree(imageIn->resizeOutput);
cudaFree(imageIn->dxOutput);
cudaFree(imageIn->dyOutput);
cudaFree(imageIn->d_inputPixels);
cudaFree(imageIn->d_outputPixels);
cudaFree(imageIn->d_intermediate);
cudaFree(imageIn->dxInt_small);
cudaFree(imageIn->dyInt_small);
cudaFree(imageIn->dxOutput_small);
cudaFree(imageIn->dyOutput_small);
}
|
f3c225a0c22b81ef8075a7db845b42f20000711d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/******************************************************************************
*
* Copyright 2010 Duane Merrill
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*
*
* AUTHORS' REQUEST:
*
* If you use|reference|benchmark this code, please cite our Technical
* Report (http://www.cs.virginia.edu/~dgm4d/papers/RadixSortTR.pdf):
*
* @TechReport{ Merrill:Sorting:2010,
* author = "Duane Merrill and Andrew Grimshaw",
* title = "Revisiting Sorting for GPGPU Stream Architectures",
* year = "2010",
* institution = "University of Virginia, Department of Computer Science",
* address = "Charlottesville, VA, USA",
* number = "CS2010-03"
* }
*
* For more information, see our Google Code project site:
* http://code.google.com/p/back40computing/
*
* Thanks!
*
******************************************************************************/
/******************************************************************************
* Bottom-level digit-reduction/counting kernel
******************************************************************************/
#pragma once
#include "radixsort_kernel_common.cu"
namespace b40c {
/******************************************************************************
* Cycle-processing Routines
******************************************************************************/
template <int BYTE>
__device__ __forceinline__ int DecodeInt(int encoded){
int retval;
ExtractKeyBits<int, BYTE * 8, 8>::Extract(retval, encoded);
return retval;
}
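// The encoded int packs four 8-bit digit counters, one per byte, and
// DecodeInt<BYTE> extracts a single counter. Illustrative example:
// DecodeInt<2>(0x04030201) == 0x03.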
//-----------------------------------------------------------------------------
template <int PARTIAL>
__device__ __forceinline__ void ReduceLanePartial(
int local_counts[4],
int *scan_lanes,
int lane_offset)
{
unsigned char* encoded = (unsigned char *) &scan_lanes[lane_offset + (PARTIAL * B40C_WARP_THREADS)];
local_counts[0] += encoded[0];
local_counts[1] += encoded[1];
local_counts[2] += encoded[2];
local_counts[3] += encoded[3];
}
template <int LANE, int REDUCTION_LANES, int REDUCTION_LANES_PER_WARP, int REDUCTION_PARTIALS_PER_LANE, int LANE_PARTIALS_PER_THREAD>
__device__ __forceinline__ void ReduceLanePartials(
int local_counts[REDUCTION_LANES_PER_WARP][4],
int *scan_lanes,
int lane_offset)
{
lane_offset += (LANE * REDUCTION_PARTIALS_PER_LANE * B40C_RADIXSORT_WARPS);
if ((B40C_RADIXSORT_WARPS < REDUCTION_LANES) || (lane_offset < REDUCTION_LANES * REDUCTION_PARTIALS_PER_LANE)) {
if (LANE_PARTIALS_PER_THREAD > 0) ReduceLanePartial<0>(local_counts[LANE], scan_lanes, lane_offset);
if (LANE_PARTIALS_PER_THREAD > 1) ReduceLanePartial<1>(local_counts[LANE], scan_lanes, lane_offset);
if (LANE_PARTIALS_PER_THREAD > 2) ReduceLanePartial<2>(local_counts[LANE], scan_lanes, lane_offset);
if (LANE_PARTIALS_PER_THREAD > 3) ReduceLanePartial<3>(local_counts[LANE], scan_lanes, lane_offset);
if (LANE_PARTIALS_PER_THREAD > 4) ReduceLanePartial<4>(local_counts[LANE], scan_lanes, lane_offset);
if (LANE_PARTIALS_PER_THREAD > 5) ReduceLanePartial<5>(local_counts[LANE], scan_lanes, lane_offset);
if (LANE_PARTIALS_PER_THREAD > 6) ReduceLanePartial<6>(local_counts[LANE], scan_lanes, lane_offset);
if (LANE_PARTIALS_PER_THREAD > 7) ReduceLanePartial<7>(local_counts[LANE], scan_lanes, lane_offset);
}
}
template <
int REDUCTION_LANES,
int REDUCTION_LANES_PER_WARP,
int LOG_REDUCTION_PARTIALS_PER_LANE,
int REDUCTION_PARTIALS_PER_LANE>
__device__ __forceinline__ void ReduceEncodedCounts(
int local_counts[REDUCTION_LANES_PER_WARP][4],
int *scan_lanes,
int warp_id,
int warp_idx)
{
const int LANE_PARTIALS_PER_THREAD = REDUCTION_PARTIALS_PER_LANE / B40C_WARP_THREADS;
SuppressUnusedConstantWarning(LANE_PARTIALS_PER_THREAD);
int lane_offset = (warp_id << LOG_REDUCTION_PARTIALS_PER_LANE) + warp_idx; // my warp's (first-lane) reduction offset
if (REDUCTION_LANES_PER_WARP > 0) ReduceLanePartials<0, REDUCTION_LANES, REDUCTION_LANES_PER_WARP, REDUCTION_PARTIALS_PER_LANE, LANE_PARTIALS_PER_THREAD>(local_counts, scan_lanes, lane_offset);
if (REDUCTION_LANES_PER_WARP > 1) ReduceLanePartials<1, REDUCTION_LANES, REDUCTION_LANES_PER_WARP, REDUCTION_PARTIALS_PER_LANE, LANE_PARTIALS_PER_THREAD>(local_counts, scan_lanes, lane_offset);
if (REDUCTION_LANES_PER_WARP > 2) ReduceLanePartials<2, REDUCTION_LANES, REDUCTION_LANES_PER_WARP, REDUCTION_PARTIALS_PER_LANE, LANE_PARTIALS_PER_THREAD>(local_counts, scan_lanes, lane_offset);
if (REDUCTION_LANES_PER_WARP > 3) ReduceLanePartials<3, REDUCTION_LANES, REDUCTION_LANES_PER_WARP, REDUCTION_PARTIALS_PER_LANE, LANE_PARTIALS_PER_THREAD>(local_counts, scan_lanes, lane_offset);
}
template <typename K, int RADIX_BITS, int REDUCTION_PARTIALS_PER_LANE, int BIT, typename PreprocessFunctor>
__device__ __forceinline__ void Bucket(
K key,
int *encoded_reduction_col,
PreprocessFunctor preprocess = PreprocessFunctor())
{
preprocess(key);
int lane;
ExtractKeyBits<K, BIT + 2, RADIX_BITS - 2>::Extract(lane, key);
if (B40C_FERMI(__CUDA_ARCH__)) {
// GF100+ has special bit-extraction instructions (instead of shift+mask)
int quad_byte;
if (RADIX_BITS < 2) {
ExtractKeyBits<K, BIT, 1>::Extract(quad_byte, key);
} else {
ExtractKeyBits<K, BIT, 2>::Extract(quad_byte, key);
}
unsigned char *encoded_col = (unsigned char *) &encoded_reduction_col[FastMul(lane, REDUCTION_PARTIALS_PER_LANE)];
encoded_col[quad_byte]++;
} else {
// GT200 can save an instruction because it can source an operand
// directly from smem
const int BYTE_ENCODE_SHIFT = 0x3;
const K QUAD_MASK = (RADIX_BITS < 2) ? 0x1 : 0x3;
int quad_shift = MagnitudeShift<K, BYTE_ENCODE_SHIFT - BIT>(key & (QUAD_MASK << BIT));
encoded_reduction_col[FastMul(lane, REDUCTION_PARTIALS_PER_LANE)] += (1 << quad_shift);
}
}
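// Bucket splits the RADIX_BITS digit into a lane index (the upper bits) and a
// quad-byte index (the lowest two bits): the lane selects which packed counter
// word to update, and the quad byte selects which byte within that word is
// incremented. The Fermi path indexes the byte directly; the GT200 path adds
// (1 << 8*quad_byte) to the whole word instead.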
template <typename K, CacheModifier CACHE_MODIFIER, int RADIX_BITS, int REDUCTION_PARTIALS_PER_LANE, int BIT, typename PreprocessFunctor, int CYCLES>
struct LoadOp;
template <typename K, CacheModifier CACHE_MODIFIER, int RADIX_BITS, int REDUCTION_PARTIALS_PER_LANE, int BIT, typename PreprocessFunctor>
struct LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 1>
{
static __device__ __forceinline__ void BlockOfLoads(K *d_in_keys, int block_offset, int *encoded_reduction_col)
{
K key;
GlobalLoad<K, CACHE_MODIFIER >::Ld(key, d_in_keys, block_offset);
Bucket<K, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor>(key, encoded_reduction_col);
}
};
template <typename K, CacheModifier CACHE_MODIFIER, int RADIX_BITS, int REDUCTION_PARTIALS_PER_LANE, int BIT, typename PreprocessFunctor>
struct LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 2>
{
static __device__ __forceinline__ void BlockOfLoads(K *d_in_keys, int block_offset, int *encoded_reduction_col)
{
LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 1>::BlockOfLoads(d_in_keys, block_offset + (B40C_RADIXSORT_THREADS * 0), encoded_reduction_col);
LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 1>::BlockOfLoads(d_in_keys, block_offset + (B40C_RADIXSORT_THREADS * 1), encoded_reduction_col);
}
};
template <typename K, CacheModifier CACHE_MODIFIER, int RADIX_BITS, int REDUCTION_PARTIALS_PER_LANE, int BIT, typename PreprocessFunctor>
struct LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 4>
{
static __device__ __forceinline__ void BlockOfLoads(K *d_in_keys, int block_offset, int *encoded_reduction_col)
{
LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 2>::BlockOfLoads(d_in_keys, block_offset + (B40C_RADIXSORT_THREADS * 0), encoded_reduction_col);
LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 2>::BlockOfLoads(d_in_keys, block_offset + (B40C_RADIXSORT_THREADS * 2), encoded_reduction_col);
}
};
template <typename K, CacheModifier CACHE_MODIFIER, int RADIX_BITS, int REDUCTION_PARTIALS_PER_LANE, int BIT, typename PreprocessFunctor>
struct LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 8>
{
static __device__ __forceinline__ void BlockOfLoads(K *d_in_keys, int block_offset, int *encoded_reduction_col)
{
K keys[8];
GlobalLoad<K, CACHE_MODIFIER >::Ld(keys[0], d_in_keys, block_offset + (B40C_RADIXSORT_THREADS * 0));
GlobalLoad<K, CACHE_MODIFIER >::Ld(keys[1], d_in_keys, block_offset + (B40C_RADIXSORT_THREADS * 1));
GlobalLoad<K, CACHE_MODIFIER >::Ld(keys[2], d_in_keys, block_offset + (B40C_RADIXSORT_THREADS * 2));
GlobalLoad<K, CACHE_MODIFIER >::Ld(keys[3], d_in_keys, block_offset + (B40C_RADIXSORT_THREADS * 3));
if (B40C_FERMI(__CUDA_ARCH__)) __syncthreads();
GlobalLoad<K, CACHE_MODIFIER >::Ld(keys[4], d_in_keys, block_offset + (B40C_RADIXSORT_THREADS * 4));
GlobalLoad<K, CACHE_MODIFIER >::Ld(keys[5], d_in_keys, block_offset + (B40C_RADIXSORT_THREADS * 5));
GlobalLoad<K, CACHE_MODIFIER >::Ld(keys[6], d_in_keys, block_offset + (B40C_RADIXSORT_THREADS * 6));
GlobalLoad<K, CACHE_MODIFIER >::Ld(keys[7], d_in_keys, block_offset + (B40C_RADIXSORT_THREADS * 7));
Bucket<K, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor>(keys[0], encoded_reduction_col);
Bucket<K, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor>(keys[1], encoded_reduction_col);
Bucket<K, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor>(keys[2], encoded_reduction_col);
Bucket<K, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor>(keys[3], encoded_reduction_col);
Bucket<K, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor>(keys[4], encoded_reduction_col);
Bucket<K, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor>(keys[5], encoded_reduction_col);
Bucket<K, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor>(keys[6], encoded_reduction_col);
Bucket<K, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor>(keys[7], encoded_reduction_col);
}
};
template <typename K, CacheModifier CACHE_MODIFIER, int RADIX_BITS, int REDUCTION_PARTIALS_PER_LANE, int BIT, typename PreprocessFunctor>
struct LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 16> {
static __device__ __forceinline__ void BlockOfLoads(K *d_in_keys, int block_offset, int *encoded_reduction_col)
{
LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 8>::BlockOfLoads(d_in_keys, block_offset + (B40C_RADIXSORT_THREADS * 0), encoded_reduction_col);
LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 8>::BlockOfLoads(d_in_keys, block_offset + (B40C_RADIXSORT_THREADS * 8), encoded_reduction_col);
}
};
template <typename K, CacheModifier CACHE_MODIFIER, int RADIX_BITS, int REDUCTION_PARTIALS_PER_LANE, int BIT, typename PreprocessFunctor>
struct LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 32> {
static __device__ __forceinline__ void BlockOfLoads(K *d_in_keys, int block_offset, int *encoded_reduction_col)
{
LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 16>::BlockOfLoads(d_in_keys, block_offset + (B40C_RADIXSORT_THREADS * 0), encoded_reduction_col);
LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 16>::BlockOfLoads(d_in_keys, block_offset + (B40C_RADIXSORT_THREADS * 16), encoded_reduction_col);
}
};
template <typename K, CacheModifier CACHE_MODIFIER, int RADIX_BITS, int REDUCTION_PARTIALS_PER_LANE, int BIT, typename PreprocessFunctor>
struct LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 64> {
static __device__ __forceinline__ void BlockOfLoads(K *d_in_keys, int block_offset, int *encoded_reduction_col)
{
LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 32>::BlockOfLoads(d_in_keys, block_offset + (B40C_RADIXSORT_THREADS * 0), encoded_reduction_col);
LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 32>::BlockOfLoads(d_in_keys, block_offset + (B40C_RADIXSORT_THREADS * 32), encoded_reduction_col);
}
};
template <typename K, CacheModifier CACHE_MODIFIER, int RADIX_BITS, int REDUCTION_PARTIALS_PER_LANE, int BIT, typename PreprocessFunctor>
struct LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 128> {
static __device__ __forceinline__ void BlockOfLoads(K *d_in_keys, int block_offset, int *encoded_reduction_col)
{
LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 64>::BlockOfLoads(d_in_keys, block_offset + (B40C_RADIXSORT_THREADS * 0), encoded_reduction_col);
LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 64>::BlockOfLoads(d_in_keys, block_offset + (B40C_RADIXSORT_THREADS * 64), encoded_reduction_col);
}
};
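// The LoadOp specializations above form a template-recursive doubling scheme:
// a block of 2*N loads is two blocks of N loads at strided offsets, bottoming
// out at the single-load (CYCLES == 1) and eight-load base cases.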
template <int REDUCTION_LANES>
__device__ __forceinline__ void ResetEncodedCarry(
int *encoded_reduction_col)
{
#pragma unroll
for (int SCAN_LANE = 0; SCAN_LANE < (int) REDUCTION_LANES; SCAN_LANE++) {
encoded_reduction_col[SCAN_LANE * B40C_RADIXSORT_THREADS] = 0;
}
}
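// Why the periodic reset: each packed byte counter saturates at 255, and a
// thread performs at most 128 single-byte increments per unrolled batch, so
// draining the counters into local_counts and zeroing them between batches
// keeps every byte safely below overflow.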
template <bool UNROLL, typename K, CacheModifier CACHE_MODIFIER, int BIT, int RADIX_BITS, int REDUCTION_LANES, int REDUCTION_LANES_PER_WARP, int LOG_REDUCTION_PARTIALS_PER_LANE, int REDUCTION_PARTIALS_PER_LANE, typename PreprocessFunctor>
struct UnrolledLoads;
// Minimal unrolling
template <typename K, CacheModifier CACHE_MODIFIER, int BIT, int RADIX_BITS, int REDUCTION_LANES, int REDUCTION_LANES_PER_WARP, int LOG_REDUCTION_PARTIALS_PER_LANE, int REDUCTION_PARTIALS_PER_LANE, typename PreprocessFunctor>
struct UnrolledLoads <false, K, CACHE_MODIFIER, BIT, RADIX_BITS, REDUCTION_LANES, REDUCTION_LANES_PER_WARP, LOG_REDUCTION_PARTIALS_PER_LANE, REDUCTION_PARTIALS_PER_LANE, PreprocessFunctor>
{
__device__ __forceinline__ static void Unroll(
K* d_in_keys,
int &block_offset,
int* encoded_reduction_col,
int* scan_lanes,
const int& out_of_bounds,
int local_counts[REDUCTION_LANES_PER_WARP][4],
int warp_id,
int warp_idx)
{
// Unroll batches of loads with occasional reduction to avoid overflow
while (block_offset + (B40C_RADIXSORT_THREADS * 32) < out_of_bounds) {
LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 32>::BlockOfLoads(d_in_keys, block_offset, encoded_reduction_col);
block_offset += B40C_RADIXSORT_THREADS * 32;
__syncthreads();
// Aggregate back into local_count registers to prevent overflow
ReduceEncodedCounts<REDUCTION_LANES, REDUCTION_LANES_PER_WARP, LOG_REDUCTION_PARTIALS_PER_LANE, REDUCTION_PARTIALS_PER_LANE>(
local_counts,
scan_lanes,
warp_id,
warp_idx);
__syncthreads();
// Reset encoded counters
ResetEncodedCarry<REDUCTION_LANES>(encoded_reduction_col);
}
}
};
// Unrolled
template <typename K, CacheModifier CACHE_MODIFIER, int BIT, int RADIX_BITS, int REDUCTION_LANES, int REDUCTION_LANES_PER_WARP, int LOG_REDUCTION_PARTIALS_PER_LANE, int REDUCTION_PARTIALS_PER_LANE, typename PreprocessFunctor>
struct UnrolledLoads <true, K, CACHE_MODIFIER, BIT, RADIX_BITS, REDUCTION_LANES, REDUCTION_LANES_PER_WARP, LOG_REDUCTION_PARTIALS_PER_LANE, REDUCTION_PARTIALS_PER_LANE, PreprocessFunctor>
{
__device__ __forceinline__ static void Unroll(
K* d_in_keys,
int &block_offset,
int* encoded_reduction_col,
int* scan_lanes,
const int& out_of_bounds,
int local_counts[REDUCTION_LANES_PER_WARP][4],
int warp_id,
int warp_idx)
{
// Unroll batches of loads with occasional reduction to avoid overflow
while (block_offset + (B40C_RADIXSORT_THREADS * 128) < out_of_bounds) {
LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 128>::BlockOfLoads(d_in_keys, block_offset, encoded_reduction_col);
block_offset += B40C_RADIXSORT_THREADS * 128;
__syncthreads();
// Aggregate back into local_count registers to prevent overflow
ReduceEncodedCounts<REDUCTION_LANES, REDUCTION_LANES_PER_WARP, LOG_REDUCTION_PARTIALS_PER_LANE, REDUCTION_PARTIALS_PER_LANE>(
local_counts,
scan_lanes,
warp_id,
warp_idx);
__syncthreads();
// Reset encoded counters
ResetEncodedCarry<REDUCTION_LANES>(encoded_reduction_col);
}
if (block_offset + (B40C_RADIXSORT_THREADS * 64) < out_of_bounds) {
LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 64>::BlockOfLoads(d_in_keys, block_offset, encoded_reduction_col);
block_offset += B40C_RADIXSORT_THREADS * 64;
}
if (block_offset + (B40C_RADIXSORT_THREADS * 32) < out_of_bounds) {
LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 32>::BlockOfLoads(d_in_keys, block_offset, encoded_reduction_col);
block_offset += B40C_RADIXSORT_THREADS * 32;
}
if (block_offset + (B40C_RADIXSORT_THREADS * 16) < out_of_bounds) {
LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 16>::BlockOfLoads(d_in_keys, block_offset, encoded_reduction_col);
block_offset += B40C_RADIXSORT_THREADS * 16;
}
if (block_offset + (B40C_RADIXSORT_THREADS * 8) < out_of_bounds) {
LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 8>::BlockOfLoads(d_in_keys, block_offset, encoded_reduction_col);
block_offset += B40C_RADIXSORT_THREADS * 8;
}
if (block_offset + (B40C_RADIXSORT_THREADS * 4) < out_of_bounds) {
LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 4>::BlockOfLoads(d_in_keys, block_offset, encoded_reduction_col);
block_offset += B40C_RADIXSORT_THREADS * 4;
}
if (block_offset + (B40C_RADIXSORT_THREADS * 2) < out_of_bounds) {
LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 2>::BlockOfLoads(d_in_keys, block_offset, encoded_reduction_col);
block_offset += B40C_RADIXSORT_THREADS * 2;
}
}
};
template <
typename K,
CacheModifier CACHE_MODIFIER,
int BIT,
int RADIX_BITS,
int RADIX_DIGITS,
int REDUCTION_LANES,
int LOG_REDUCTION_PARTIALS_PER_LANE,
int REDUCTION_PARTIALS_PER_LANE,
typename PreprocessFunctor,
bool UNROLL>
__device__ __forceinline__ void ReductionPass(
K* d_in_keys,
int* d_spine,
int block_offset,
int* encoded_reduction_col,
int* scan_lanes,
const int& out_of_bounds)
{
const int REDUCTION_LANES_PER_WARP = (REDUCTION_LANES > B40C_RADIXSORT_WARPS) ? REDUCTION_LANES / B40C_RADIXSORT_WARPS : 1; // Always at least one fours group per warp
const int PARTIALS_PER_ROW = B40C_WARP_THREADS;
const int PADDED_PARTIALS_PER_ROW = PARTIALS_PER_ROW + 1;
int warp_id = threadIdx.x >> B40C_LOG_WARP_THREADS;
int warp_idx = threadIdx.x & (B40C_WARP_THREADS - 1);
block_offset += threadIdx.x;
// Each thread is responsible for aggregating an unencoded segment of a fours-group
int local_counts[REDUCTION_LANES_PER_WARP][4];
// Initialize local counts
#pragma unroll
for (int LANE = 0; LANE < (int) REDUCTION_LANES_PER_WARP; LANE++) {
local_counts[LANE][0] = 0;
local_counts[LANE][1] = 0;
local_counts[LANE][2] = 0;
local_counts[LANE][3] = 0;
}
// Reset encoded counters
ResetEncodedCarry<REDUCTION_LANES>(encoded_reduction_col);
// Process loads in bulk (if applicable)
UnrolledLoads<UNROLL, K, CACHE_MODIFIER, BIT, RADIX_BITS, REDUCTION_LANES, REDUCTION_LANES_PER_WARP, LOG_REDUCTION_PARTIALS_PER_LANE, REDUCTION_PARTIALS_PER_LANE, PreprocessFunctor>::Unroll(
d_in_keys,
block_offset,
encoded_reduction_col,
scan_lanes,
out_of_bounds + threadIdx.x,
local_counts,
warp_id,
warp_idx);
// Process (potentially-partial) loads singly
while (block_offset < out_of_bounds) {
LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 1>::BlockOfLoads(d_in_keys, block_offset, encoded_reduction_col);
block_offset += B40C_RADIXSORT_THREADS;
}
__syncthreads();
// Aggregate back into local_count registers
ReduceEncodedCounts<REDUCTION_LANES, REDUCTION_LANES_PER_WARP, LOG_REDUCTION_PARTIALS_PER_LANE, REDUCTION_PARTIALS_PER_LANE>(
local_counts,
scan_lanes,
warp_id,
warp_idx);
__syncthreads();
//
// Reduce the local_counts within each reduction lane within each warp
//
// Place into smem
int lane_base = FastMul(warp_id, PADDED_PARTIALS_PER_ROW * B40C_RADIXSORT_WARPS); // my warp's (first) reduction lane
#pragma unroll
for (int i = 0; i < (int) REDUCTION_LANES_PER_WARP; i++) {
scan_lanes[lane_base + warp_idx + (PADDED_PARTIALS_PER_ROW * 0)] = local_counts[i][0];
scan_lanes[lane_base + warp_idx + (PADDED_PARTIALS_PER_ROW * 1)] = local_counts[i][1];
scan_lanes[lane_base + warp_idx + (PADDED_PARTIALS_PER_ROW * 2)] = local_counts[i][2];
scan_lanes[lane_base + warp_idx + (PADDED_PARTIALS_PER_ROW * 3)] = local_counts[i][3];
lane_base += PADDED_PARTIALS_PER_ROW * B40C_RADIXSORT_WARPS;
}
__syncthreads();
// Rake-reduce and write out the digit_count reductions
if (threadIdx.x < RADIX_DIGITS) {
int lane_base = FastMul(threadIdx.x, PADDED_PARTIALS_PER_ROW);
int digit_count = SerialReduce<PARTIALS_PER_ROW>(scan_lanes + lane_base);
int spine_digit_offset = FastMul(gridDim.x, threadIdx.x) + blockIdx.x;
d_spine[spine_digit_offset] = digit_count;
}
}
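// The spine is written digit-major (digit * gridDim.x + blockIdx.x), so a
// single prefix scan over d_spine later yields, for every (digit, block)
// pair, the global scatter offset presumably consumed by the subsequent
// spine-scan and scatter passes.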
template <typename K, typename V, int PASS, int RADIX_BITS, int BIT, typename PreprocessFunctor>
__launch_bounds__ (B40C_RADIXSORT_THREADS, B40C_RADIXSORT_REDUCE_CTA_OCCUPANCY(__CUDA_ARCH__))
__global__
void LsbRakingReductionKernel(
int *d_selectors,
int *d_spine,
K *d_in_keys,
K *d_out_keys,
CtaDecomposition work_decomposition)
{
const int RADIX_DIGITS = 1 << RADIX_BITS;
const int TILE_ELEMENTS = B40C_RADIXSORT_TILE_ELEMENTS(__CUDA_ARCH__, K, V);
const int LOG_REDUCTION_PARTIALS_PER_LANE = B40C_RADIXSORT_LOG_THREADS;
const int REDUCTION_PARTIALS_PER_LANE = 1 << LOG_REDUCTION_PARTIALS_PER_LANE;
const int LOG_REDUCTION_LANES = (RADIX_BITS >= 2) ? RADIX_BITS - 2 : 0; // Always at least one fours group
const int REDUCTION_LANES = 1 << LOG_REDUCTION_LANES;
SuppressUnusedConstantWarning(RADIX_DIGITS);
// Each thread gets its own column of fours-groups (for conflict-free updates)
__shared__ int scan_lanes[REDUCTION_LANES * REDUCTION_PARTIALS_PER_LANE];
int *encoded_reduction_col = &scan_lanes[threadIdx.x]; // first element of column
// Determine where to read our input
int selector = (PASS == 0) ? 0 : d_selectors[PASS & 0x1];
if (selector) d_in_keys = d_out_keys;
// Calculate our threadblock's range
int block_offset, block_elements;
if (blockIdx.x < work_decomposition.num_big_blocks) {
block_offset = work_decomposition.big_block_elements * blockIdx.x;
block_elements = work_decomposition.big_block_elements;
} else {
block_offset = (work_decomposition.normal_block_elements * blockIdx.x) + (work_decomposition.num_big_blocks * TILE_ELEMENTS);
block_elements = work_decomposition.normal_block_elements;
}
int out_of_bounds = block_offset + block_elements;
if (blockIdx.x == gridDim.x - 1) {
if (work_decomposition.extra_elements_last_block > 0) {
out_of_bounds -= TILE_ELEMENTS;
}
out_of_bounds += work_decomposition.extra_elements_last_block;
}
// Perform reduction pass
ReductionPass<K, NONE, BIT, RADIX_BITS, RADIX_DIGITS, REDUCTION_LANES, LOG_REDUCTION_PARTIALS_PER_LANE, REDUCTION_PARTIALS_PER_LANE, PreprocessFunctor, true>(
d_in_keys,
d_spine,
block_offset,
encoded_reduction_col,
scan_lanes,
out_of_bounds);
}
} // namespace b40c
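/*
 * Hedged usage sketch (not part of the original file): launching one
 * reduction pass over 4-bit digits of unsigned int keys. The functor name
 * NopFunctor and the CtaDecomposition contents are hypothetical stand-ins for
 * whatever the surrounding B40C host code actually provides.
 *
 *   const int RADIX_BITS = 4, PASS = 0, BIT = PASS * RADIX_BITS;
 *   CtaDecomposition decomp = ...; // filled in by the host-side planner
 *   hipLaunchKernelGGL(
 *       (b40c::LsbRakingReductionKernel<unsigned int, unsigned int, PASS,
 *                                       RADIX_BITS, BIT, NopFunctor>),
 *       dim3(grid_size), dim3(B40C_RADIXSORT_THREADS), 0, 0,
 *       d_selectors, d_spine, d_in_keys, d_out_keys, decomp);
 */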
| f3c225a0c22b81ef8075a7db845b42f20000711d.cu | /******************************************************************************
*
* Copyright 2010 Duane Merrill
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*
*
* AUTHORS' REQUEST:
*
* If you use|reference|benchmark this code, please cite our Technical
* Report (http://www.cs.virginia.edu/~dgm4d/papers/RadixSortTR.pdf):
*
* @TechReport{ Merrill:Sorting:2010,
* author = "Duane Merrill and Andrew Grimshaw",
* title = "Revisiting Sorting for GPGPU Stream Architectures",
* year = "2010",
* institution = "University of Virginia, Department of Computer Science",
* address = "Charlottesville, VA, USA",
* number = "CS2010-03"
* }
*
* For more information, see our Google Code project site:
* http://code.google.com/p/back40computing/
*
* Thanks!
*
******************************************************************************/
/******************************************************************************
* Bottom-level digit-reduction/counting kernel
******************************************************************************/
#pragma once
#include "radixsort_kernel_common.cu"
namespace b40c {
/******************************************************************************
* Cycle-processing Routines
******************************************************************************/
template <int BYTE>
__device__ __forceinline__ int DecodeInt(int encoded){
int retval;
ExtractKeyBits<int, BYTE * 8, 8>::Extract(retval, encoded);
return retval;
}
//-----------------------------------------------------------------------------
template <int PARTIAL>
__device__ __forceinline__ void ReduceLanePartial(
int local_counts[4],
int *scan_lanes,
int lane_offset)
{
unsigned char* encoded = (unsigned char *) &scan_lanes[lane_offset + (PARTIAL * B40C_WARP_THREADS)];
local_counts[0] += encoded[0];
local_counts[1] += encoded[1];
local_counts[2] += encoded[2];
local_counts[3] += encoded[3];
}
template <int LANE, int REDUCTION_LANES, int REDUCTION_LANES_PER_WARP, int REDUCTION_PARTIALS_PER_LANE, int LANE_PARTIALS_PER_THREAD>
__device__ __forceinline__ void ReduceLanePartials(
int local_counts[REDUCTION_LANES_PER_WARP][4],
int *scan_lanes,
int lane_offset)
{
lane_offset += (LANE * REDUCTION_PARTIALS_PER_LANE * B40C_RADIXSORT_WARPS);
if ((B40C_RADIXSORT_WARPS < REDUCTION_LANES) || (lane_offset < REDUCTION_LANES * REDUCTION_PARTIALS_PER_LANE)) {
if (LANE_PARTIALS_PER_THREAD > 0) ReduceLanePartial<0>(local_counts[LANE], scan_lanes, lane_offset);
if (LANE_PARTIALS_PER_THREAD > 1) ReduceLanePartial<1>(local_counts[LANE], scan_lanes, lane_offset);
if (LANE_PARTIALS_PER_THREAD > 2) ReduceLanePartial<2>(local_counts[LANE], scan_lanes, lane_offset);
if (LANE_PARTIALS_PER_THREAD > 3) ReduceLanePartial<3>(local_counts[LANE], scan_lanes, lane_offset);
if (LANE_PARTIALS_PER_THREAD > 4) ReduceLanePartial<4>(local_counts[LANE], scan_lanes, lane_offset);
if (LANE_PARTIALS_PER_THREAD > 5) ReduceLanePartial<5>(local_counts[LANE], scan_lanes, lane_offset);
if (LANE_PARTIALS_PER_THREAD > 6) ReduceLanePartial<6>(local_counts[LANE], scan_lanes, lane_offset);
if (LANE_PARTIALS_PER_THREAD > 7) ReduceLanePartial<7>(local_counts[LANE], scan_lanes, lane_offset);
}
}
template <
int REDUCTION_LANES,
int REDUCTION_LANES_PER_WARP,
int LOG_REDUCTION_PARTIALS_PER_LANE,
int REDUCTION_PARTIALS_PER_LANE>
__device__ __forceinline__ void ReduceEncodedCounts(
int local_counts[REDUCTION_LANES_PER_WARP][4],
int *scan_lanes,
int warp_id,
int warp_idx)
{
const int LANE_PARTIALS_PER_THREAD = REDUCTION_PARTIALS_PER_LANE / B40C_WARP_THREADS;
SuppressUnusedConstantWarning(LANE_PARTIALS_PER_THREAD);
int lane_offset = (warp_id << LOG_REDUCTION_PARTIALS_PER_LANE) + warp_idx; // my warp's (first-lane) reduction offset
if (REDUCTION_LANES_PER_WARP > 0) ReduceLanePartials<0, REDUCTION_LANES, REDUCTION_LANES_PER_WARP, REDUCTION_PARTIALS_PER_LANE, LANE_PARTIALS_PER_THREAD>(local_counts, scan_lanes, lane_offset);
if (REDUCTION_LANES_PER_WARP > 1) ReduceLanePartials<1, REDUCTION_LANES, REDUCTION_LANES_PER_WARP, REDUCTION_PARTIALS_PER_LANE, LANE_PARTIALS_PER_THREAD>(local_counts, scan_lanes, lane_offset);
if (REDUCTION_LANES_PER_WARP > 2) ReduceLanePartials<2, REDUCTION_LANES, REDUCTION_LANES_PER_WARP, REDUCTION_PARTIALS_PER_LANE, LANE_PARTIALS_PER_THREAD>(local_counts, scan_lanes, lane_offset);
if (REDUCTION_LANES_PER_WARP > 3) ReduceLanePartials<3, REDUCTION_LANES, REDUCTION_LANES_PER_WARP, REDUCTION_PARTIALS_PER_LANE, LANE_PARTIALS_PER_THREAD>(local_counts, scan_lanes, lane_offset);
}
template <typename K, int RADIX_BITS, int REDUCTION_PARTIALS_PER_LANE, int BIT, typename PreprocessFunctor>
__device__ __forceinline__ void Bucket(
K key,
int *encoded_reduction_col,
PreprocessFunctor preprocess = PreprocessFunctor())
{
preprocess(key);
int lane;
ExtractKeyBits<K, BIT + 2, RADIX_BITS - 2>::Extract(lane, key);
if (B40C_FERMI(__CUDA_ARCH__)) {
// GF100+ has special bit-extraction instructions (instead of shift+mask)
int quad_byte;
if (RADIX_BITS < 2) {
ExtractKeyBits<K, BIT, 1>::Extract(quad_byte, key);
} else {
ExtractKeyBits<K, BIT, 2>::Extract(quad_byte, key);
}
unsigned char *encoded_col = (unsigned char *) &encoded_reduction_col[FastMul(lane, REDUCTION_PARTIALS_PER_LANE)];
encoded_col[quad_byte]++;
} else {
// GT200 can save an instruction because it can source an operand
// directly from smem
const int BYTE_ENCODE_SHIFT = 0x3;
const K QUAD_MASK = (RADIX_BITS < 2) ? 0x1 : 0x3;
int quad_shift = MagnitudeShift<K, BYTE_ENCODE_SHIFT - BIT>(key & (QUAD_MASK << BIT));
encoded_reduction_col[FastMul(lane, REDUCTION_PARTIALS_PER_LANE)] += (1 << quad_shift);
}
}
template <typename K, CacheModifier CACHE_MODIFIER, int RADIX_BITS, int REDUCTION_PARTIALS_PER_LANE, int BIT, typename PreprocessFunctor, int CYCLES>
struct LoadOp;
template <typename K, CacheModifier CACHE_MODIFIER, int RADIX_BITS, int REDUCTION_PARTIALS_PER_LANE, int BIT, typename PreprocessFunctor>
struct LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 1>
{
static __device__ __forceinline__ void BlockOfLoads(K *d_in_keys, int block_offset, int *encoded_reduction_col)
{
K key;
GlobalLoad<K, CACHE_MODIFIER >::Ld(key, d_in_keys, block_offset);
Bucket<K, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor>(key, encoded_reduction_col);
}
};
template <typename K, CacheModifier CACHE_MODIFIER, int RADIX_BITS, int REDUCTION_PARTIALS_PER_LANE, int BIT, typename PreprocessFunctor>
struct LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 2>
{
static __device__ __forceinline__ void BlockOfLoads(K *d_in_keys, int block_offset, int *encoded_reduction_col)
{
LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 1>::BlockOfLoads(d_in_keys, block_offset + (B40C_RADIXSORT_THREADS * 0), encoded_reduction_col);
LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 1>::BlockOfLoads(d_in_keys, block_offset + (B40C_RADIXSORT_THREADS * 1), encoded_reduction_col);
}
};
template <typename K, CacheModifier CACHE_MODIFIER, int RADIX_BITS, int REDUCTION_PARTIALS_PER_LANE, int BIT, typename PreprocessFunctor>
struct LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 4>
{
static __device__ __forceinline__ void BlockOfLoads(K *d_in_keys, int block_offset, int *encoded_reduction_col)
{
LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 2>::BlockOfLoads(d_in_keys, block_offset + (B40C_RADIXSORT_THREADS * 0), encoded_reduction_col);
LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 2>::BlockOfLoads(d_in_keys, block_offset + (B40C_RADIXSORT_THREADS * 2), encoded_reduction_col);
}
};
template <typename K, CacheModifier CACHE_MODIFIER, int RADIX_BITS, int REDUCTION_PARTIALS_PER_LANE, int BIT, typename PreprocessFunctor>
struct LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 8>
{
static __device__ __forceinline__ void BlockOfLoads(K *d_in_keys, int block_offset, int *encoded_reduction_col)
{
K keys[8];
GlobalLoad<K, CACHE_MODIFIER >::Ld(keys[0], d_in_keys, block_offset + (B40C_RADIXSORT_THREADS * 0));
GlobalLoad<K, CACHE_MODIFIER >::Ld(keys[1], d_in_keys, block_offset + (B40C_RADIXSORT_THREADS * 1));
GlobalLoad<K, CACHE_MODIFIER >::Ld(keys[2], d_in_keys, block_offset + (B40C_RADIXSORT_THREADS * 2));
GlobalLoad<K, CACHE_MODIFIER >::Ld(keys[3], d_in_keys, block_offset + (B40C_RADIXSORT_THREADS * 3));
if (B40C_FERMI(__CUDA_ARCH__)) __syncthreads();
GlobalLoad<K, CACHE_MODIFIER >::Ld(keys[4], d_in_keys, block_offset + (B40C_RADIXSORT_THREADS * 4));
GlobalLoad<K, CACHE_MODIFIER >::Ld(keys[5], d_in_keys, block_offset + (B40C_RADIXSORT_THREADS * 5));
GlobalLoad<K, CACHE_MODIFIER >::Ld(keys[6], d_in_keys, block_offset + (B40C_RADIXSORT_THREADS * 6));
GlobalLoad<K, CACHE_MODIFIER >::Ld(keys[7], d_in_keys, block_offset + (B40C_RADIXSORT_THREADS * 7));
Bucket<K, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor>(keys[0], encoded_reduction_col);
Bucket<K, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor>(keys[1], encoded_reduction_col);
Bucket<K, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor>(keys[2], encoded_reduction_col);
Bucket<K, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor>(keys[3], encoded_reduction_col);
Bucket<K, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor>(keys[4], encoded_reduction_col);
Bucket<K, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor>(keys[5], encoded_reduction_col);
Bucket<K, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor>(keys[6], encoded_reduction_col);
Bucket<K, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor>(keys[7], encoded_reduction_col);
}
};
template <typename K, CacheModifier CACHE_MODIFIER, int RADIX_BITS, int REDUCTION_PARTIALS_PER_LANE, int BIT, typename PreprocessFunctor>
struct LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 16> {
static __device__ __forceinline__ void BlockOfLoads(K *d_in_keys, int block_offset, int *encoded_reduction_col)
{
LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 8>::BlockOfLoads(d_in_keys, block_offset + (B40C_RADIXSORT_THREADS * 0), encoded_reduction_col);
LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 8>::BlockOfLoads(d_in_keys, block_offset + (B40C_RADIXSORT_THREADS * 8), encoded_reduction_col);
}
};
template <typename K, CacheModifier CACHE_MODIFIER, int RADIX_BITS, int REDUCTION_PARTIALS_PER_LANE, int BIT, typename PreprocessFunctor>
struct LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 32> {
static __device__ __forceinline__ void BlockOfLoads(K *d_in_keys, int block_offset, int *encoded_reduction_col)
{
LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 16>::BlockOfLoads(d_in_keys, block_offset + (B40C_RADIXSORT_THREADS * 0), encoded_reduction_col);
LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 16>::BlockOfLoads(d_in_keys, block_offset + (B40C_RADIXSORT_THREADS * 16), encoded_reduction_col);
}
};
template <typename K, CacheModifier CACHE_MODIFIER, int RADIX_BITS, int REDUCTION_PARTIALS_PER_LANE, int BIT, typename PreprocessFunctor>
struct LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 64> {
static __device__ __forceinline__ void BlockOfLoads(K *d_in_keys, int block_offset, int *encoded_reduction_col)
{
LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 32>::BlockOfLoads(d_in_keys, block_offset + (B40C_RADIXSORT_THREADS * 0), encoded_reduction_col);
LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 32>::BlockOfLoads(d_in_keys, block_offset + (B40C_RADIXSORT_THREADS * 32), encoded_reduction_col);
}
};
template <typename K, CacheModifier CACHE_MODIFIER, int RADIX_BITS, int REDUCTION_PARTIALS_PER_LANE, int BIT, typename PreprocessFunctor>
struct LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 128> {
static __device__ __forceinline__ void BlockOfLoads(K *d_in_keys, int block_offset, int *encoded_reduction_col)
{
LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 64>::BlockOfLoads(d_in_keys, block_offset + (B40C_RADIXSORT_THREADS * 0), encoded_reduction_col);
LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 64>::BlockOfLoads(d_in_keys, block_offset + (B40C_RADIXSORT_THREADS * 64), encoded_reduction_col);
}
};
template <int REDUCTION_LANES>
__device__ __forceinline__ void ResetEncodedCarry(
int *encoded_reduction_col)
{
#pragma unroll
for (int SCAN_LANE = 0; SCAN_LANE < (int) REDUCTION_LANES; SCAN_LANE++) {
encoded_reduction_col[SCAN_LANE * B40C_RADIXSORT_THREADS] = 0;
}
}
template <bool UNROLL, typename K, CacheModifier CACHE_MODIFIER, int BIT, int RADIX_BITS, int REDUCTION_LANES, int REDUCTION_LANES_PER_WARP, int LOG_REDUCTION_PARTIALS_PER_LANE, int REDUCTION_PARTIALS_PER_LANE, typename PreprocessFunctor>
struct UnrolledLoads;
// Minimal unrolling
template <typename K, CacheModifier CACHE_MODIFIER, int BIT, int RADIX_BITS, int REDUCTION_LANES, int REDUCTION_LANES_PER_WARP, int LOG_REDUCTION_PARTIALS_PER_LANE, int REDUCTION_PARTIALS_PER_LANE, typename PreprocessFunctor>
struct UnrolledLoads <false, K, CACHE_MODIFIER, BIT, RADIX_BITS, REDUCTION_LANES, REDUCTION_LANES_PER_WARP, LOG_REDUCTION_PARTIALS_PER_LANE, REDUCTION_PARTIALS_PER_LANE, PreprocessFunctor>
{
__device__ __forceinline__ static void Unroll(
K* d_in_keys,
int &block_offset,
int* encoded_reduction_col,
int* scan_lanes,
const int& out_of_bounds,
int local_counts[REDUCTION_LANES_PER_WARP][4],
int warp_id,
int warp_idx)
{
// Unroll batches of loads with occasional reduction to avoid overflow
while (block_offset + (B40C_RADIXSORT_THREADS * 32) < out_of_bounds) {
LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 32>::BlockOfLoads(d_in_keys, block_offset, encoded_reduction_col);
block_offset += B40C_RADIXSORT_THREADS * 32;
__syncthreads();
// Aggregate back into local_count registers to prevent overflow
ReduceEncodedCounts<REDUCTION_LANES, REDUCTION_LANES_PER_WARP, LOG_REDUCTION_PARTIALS_PER_LANE, REDUCTION_PARTIALS_PER_LANE>(
local_counts,
scan_lanes,
warp_id,
warp_idx);
__syncthreads();
// Reset encoded counters
ResetEncodedCarry<REDUCTION_LANES>(encoded_reduction_col);
}
}
};
// Unrolled
template <typename K, CacheModifier CACHE_MODIFIER, int BIT, int RADIX_BITS, int REDUCTION_LANES, int REDUCTION_LANES_PER_WARP, int LOG_REDUCTION_PARTIALS_PER_LANE, int REDUCTION_PARTIALS_PER_LANE, typename PreprocessFunctor>
struct UnrolledLoads <true, K, CACHE_MODIFIER, BIT, RADIX_BITS, REDUCTION_LANES, REDUCTION_LANES_PER_WARP, LOG_REDUCTION_PARTIALS_PER_LANE, REDUCTION_PARTIALS_PER_LANE, PreprocessFunctor>
{
__device__ __forceinline__ static void Unroll(
K* d_in_keys,
int &block_offset,
int* encoded_reduction_col,
int* scan_lanes,
const int& out_of_bounds,
int local_counts[REDUCTION_LANES_PER_WARP][4],
int warp_id,
int warp_idx)
{
// Unroll batches of loads with occasional reduction to avoid overflow
while (block_offset + (B40C_RADIXSORT_THREADS * 128) < out_of_bounds) {
LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 128>::BlockOfLoads(d_in_keys, block_offset, encoded_reduction_col);
block_offset += B40C_RADIXSORT_THREADS * 128;
__syncthreads();
// Aggregate back into local_count registers to prevent overflow
ReduceEncodedCounts<REDUCTION_LANES, REDUCTION_LANES_PER_WARP, LOG_REDUCTION_PARTIALS_PER_LANE, REDUCTION_PARTIALS_PER_LANE>(
local_counts,
scan_lanes,
warp_id,
warp_idx);
__syncthreads();
// Reset encoded counters
ResetEncodedCarry<REDUCTION_LANES>(encoded_reduction_col);
}
if (block_offset + (B40C_RADIXSORT_THREADS * 64) < out_of_bounds) {
LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 64>::BlockOfLoads(d_in_keys, block_offset, encoded_reduction_col);
block_offset += B40C_RADIXSORT_THREADS * 64;
}
if (block_offset + (B40C_RADIXSORT_THREADS * 32) < out_of_bounds) {
LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 32>::BlockOfLoads(d_in_keys, block_offset, encoded_reduction_col);
block_offset += B40C_RADIXSORT_THREADS * 32;
}
if (block_offset + (B40C_RADIXSORT_THREADS * 16) < out_of_bounds) {
LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 16>::BlockOfLoads(d_in_keys, block_offset, encoded_reduction_col);
block_offset += B40C_RADIXSORT_THREADS * 16;
}
if (block_offset + (B40C_RADIXSORT_THREADS * 8) < out_of_bounds) {
LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 8>::BlockOfLoads(d_in_keys, block_offset, encoded_reduction_col);
block_offset += B40C_RADIXSORT_THREADS * 8;
}
if (block_offset + (B40C_RADIXSORT_THREADS * 4) < out_of_bounds) {
LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 4>::BlockOfLoads(d_in_keys, block_offset, encoded_reduction_col);
block_offset += B40C_RADIXSORT_THREADS * 4;
}
if (block_offset + (B40C_RADIXSORT_THREADS * 2) < out_of_bounds) {
LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 2>::BlockOfLoads(d_in_keys, block_offset, encoded_reduction_col);
block_offset += B40C_RADIXSORT_THREADS * 2;
}
}
};
template <
typename K,
CacheModifier CACHE_MODIFIER,
int BIT,
int RADIX_BITS,
int RADIX_DIGITS,
int REDUCTION_LANES,
int LOG_REDUCTION_PARTIALS_PER_LANE,
int REDUCTION_PARTIALS_PER_LANE,
typename PreprocessFunctor,
bool UNROLL>
__device__ __forceinline__ void ReductionPass(
K* d_in_keys,
int* d_spine,
int block_offset,
int* encoded_reduction_col,
int* scan_lanes,
const int& out_of_bounds)
{
const int REDUCTION_LANES_PER_WARP = (REDUCTION_LANES > B40C_RADIXSORT_WARPS) ? REDUCTION_LANES / B40C_RADIXSORT_WARPS : 1; // Always at least one fours group per warp
const int PARTIALS_PER_ROW = B40C_WARP_THREADS;
const int PADDED_PARTIALS_PER_ROW = PARTIALS_PER_ROW + 1;
int warp_id = threadIdx.x >> B40C_LOG_WARP_THREADS;
int warp_idx = threadIdx.x & (B40C_WARP_THREADS - 1);
block_offset += threadIdx.x;
// Each thread is responsible for aggregating an unencoded segment of a fours-group
int local_counts[REDUCTION_LANES_PER_WARP][4];
// Initialize local counts
#pragma unroll
for (int LANE = 0; LANE < (int) REDUCTION_LANES_PER_WARP; LANE++) {
local_counts[LANE][0] = 0;
local_counts[LANE][1] = 0;
local_counts[LANE][2] = 0;
local_counts[LANE][3] = 0;
}
// Reset encoded counters
ResetEncodedCarry<REDUCTION_LANES>(encoded_reduction_col);
// Process loads in bulk (if applicable)
UnrolledLoads<UNROLL, K, CACHE_MODIFIER, BIT, RADIX_BITS, REDUCTION_LANES, REDUCTION_LANES_PER_WARP, LOG_REDUCTION_PARTIALS_PER_LANE, REDUCTION_PARTIALS_PER_LANE, PreprocessFunctor>::Unroll(
d_in_keys,
block_offset,
encoded_reduction_col,
scan_lanes,
out_of_bounds + threadIdx.x,
local_counts,
warp_id,
warp_idx);
// Process (potentially-partial) loads singly
while (block_offset < out_of_bounds) {
LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 1>::BlockOfLoads(d_in_keys, block_offset, encoded_reduction_col);
block_offset += B40C_RADIXSORT_THREADS;
}
__syncthreads();
// Aggregate back into local_count registers
ReduceEncodedCounts<REDUCTION_LANES, REDUCTION_LANES_PER_WARP, LOG_REDUCTION_PARTIALS_PER_LANE, REDUCTION_PARTIALS_PER_LANE>(
local_counts,
scan_lanes,
warp_id,
warp_idx);
__syncthreads();
//
// Reduce the local_counts within each reduction lane within each warp
//
// Place into smem
int lane_base = FastMul(warp_id, PADDED_PARTIALS_PER_ROW * B40C_RADIXSORT_WARPS); // my warp's (first) reduction lane
#pragma unroll
for (int i = 0; i < (int) REDUCTION_LANES_PER_WARP; i++) {
scan_lanes[lane_base + warp_idx + (PADDED_PARTIALS_PER_ROW * 0)] = local_counts[i][0];
scan_lanes[lane_base + warp_idx + (PADDED_PARTIALS_PER_ROW * 1)] = local_counts[i][1];
scan_lanes[lane_base + warp_idx + (PADDED_PARTIALS_PER_ROW * 2)] = local_counts[i][2];
scan_lanes[lane_base + warp_idx + (PADDED_PARTIALS_PER_ROW * 3)] = local_counts[i][3];
lane_base += PADDED_PARTIALS_PER_ROW * B40C_RADIXSORT_WARPS;
}
__syncthreads();
// Rake-reduce and write out the digit_count reductions
if (threadIdx.x < RADIX_DIGITS) {
int lane_base = FastMul(threadIdx.x, PADDED_PARTIALS_PER_ROW);
int digit_count = SerialReduce<PARTIALS_PER_ROW>(scan_lanes + lane_base);
int spine_digit_offset = FastMul(gridDim.x, threadIdx.x) + blockIdx.x;
d_spine[spine_digit_offset] = digit_count;
}
}
template <typename K, typename V, int PASS, int RADIX_BITS, int BIT, typename PreprocessFunctor>
__launch_bounds__ (B40C_RADIXSORT_THREADS, B40C_RADIXSORT_REDUCE_CTA_OCCUPANCY(__CUDA_ARCH__))
__global__
void LsbRakingReductionKernel(
int *d_selectors,
int *d_spine,
K *d_in_keys,
K *d_out_keys,
CtaDecomposition work_decomposition)
{
const int RADIX_DIGITS = 1 << RADIX_BITS;
const int TILE_ELEMENTS = B40C_RADIXSORT_TILE_ELEMENTS(__CUDA_ARCH__, K, V);
const int LOG_REDUCTION_PARTIALS_PER_LANE = B40C_RADIXSORT_LOG_THREADS;
const int REDUCTION_PARTIALS_PER_LANE = 1 << LOG_REDUCTION_PARTIALS_PER_LANE;
const int LOG_REDUCTION_LANES = (RADIX_BITS >= 2) ? RADIX_BITS - 2 : 0; // Always at least one fours group
const int REDUCTION_LANES = 1 << LOG_REDUCTION_LANES;
SuppressUnusedConstantWarning(RADIX_DIGITS);
// Each thread gets its own column of fours-groups (for conflict-free updates)
__shared__ int scan_lanes[REDUCTION_LANES * REDUCTION_PARTIALS_PER_LANE];
int *encoded_reduction_col = &scan_lanes[threadIdx.x]; // first element of column
// Determine where to read our input
int selector = (PASS == 0) ? 0 : d_selectors[PASS & 0x1];
if (selector) d_in_keys = d_out_keys;
// Calculate our threadblock's range
int block_offset, block_elements;
if (blockIdx.x < work_decomposition.num_big_blocks) {
block_offset = work_decomposition.big_block_elements * blockIdx.x;
block_elements = work_decomposition.big_block_elements;
} else {
block_offset = (work_decomposition.normal_block_elements * blockIdx.x) + (work_decomposition.num_big_blocks * TILE_ELEMENTS);
block_elements = work_decomposition.normal_block_elements;
}
int out_of_bounds = block_offset + block_elements;
if (blockIdx.x == gridDim.x - 1) {
if (work_decomposition.extra_elements_last_block > 0) {
out_of_bounds -= TILE_ELEMENTS;
}
out_of_bounds += work_decomposition.extra_elements_last_block;
}
// Perform reduction pass
ReductionPass<K, NONE, BIT, RADIX_BITS, RADIX_DIGITS, REDUCTION_LANES, LOG_REDUCTION_PARTIALS_PER_LANE, REDUCTION_PARTIALS_PER_LANE, PreprocessFunctor, true>(
d_in_keys,
d_spine,
block_offset,
encoded_reduction_col,
scan_lanes,
out_of_bounds);
}
} // namespace b40c
|
eab641ce1e9ee98290aa1392b446993992287bf9.hip | // !!! This is a file automatically generated by hipify!!!
#define GLEW_STATIC
#include <GL/glew.h>
#include <GL/glut.h>
#include <stdlib.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "cuda_gl_interop.h"
#include "helper_math.h"
#include <cstdlib>
#include <cstdio>
#include <cmath>
#include <ctime>
#include <iostream>
int width = 800, height = 600;
float vboWindowScale = 0.5f;
int widthScaled, heightScaled;
double fovy = 60.0, aspect, zNear = 1.0, zFar = 1024.0;
unsigned int FPS = 60, msecs = 1000 / FPS;
double eyeX, eyeY, eyeZ;
bool animation = true;
GLfloat xRotated, yRotated, zRotated;
GLuint R, G, B = 0;
// number of colors
const int csize = 7;
// colors for the objects
int4 colors[csize];
unsigned int vertVBO = 0, normalVBO = 0;
struct cudaGraphicsResource *cudaVertVBO = NULL, *cudaNormalVBO = NULL;
int vertSize, normalSize;
unsigned int pbo = 0;
struct cudaGraphicsResource *cudaPBO = NULL;
int pboSize;
void initialize();
void resetCamera();
void createVBO();
void deleteVBO();
void recreateVBO();
void createPBO();
void deletePBO();
void recreatePBO();
int exitHandler();
void display();
void reshape(int w, int h);
void keyboard(unsigned char key, int x, int y);
void special(int key, int x, int y);
void timer(int value);
void displayRGB();
void drawGlutObject(int id, GLdouble size, GLfloat X, GLfloat Y, GLfloat Z, int4 color);
void drawTeaPot(int size, GLfloat X, GLfloat Y, GLfloat Z);
void drawSphere(GLdouble size, GLfloat X, GLfloat Y, GLfloat Z);
void drawCube(GLdouble size, GLfloat X, GLfloat Y, GLfloat Z);
void drawTetrahedron(GLdouble size, GLfloat X, GLfloat Y, GLfloat Z);
void randCol(int4[], int size);
__device__ __forceinline__ int segmentation(int value, int prog)
{
return (value < prog) ? 0 : value;
}
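// segmentation() is a per-channel threshold: values below `prog` are zeroed,
// values at or above it pass through unchanged, e.g. segmentation(100, 128)
// == 0 while segmentation(200, 128) == 200.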
__global__ void fancyKernel(uchar3 *pixels,int width, int height, int R, int G, int B)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
if ((x < width) && (y < height))
{
int i = y*width + x;
pixels[i].x = segmentation(pixels[i].x, R); // R
pixels[i].y = segmentation(pixels[i].y, G); // G
pixels[i].z = segmentation(pixels[i].z, B); // B
}
}
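/*
 * Hedged sketch (not in the original file): exercising fancyKernel on a plain
 * device buffer, without the OpenGL interop path used below. The buffer name
 * and sizes are illustrative only.
 *
 *   uchar3 *d_pixels;
 *   hipMalloc(&d_pixels, width * height * sizeof(uchar3));
 *   // ... fill d_pixels with image data ...
 *   dim3 block(16, 16);
 *   dim3 grid((width + 15) / 16, (height + 15) / 16);
 *   hipLaunchKernelGGL(fancyKernel, grid, block, 0, 0,
 *                      d_pixels, width, height, R, G, B);
 */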
int main(int argc, char *argv[])
{
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH);
glutInitWindowSize(width, height);
glutCreateWindow(argv[0]);
initialize();
glutDisplayFunc(display);
glutReshapeFunc(reshape);
glutKeyboardFunc(keyboard);
glutSpecialFunc(special);
glutTimerFunc(msecs, timer, 0);
glutMainLoop();
return 0;
}
void initialize()
{
	//colors
GLfloat mat_specular[] = { 1.0f, 1.0f, 1.0f, 1.0f };
GLfloat mat_shininess[] = { 50.0f };
GLfloat light_position[] = { 1.0f, 1.0f, 1.0f, 0.0f };
randCol(colors, csize);
glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
glEnable(GL_DEPTH_TEST);
glEnable(GL_POINT_SMOOTH);
glEnable(GL_LINE_SMOOTH);
glPointSize(2.0f);
glLineWidth(2.0f);
glShadeModel(GL_SMOOTH);
glMaterialfv(GL_FRONT, GL_SPECULAR, mat_specular);
glMaterialfv(GL_FRONT, GL_SHININESS, mat_shininess);
glLightfv(GL_LIGHT0, GL_POSITION, light_position);
glEnable(GL_LIGHTING);
glEnable(GL_LIGHT0);
glEnable(GL_COLOR_MATERIAL);
glColorMaterial(GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE);
resetCamera();
if (hipSetDevice(0) != hipSuccess)
{
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
exitHandler();
exit(EXIT_FAILURE);
}
glewExperimental = true;
if (glewInit() != GLEW_OK)
{
fprintf(stderr, "GLEW initialization failed!");
exitHandler();
exit(EXIT_FAILURE);
}
/*if (glewIsSupported("GL_VERSION_2_0") == false)
{
fprintf(stderr, "Extensions are not supported!");
exitHandler();
exit(EXIT_FAILURE);
}*/
	//create the vertex buffer objects
createVBO();
	//create the Pixel Buffer Object
createPBO();
}
void resetCamera()
{
eyeX = 2.0;
eyeY = 2.0;
eyeZ = 2.0;
}
void createVBO()
{
widthScaled = int(width*vboWindowScale);
heightScaled = int(height*vboWindowScale);
vertSize = 4 * widthScaled*heightScaled;
	//create a buffer object identifier
glGenBuffers(1, &vertVBO);
	//bind the identifier to the buffer object
	//GL_ARRAY_BUFFER - buffer object for vertex arrays
glBindBuffer(GL_ARRAY_BUFFER, vertVBO);
	//load the data into the buffer object
	//GL_DYNAMIC_DRAW - the contents will be respecified repeatedly by the application and used repeatedly for OpenGL drawing
glBufferData(GL_ARRAY_BUFFER, vertSize*sizeof(float), 0, GL_DYNAMIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);
	//register the OpenGL buffer for CUDA interop
hipGraphicsGLRegisterBuffer(&cudaVertVBO, vertVBO, hipGraphicsRegisterFlagsNone);
normalSize = 3 * widthScaled*heightScaled;
glGenBuffers(1, &normalVBO);
glBindBuffer(GL_ARRAY_BUFFER, normalVBO);
glBufferData(GL_ARRAY_BUFFER, normalSize*sizeof(float), 0, GL_DYNAMIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);
hipGraphicsGLRegisterBuffer(&cudaNormalVBO, normalVBO, hipGraphicsMapFlagsWriteDiscard);
}
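// Note: vertVBO and normalVBO are created and registered with CUDA here, but
// the drawing path in this file never fills or binds them; only the PBO below
// is actually used by the CUDA post-processing step.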
//delete the VBO objects
void deleteVBO()
{
hipGraphicsUnregisterResource(cudaVertVBO);
cudaVertVBO = NULL;
glDeleteBuffers(1, &vertVBO);
vertVBO = 0;
hipGraphicsUnregisterResource(cudaNormalVBO);
cudaNormalVBO = NULL;
glDeleteBuffers(1, &normalVBO);
normalVBO = 0;
}
void recreateVBO()
{
deleteVBO();
createVBO();
}
// Create the Pixel Buffer Object -
// a kind of OpenGL buffer used for storing pixel data
void createPBO()
{
pboSize = 3 * width*height;
glGenBuffers(1, &pbo);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pbo);
glBufferData(GL_PIXEL_UNPACK_BUFFER, pboSize*sizeof(char), 0, GL_DYNAMIC_COPY);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
hipGraphicsGLRegisterBuffer(&cudaPBO, pbo, hipGraphicsRegisterFlagsNone);
}
void deletePBO()
{
hipGraphicsUnregisterResource(cudaPBO);
cudaPBO = NULL;
glDeleteBuffers(1, &pbo);
pbo = 0;
}
void recreatePBO()
{
deletePBO();
createPBO();
}
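// The PBO (and the VBOs above) must be recreated whenever the window is
// resized, because their sizes are derived from the current width/height;
// reshape() calls recreateVBO()/recreatePBO() for exactly that reason.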
int exitHandler()
{
deleteVBO();
deletePBO();
if (hipDeviceReset() != hipSuccess)
{
fprintf(stderr, "hipDeviceReset failed!");
return EXIT_FAILURE;
}
return EXIT_SUCCESS;
}
void display()
{
hipError_t err = hipSuccess;
uchar3 *pixels = NULL;
size_t num_bytes;
dim3 block_dim(16, 16);
dim3 grid_dim((width + block_dim.x - 1) / block_dim.x, (height + block_dim.y - 1) / block_dim.y);
dim3 grid_dim_scaled((widthScaled + block_dim.x - 1) / block_dim.x, (heightScaled + block_dim.y - 1) / block_dim.y);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glLoadIdentity();
//define a viewing transformation
gluLookAt(eyeX, eyeY, eyeZ,
0.0, 0.0, 0.0,
0.0, 1.0, 0.0);
glMatrixMode(GL_MODELVIEW);
	// clear the drawing buffer
glClear(GL_COLOR_BUFFER_BIT);
glLoadIdentity();
drawGlutObject(1, 0.5, 0.0, 0.0, -3.5, colors[0]);
drawGlutObject(2, 0.5, 1.0, 0.0, -3.5, colors[1]);
drawGlutObject(3, 0.5, -1.0, 1.0, -2.5, colors[2]);
drawGlutObject(4, 0.5, 1.0, 1.0, -4.5, colors[3]);
drawGlutObject(5, 0.5, -1.0, -1.0, -4.5, colors[4]);
drawGlutObject(6, 0.5, -1.0, 2.0, -3.5, colors[5]);
drawGlutObject(7, 0.5, 0.5, 1.0, -7.5, colors[6]);
drawGlutObject(8, -0.5, 0.5, -1.0, -7.5, colors[6]);
	// PBO: read the rendered frame back, post-process it in CUDA, then redraw it.
glBindBuffer(GL_PIXEL_PACK_BUFFER, pbo);
glReadPixels(0, 0, width, height, GL_RGB, GL_UNSIGNED_BYTE, 0);
glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);
hipGraphicsMapResources(1, &cudaPBO, 0);
hipGraphicsResourceGetMappedPointer((void**)&pixels, &num_bytes, cudaPBO);
fancyKernel << <grid_dim, block_dim >> >(pixels, width, height, R, G, B);
err = hipGetLastError();
if (hipSuccess != err)
{
fprintf(stderr, "fancyKernel kernel launch failed: %s\n", hipGetErrorString(err));
exitHandler();
exit(EXIT_FAILURE);
}
err = hipDeviceSynchronize();
if (hipSuccess != err)
{
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching fancyKernel kernel!\n", err);
exitHandler();
exit(EXIT_FAILURE);
}
hipGraphicsUnmapResources(1, &cudaPBO, 0);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pbo);
glDrawPixels(width, height, GL_RGB, GL_UNSIGNED_BYTE, 0);
glutSwapBuffers();
}
void reshape(int w, int h)
{
width = (w > 0) ? w : 1;
height = (h > 0) ? h : 1;
aspect = (double)width / (double)height;
glViewport(0, 0, width, height);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluPerspective(fovy, aspect, zNear, zFar);
glMatrixMode(GL_MODELVIEW);
recreateVBO();
recreatePBO();
}
void keyboard(unsigned char key, int x, int y)
{
switch (key)
{
	case 'q': if (R < 255) R += 1; break;
	case 'a': if (R > 0) R -= 1; break;
	case 'w': if (G < 255) G += 1; break;
	case 's': if (G > 0) G -= 1; break;
	case 'e': if (B < 255) B += 1; break;
	case 'd': if (B > 0) B -= 1; break;
case 'r':
case 'R': resetCamera(); break;
	case 32:
	{
		animation = !animation; // toggle; restart the timer only when re-enabled
		if (animation)
		{
			glutTimerFunc(msecs, timer, 0);
		}
		break;
	}
case 27: exit(exitHandler()); break;
default:;
}
glutPostRedisplay();
	/* print the current RGB thresholds */
system("cls");
std::cout << "R: " << R << std::endl << "G: " << G << std::endl << "B: " << B << std::endl;
}
void special(int key, int x, int y)
{
switch (key)
{
case GLUT_KEY_LEFT: eyeX -= 0.5; break;
case GLUT_KEY_RIGHT: eyeX += 0.5; break;
case GLUT_KEY_UP: eyeY += 0.5; break;
case GLUT_KEY_DOWN: eyeY -= 0.5; break;
case GLUT_KEY_HOME: eyeZ -= 0.5; break;
case GLUT_KEY_END: eyeZ += 0.5; break;
default:;
}
glutPostRedisplay();
}
void timer(int value)
{
if (animation)
{
glutPostRedisplay();
glutTimerFunc(msecs, timer, 0);
}
}
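// timer() re-arms itself every `msecs` milliseconds while animation is on,
// giving a fixed ~60 FPS redisplay loop; toggling animation off lets the
// timer chain die out until the spacebar restarts it.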
void randCol(int4 color[], int size) {
	srand(time(NULL));
	for (int i = 0; i < size; i++) {
		color[i] = { rand() % 255, rand() % 255, rand() % 255, rand() % 255 };
	}
}
}
void drawTeaPot(int size, GLfloat X, GLfloat Y, GLfloat Z) {
glPushMatrix();
glTranslatef(X, Y, Z);
glRotatef(90, 0.1, 0.2, 0.5);
glColor3ub(0, 255, 0);
glutSolidTeapot(size);
glPopMatrix();
}
void drawGlutObject(int id, GLdouble size, GLfloat X, GLfloat Y, GLfloat Z, int4 color) {
glPushMatrix();
glTranslatef(X, Y, Z);
glRotatef(90, 0.1, 0.2, 0.5);
glColor3ub(color.x, color.y, color.z);
switch (id)
{
case 1: glutSolidTeapot(size); break;
case 2: glutSolidSphere(size, 50, 50); break;
case 3: glutSolidCube(size); break;
case 4: glutSolidTetrahedron(); break;
case 5: glutSolidIcosahedron(); break;
case 6: glutSolidOctahedron(); break;
case 7: glutSolidDodecahedron(); break;
	case 8: glutSolidTorus(size, 10, 1, 1); break;
default:;
}
glPopMatrix();
}
void drawSphere(GLdouble size, GLfloat X, GLfloat Y, GLfloat Z) {
glPushMatrix();
glTranslatef(X, Y, Z);
glRotatef(90, 0.1, 0.2, 0.5);
glColor3ub(0, 255, 0);
glutSolidSphere(size, 50, 50);
glPopMatrix();
}
void drawCube(GLdouble size, GLfloat X, GLfloat Y, GLfloat Z) {
	glPushMatrix();
glTranslatef(X, Y, Z);
glRotatef(90, 0.1, 0.2, 0.5);
glColor3ub(0, 255, 0);
glutSolidCube(size);
glPopMatrix();
}
void drawTetrahedron(GLdouble size, GLfloat X, GLfloat Y, GLfloat Z) {
glPushMatrix();
glTranslatef(X, Y, Z);
glRotatef(90, 0.1, 0.2, 0.5);
glColor3ub(0, 255, 0);
glutSolidTetrahedron();
glPopMatrix();
} | eab641ce1e9ee98290aa1392b446993992287bf9.cu | #define GLEW_STATIC
#include <GL/glew.h>
#include <GL/glut.h>
#include <stdlib.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "cuda_gl_interop.h"
#include "helper_math.h"
#include <cstdlib>
#include <cstdio>
#include <cmath>
#include <ctime>
#include <iostream>
int width = 800, height = 600;
float vboWindowScale = 0.5f;
int widthScaled, heightScaled;
double fovy = 60.0, aspect, zNear = 1.0, zFar = 1024.0;
unsigned int FPS = 60, msecs = 1000 / FPS;
double eyeX, eyeY, eyeZ;
bool animation = true;
GLfloat xRotated, yRotated, zRotated;
GLuint R, G, B = 0;
// number of colors
const int csize = 7;
// colors for the objects
int4 colors[csize];
unsigned int vertVBO = 0, normalVBO = 0;
struct cudaGraphicsResource *cudaVertVBO = NULL, *cudaNormalVBO = NULL;
int vertSize, normalSize;
unsigned int pbo = 0;
struct cudaGraphicsResource *cudaPBO = NULL;
int pboSize;
void initialize();
void resetCamera();
void createVBO();
void deleteVBO();
void recreateVBO();
void createPBO();
void deletePBO();
void recreatePBO();
int exitHandler();
void display();
void reshape(int w, int h);
void keyboard(unsigned char key, int x, int y);
void special(int key, int x, int y);
void timer(int value);
void displayRGB();
void drawGlutObject(int id, GLdouble size, GLfloat X, GLfloat Y, GLfloat Z, int4 color);
void drawTeaPot(int size, GLfloat X, GLfloat Y, GLfloat Z);
void drawSphere(GLdouble size, GLfloat X, GLfloat Y, GLfloat Z);
void drawCube(GLdouble size, GLfloat X, GLfloat Y, GLfloat Z);
void drawTetrahedron(GLdouble size, GLfloat X, GLfloat Y, GLfloat Z);
void randCol(int4[], int size);
__device__ __forceinline__ int segmentation(int value, int prog)
{
return (value < prog) ? 0 : value;
}
__global__ void fancyKernel(uchar3 *pixels,int width, int height, int R, int G, int B)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
if ((x < width) && (y < height))
{
int i = y*width + x;
pixels[i].x = segmentation(pixels[i].x, R); // R
pixels[i].y = segmentation(pixels[i].y, G); // G
pixels[i].z = segmentation(pixels[i].z, B); // B
}
}
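// Host-side reference of fancyKernel (added for illustration; not part of the
// original program). It applies the same per-channel threshold on the CPU and
// can be used to sanity-check the GPU output for a given (R, G, B) triple.
static void fancyKernelHost(uchar3 *pixels, int width, int height, int R, int G, int B)
{
	for (int i = 0; i < width * height; i++) {
		pixels[i].x = (pixels[i].x < R) ? 0 : pixels[i].x;
		pixels[i].y = (pixels[i].y < G) ? 0 : pixels[i].y;
		pixels[i].z = (pixels[i].z < B) ? 0 : pixels[i].z;
	}
}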
int main(int argc, char *argv[])
{
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH);
glutInitWindowSize(width, height);
glutCreateWindow(argv[0]);
initialize();
glutDisplayFunc(display);
glutReshapeFunc(reshape);
glutKeyboardFunc(keyboard);
glutSpecialFunc(special);
glutTimerFunc(msecs, timer, 0);
glutMainLoop();
return 0;
}
void initialize()
{
	// material and light colors
GLfloat mat_specular[] = { 1.0f, 1.0f, 1.0f, 1.0f };
GLfloat mat_shininess[] = { 50.0f };
GLfloat light_position[] = { 1.0f, 1.0f, 1.0f, 0.0f };
randCol(colors, csize);
glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
glEnable(GL_DEPTH_TEST);
glEnable(GL_POINT_SMOOTH);
glEnable(GL_LINE_SMOOTH);
glPointSize(2.0f);
glLineWidth(2.0f);
glShadeModel(GL_SMOOTH);
glMaterialfv(GL_FRONT, GL_SPECULAR, mat_specular);
glMaterialfv(GL_FRONT, GL_SHININESS, mat_shininess);
glLightfv(GL_LIGHT0, GL_POSITION, light_position);
glEnable(GL_LIGHTING);
glEnable(GL_LIGHT0);
glEnable(GL_COLOR_MATERIAL);
glColorMaterial(GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE);
resetCamera();
if (cudaSetDevice(0) != cudaSuccess)
{
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
exitHandler();
exit(EXIT_FAILURE);
}
glewExperimental = true;
if (glewInit() != GLEW_OK)
{
fprintf(stderr, "GLEW initialization failed!");
exitHandler();
exit(EXIT_FAILURE);
}
/*if (glewIsSupported("GL_VERSION_2_0") == false)
{
fprintf(stderr, "Extensions are not supported!");
exitHandler();
exit(EXIT_FAILURE);
}*/
	// create the vertex buffer objects
createVBO();
	// create the Pixel Buffer Object
createPBO();
}
void resetCamera()
{
eyeX = 2.0;
eyeY = 2.0;
eyeZ = 2.0;
}
void createVBO()
{
widthScaled = int(width*vboWindowScale);
heightScaled = int(height*vboWindowScale);
vertSize = 4 * widthScaled*heightScaled;
	// create a buffer object identifier
glGenBuffers(1, &vertVBO);
	// bind the identifier to the buffer object
	// GL_ARRAY_BUFFER - a buffer object for vertex array data
glBindBuffer(GL_ARRAY_BUFFER, vertVBO);
	// load data into the buffer object
	// GL_DYNAMIC_DRAW - the data store is rewritten repeatedly and used repeatedly for OpenGL drawing
glBufferData(GL_ARRAY_BUFFER, vertSize*sizeof(float), 0, GL_DYNAMIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);
	// register the OpenGL buffer with CUDA
cudaGraphicsGLRegisterBuffer(&cudaVertVBO, vertVBO, cudaGraphicsRegisterFlagsNone);
normalSize = 3 * widthScaled*heightScaled;
glGenBuffers(1, &normalVBO);
glBindBuffer(GL_ARRAY_BUFFER, normalVBO);
glBufferData(GL_ARRAY_BUFFER, normalSize*sizeof(float), 0, GL_DYNAMIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);
	cudaGraphicsGLRegisterBuffer(&cudaNormalVBO, normalVBO, cudaGraphicsRegisterFlagsWriteDiscard);
}
// delete the VBO objects
void deleteVBO()
{
cudaGraphicsUnregisterResource(cudaVertVBO);
cudaVertVBO = NULL;
glDeleteBuffers(1, &vertVBO);
vertVBO = 0;
cudaGraphicsUnregisterResource(cudaNormalVBO);
cudaNormalVBO = NULL;
glDeleteBuffers(1, &normalVBO);
normalVBO = 0;
}
void recreateVBO()
{
deleteVBO();
createVBO();
}
// Create the Pixel Buffer Object,
// a type of OpenGL buffer used for storing pixel data
void createPBO()
{
pboSize = 3 * width*height;
glGenBuffers(1, &pbo);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pbo);
glBufferData(GL_PIXEL_UNPACK_BUFFER, pboSize*sizeof(char), 0, GL_DYNAMIC_COPY);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
cudaGraphicsGLRegisterBuffer(&cudaPBO, pbo, cudaGraphicsRegisterFlagsNone);
}
void deletePBO()
{
cudaGraphicsUnregisterResource(cudaPBO);
cudaPBO = NULL;
glDeleteBuffers(1, &pbo);
pbo = 0;
}
void recreatePBO()
{
deletePBO();
createPBO();
}
int exitHandler()
{
deleteVBO();
deletePBO();
if (cudaDeviceReset() != cudaSuccess)
{
fprintf(stderr, "cudaDeviceReset failed!");
return EXIT_FAILURE;
}
return EXIT_SUCCESS;
}
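// One frame: render the GLUT objects, read the framebuffer back into the PBO
// (pack path), map the PBO into CUDA and run the per-channel threshold kernel
// in place, then draw the processed pixels from the same PBO (unpack path).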
void display()
{
cudaError_t err = cudaSuccess;
uchar3 *pixels = NULL;
size_t num_bytes;
dim3 block_dim(16, 16);
dim3 grid_dim((width + block_dim.x - 1) / block_dim.x, (height + block_dim.y - 1) / block_dim.y);
dim3 grid_dim_scaled((widthScaled + block_dim.x - 1) / block_dim.x, (heightScaled + block_dim.y - 1) / block_dim.y);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glLoadIdentity();
//define a viewing transformation
gluLookAt(eyeX, eyeY, eyeZ,
0.0, 0.0, 0.0,
0.0, 1.0, 0.0);
glMatrixMode(GL_MODELVIEW);
	// clear the drawing buffer
	glClear(GL_COLOR_BUFFER_BIT);
glLoadIdentity();
drawGlutObject(1, 0.5, 0.0, 0.0, -3.5, colors[0]);
drawGlutObject(2, 0.5, 1.0, 0.0, -3.5, colors[1]);
drawGlutObject(3, 0.5, -1.0, 1.0, -2.5, colors[2]);
drawGlutObject(4, 0.5, 1.0, 1.0, -4.5, colors[3]);
drawGlutObject(5, 0.5, -1.0, -1.0, -4.5, colors[4]);
drawGlutObject(6, 0.5, -1.0, 2.0, -3.5, colors[5]);
drawGlutObject(7, 0.5, 0.5, 1.0, -7.5, colors[6]);
drawGlutObject(8, -0.5, 0.5, -1.0, -7.5, colors[6]);
	// read the rendered frame back into the PBO and post-process it with CUDA
glBindBuffer(GL_PIXEL_PACK_BUFFER, pbo);
glReadPixels(0, 0, width, height, GL_RGB, GL_UNSIGNED_BYTE, 0);
glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);
cudaGraphicsMapResources(1, &cudaPBO, 0);
cudaGraphicsResourceGetMappedPointer((void**)&pixels, &num_bytes, cudaPBO);
	fancyKernel<<<grid_dim, block_dim>>>(pixels, width, height, R, G, B);
err = cudaGetLastError();
if (cudaSuccess != err)
{
fprintf(stderr, "fancyKernel kernel launch failed: %s\n", cudaGetErrorString(err));
exitHandler();
exit(EXIT_FAILURE);
}
err = cudaDeviceSynchronize();
if (cudaSuccess != err)
{
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching fancyKernel kernel!\n", err);
exitHandler();
exit(EXIT_FAILURE);
}
cudaGraphicsUnmapResources(1, &cudaPBO, 0);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pbo);
glDrawPixels(width, height, GL_RGB, GL_UNSIGNED_BYTE, 0);
glutSwapBuffers();
}
void reshape(int w, int h)
{
width = (w > 0) ? w : 1;
height = (h > 0) ? h : 1;
aspect = (double)width / (double)height;
glViewport(0, 0, width, height);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluPerspective(fovy, aspect, zNear, zFar);
glMatrixMode(GL_MODELVIEW);
recreateVBO();
recreatePBO();
}
void keyboard(unsigned char key, int x, int y)
{
switch (key)
{
	case 'q': if (R < 255) R += 1; break;
	case 'a': if (R > 0) R -= 1; break;
	case 'w': if (G < 255) G += 1; break;
	case 's': if (G > 0) G -= 1; break;
	case 'e': if (B < 255) B += 1; break;
	case 'd': if (B > 0) B -= 1; break;
case 'r':
case 'R': resetCamera(); break;
case 32:
{
		animation = !animation;
		if (animation)
		{
			glutTimerFunc(msecs, timer, 0);
		}
break;
}
case 27: exit(exitHandler()); break;
default:;
}
glutPostRedisplay();
	/* display the current RGB threshold values */
system("cls");
std::cout << "R: " << R << std::endl << "G: " << G << std::endl << "B: " << B << std::endl;
}
void special(int key, int x, int y)
{
switch (key)
{
case GLUT_KEY_LEFT: eyeX -= 0.5; break;
case GLUT_KEY_RIGHT: eyeX += 0.5; break;
case GLUT_KEY_UP: eyeY += 0.5; break;
case GLUT_KEY_DOWN: eyeY -= 0.5; break;
case GLUT_KEY_HOME: eyeZ -= 0.5; break;
case GLUT_KEY_END: eyeZ += 0.5; break;
default:;
}
glutPostRedisplay();
}
void timer(int value)
{
if (animation)
{
glutPostRedisplay();
glutTimerFunc(msecs, timer, 0);
}
}
void randCol(int4 color[], int size) {
srand(time(NULL));
for (int i = 0; i < size; i++) {
		// random RGBA components in [0, 254]; the alpha (w) component is not used by drawGlutObject
		color[i] = { rand() % 255, rand() % 255, rand() % 255, rand() % 255 };
}
}
void drawTeaPot(int size, GLfloat X, GLfloat Y, GLfloat Z) {
glPushMatrix();
glTranslatef(X, Y, Z);
glRotatef(90, 0.1, 0.2, 0.5);
glColor3ub(0, 255, 0);
glutSolidTeapot(size);
glPopMatrix();
}
void drawGlutObject(int id, GLdouble size, GLfloat X, GLfloat Y, GLfloat Z, int4 color) {
glPushMatrix();
glTranslatef(X, Y, Z);
glRotatef(90, 0.1, 0.2, 0.5);
glColor3ub(color.x, color.y, color.z);
switch (id)
{
case 1: glutSolidTeapot(size); break;
case 2: glutSolidSphere(size, 50, 50); break;
case 3: glutSolidCube(size); break;
case 4: glutSolidTetrahedron(); break;
case 5: glutSolidIcosahedron(); break;
case 6: glutSolidOctahedron(); break;
case 7: glutSolidDodecahedron(); break;
	case 8: glutSolidTorus(size, 10, 1, 1); break;
default:;
}
glPopMatrix();
}
void drawSphere(GLdouble size, GLfloat X, GLfloat Y, GLfloat Z) {
glPushMatrix();
glTranslatef(X, Y, Z);
glRotatef(90, 0.1, 0.2, 0.5);
glColor3ub(0, 255, 0);
glutSolidSphere(size, 50, 50);
glPopMatrix();
}
void drawCube(GLdouble size, GLfloat X, GLfloat Y, GLfloat Z) {
	glPushMatrix();
glTranslatef(X, Y, Z);
glRotatef(90, 0.1, 0.2, 0.5);
glColor3ub(0, 255, 0);
glutSolidCube(size);
glPopMatrix();
}
void drawTetrahedron(GLdouble size, GLfloat X, GLfloat Y, GLfloat Z) {
glPushMatrix();
glTranslatef(X, Y, Z);
glRotatef(90, 0.1, 0.2, 0.5);
glColor3ub(0, 255, 0);
glutSolidTetrahedron();
glPopMatrix();
} |
d262fef6aa8abd3eaf595ac7e800717df0d63038.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@generated from sparse-iter/blas/zmgesellcmmv.cu normal z -> s, Tue Feb 9 16:05:44 2016
*/
#include "magmasparse_internal.h"
#define PRECISION_s
//#define TEXTURE
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning one thread to each row - 1D kernel
template<bool betazero>
__global__ void
zmgesellptmv_kernel_1_3D(
int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
float alpha,
float * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
float * dx,
float beta,
float * dy)
{
// T threads assigned to each row
int idx = threadIdx.x; // local row
int idy = threadIdx.y; // vector
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idx; // global row index
if (row < num_rows ) {
float dot = MAGMA_S_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int max_ = (drowptr[ bdx+1 ]-offset)/blocksize;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
float val =
dval[ offset + idx + blocksize*k ];
int col =
dcolind[ offset + idx + blocksize*k ];
dot += val * dx[ col*num_vecs+idy ];
}
if (betazero) {
dy[ row+idy*num_rows ] = dot*alpha;
} else {
dy[ row+idy*num_rows ] = dot*alpha + beta*dy [ row+idy*num_rows ];
}
}
}
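// Illustration of the SELLP addressing used above (added comment; the layout
// itself follows from the kernel code): within a slice of `blocksize` rows the
// nonzeros are stored column-major, so for blocksize=2 and slice offset `o`
//   dval[o+0], dval[o+1] hold the 1st stored entry of row 0 and row 1,
//   dval[o+2], dval[o+3] hold the 2nd stored entry, and so on,
// which is why each thread walks its row with stride `blocksize`
// (dval[ offset + idx + blocksize*k ]).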
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
template<bool betazero>
__global__ void
zmgesellptmv_kernel_4_3D(
int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
float alpha,
float * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
float * dx,
float beta,
float * dy)
{
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int idz = threadIdx.z; // vector
int ldx = idx * blocksize + idy;
int ldz = idz * blocksize * T + idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
int vec = idz*num_rows;
extern __shared__ float shared[];
if (row < num_rows ) {
float dot = MAGMA_S_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
float val =
dval[ offset + ldx + block*k ];
int col =
dcolind[ offset + ldx + block*k ];
dot += val * dx[ col+vec ];
}
shared[ldz] = dot;
__syncthreads();
if ( idx < 2 ) {
shared[ldz]+=shared[ldz+blocksize*2];
__syncthreads();
if ( idx == 0 ) {
if (betazero) {
dy[row+vec] = (shared[ldz]+shared[ldz+blocksize*1])*alpha;
} else {
dy[row+vec] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha
+ beta*dy [row+vec];
}
}
}
}
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
template<bool betazero>
__global__ void
zmgesellptmv_kernel_8_3D(
int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
float alpha,
float * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
const float * __restrict__ dx,
float beta,
float * dy)
{
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int idz = threadIdx.z; // vector
int ldx = idx * blocksize + idy;
int ldz = idz * blocksize * T + idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
int vec = idz*num_rows;
extern __shared__ float shared[];
if (row < num_rows ) {
float dot = MAGMA_S_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
float val =
dval[ offset + ldx + block*k ];
int col =
dcolind[ offset + ldx + block*k ];
dot += val * dx[ col+vec ];
}
shared[ldz] = dot;
__syncthreads();
if ( idx < 4 ) {
shared[ldz]+=shared[ldz+blocksize*4];
__syncthreads();
if ( idx < 2 ) shared[ldz]+=shared[ldz+blocksize*2];
__syncthreads();
if ( idx == 0 ) {
if (betazero) {
dy[row+vec] = (shared[ldz]+shared[ldz+blocksize*1])*alpha;
} else {
dy[row+vec] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha
+ beta*dy [row+vec];
}
}
}
}
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
template<bool betazero>
__global__ void
zmgesellptmv_kernel_16_3D(
int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
float alpha,
float * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
float * dx,
float beta,
float * dy)
{
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int idz = threadIdx.z; // vector
int ldx = idx * blocksize + idy;
int ldz = idz * blocksize * T + idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
int vec = idz*num_rows;
extern __shared__ float shared[];
if (row < num_rows ) {
float dot = MAGMA_S_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
float val =
dval[ offset + ldx + block*k ];
int col =
dcolind[ offset + ldx + block*k ];
dot += val * dx[ col+vec ];
}
shared[ldz] = dot;
__syncthreads();
if ( idx < 8 ) {
shared[ldz]+=shared[ldz+blocksize*8];
__syncthreads();
if ( idx < 4 ) shared[ldz]+=shared[ldz+blocksize*4];
__syncthreads();
if ( idx < 2 ) shared[ldz]+=shared[ldz+blocksize*2];
__syncthreads();
if ( idx == 0 ) {
if (betazero) {
dy[row+vec] = (shared[ldz]+shared[ldz+blocksize*1])*alpha;
} else {
dy[row+vec] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha
+ beta*dy [row+vec];
}
}
}
}
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
template<bool betazero>
__global__ void
zmgesellptmv_kernel_32_3D(
int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
float alpha,
float * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
float * dx,
float beta,
float * dy)
{
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int idz = threadIdx.z; // vector
int ldx = idx * blocksize + idy;
int ldz = idz * blocksize * T + idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
int vec = idz*num_rows;
extern __shared__ float shared[];
if (row < num_rows ) {
float dot = MAGMA_S_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
float val =
dval[ offset + ldx + block*k ];
int col =
dcolind[ offset + ldx + block*k ];
dot += val * dx[ col+vec ];
}
shared[ldz] = dot;
__syncthreads();
if ( idx < 16 ) {
shared[ldz]+=shared[ldz+blocksize*16];
__syncthreads();
if ( idx < 8 ) shared[ldz]+=shared[ldz+blocksize*8];
__syncthreads();
if ( idx < 4 ) shared[ldz]+=shared[ldz+blocksize*4];
__syncthreads();
if ( idx < 2 ) shared[ldz]+=shared[ldz+blocksize*2];
__syncthreads();
if ( idx == 0 ) {
if (betazero) {
dy[row+vec] = (shared[ldz]+shared[ldz+blocksize*1])*alpha;
} else {
dy[row+vec] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha
+ beta*dy [row+vec];
}
}
}
}
}
/************************* same but using texture mem *************************/
// SELLP SpMV kernel 2D grid - for large number of vectors
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
template<bool betazero>
__global__ void
zmgesellptmv_kernel_1_3D_tex(
int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
float alpha,
float * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
hipTextureObject_t texdx,
float beta,
float * dy)
{
#if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300)
int idx = threadIdx.x; // local row
int idy = threadIdx.y; // vector
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idx; // global row index
if (row < num_rows ) {
float dot1 = MAGMA_S_MAKE(0.0, 0.0);
float dot2 = MAGMA_S_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int max_ = (drowptr[ bdx+1 ]-offset)/blocksize;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
float val =
dval[ offset + idx + blocksize*k ];
int col =
num_vecs * dcolind[ offset + idx + blocksize*k ];
int4 v = tex1Dfetch<int4>(texdx, col/2 + idy );
dot1 += val * __hiloint2float(v.y, v.x);
dot2 += val * __hiloint2float(v.w, v.z);
}
if (betazero) {
dy[row+num_rows*idy*2] =
dot1*alpha;
dy[row+num_rows*idy*2+num_rows] =
dot2*alpha;
} else {
dy[row+num_rows*idy*2] =
dot1*alpha
+ beta*dy [row*num_vecs+idy*2];
dy[row+num_rows*idy*2+num_rows] =
dot2*alpha
+ beta*dy [row*num_vecs+idy*2+1];
}
}
#endif
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
template<bool betazero>
__global__ void
zmgesellptmv_kernel_4_3D_tex(
int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
float alpha,
float * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
hipTextureObject_t texdx,
float beta,
float * dy)
{
#if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300)
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int idz = threadIdx.z; // vector
int ldx = idx * blocksize + idy;
int ldz = idz * blocksize * T + idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
int sv = num_vecs/2 * blocksize * T;
extern __shared__ float shared[];
if (row < num_rows ) {
float dot1 = MAGMA_S_MAKE(0.0, 0.0);
float dot2 = MAGMA_S_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
float val =
dval[ offset + ldx + block*k ];
int col =
num_vecs * dcolind[ offset + ldx + block*k ];
int4 v = tex1Dfetch<int4>(texdx, col/2 + idz );
dot1 += val * __hiloint2float(v.y, v.x);
dot2 += val * __hiloint2float(v.w, v.z);
}
shared[ldz] = dot1;
shared[ldz+sv] = dot2;
__syncthreads();
if ( idx < 2 ) {
shared[ldz]+=shared[ldz+blocksize*2];
shared[ldz+sv]+=shared[ldz+sv+blocksize*2];
__syncthreads();
if ( idx == 0 ) {
if (betazero) {
dy[row+num_rows*idz*2] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha;
dy[row+num_rows*idz*2+num_rows] =
(shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha;
} else {
dy[row+num_rows*idz*2] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha
+ beta*dy [row*num_vecs+idz*2];
dy[row+num_rows*idz*2+num_rows] =
(shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha
+ beta*dy [row*num_vecs+idz*2+1];
}
}
}
}
#endif
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
template<bool betazero>
__global__ void
zmgesellptmv_kernel_8_3D_tex(
int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
float alpha,
float * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
hipTextureObject_t texdx,
float beta,
float * dy)
{
#if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300)
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int idz = threadIdx.z; // vector
int ldx = idx * blocksize + idy;
int ldz = idz * blocksize * T + idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
int sv = num_vecs/2 * blocksize * T;
extern __shared__ float shared[];
if (row < num_rows ) {
float dot1 = MAGMA_S_MAKE(0.0, 0.0);
float dot2 = MAGMA_S_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
float val =
dval[ offset + ldx + block*k ];
int col =
num_vecs * dcolind[ offset + ldx + block*k ];
int4 v = tex1Dfetch<int4>(texdx, col/2 + idz );
dot1 += val * __hiloint2float(v.y, v.x);
dot2 += val * __hiloint2float(v.w, v.z);
}
shared[ldz] = dot1;
shared[ldz+sv] = dot2;
__syncthreads();
if ( idx < 4 ) {
shared[ldz]+=shared[ldz+blocksize*4];
shared[ldz+sv]+=shared[ldz+sv+blocksize*4];
__syncthreads();
if ( idx < 2 ) {
shared[ldz]+=shared[ldz+blocksize*2];
shared[ldz+sv]+=shared[ldz+sv+blocksize*2];
}
__syncthreads();
if ( idx == 0 ) {
if (betazero) {
dy[row+num_rows*idz*2] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha;
dy[row+num_rows*idz*2+num_rows] =
(shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha;
} else {
dy[row+num_rows*idz*2] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha
+ beta*dy [row*num_vecs+idz*2];
dy[row+num_rows*idz*2+num_rows] =
(shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha
+ beta*dy [row*num_vecs+idz*2+1];
}
}
}
}
#endif
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
template<bool betazero>
__global__ void
zmgesellptmv_kernel_16_3D_tex(
int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
float alpha,
float * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
hipTextureObject_t texdx,
float beta,
float * dy)
{
#if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300)
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int idz = threadIdx.z; // vector
int ldx = idx * blocksize + idy;
int ldz = idz * blocksize * T + idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
int sv = num_vecs/2 * blocksize * T;
extern __shared__ float shared[];
if (row < num_rows ) {
float dot1 = MAGMA_S_MAKE(0.0, 0.0);
float dot2 = MAGMA_S_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
float val =
dval[ offset + ldx + block*k ];
int col =
num_vecs * dcolind[ offset + ldx + block*k ];
int4 v = tex1Dfetch<int4>(texdx, col/2 + idz );
dot1 += val * __hiloint2float(v.y, v.x);
dot2 += val * __hiloint2float(v.w, v.z);
}
shared[ldz] = dot1;
shared[ldz+sv] = dot2;
__syncthreads();
if ( idx < 8 ) {
shared[ldz]+=shared[ldz+blocksize*8];
shared[ldz+sv]+=shared[ldz+sv+blocksize*8];
__syncthreads();
if ( idx < 4 ) {
shared[ldz]+=shared[ldz+blocksize*4];
shared[ldz+sv]+=shared[ldz+sv+blocksize*4];
}
if ( idx < 2 ) {
shared[ldz]+=shared[ldz+blocksize*2];
shared[ldz+sv]+=shared[ldz+sv+blocksize*2];
}
__syncthreads();
if ( idx == 0 ) {
if (betazero) {
dy[row+num_rows*idz*2] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha;
dy[row+num_rows*idz*2+num_rows] =
(shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha;
} else {
dy[row+num_rows*idz*2] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha
+ beta*dy [row*num_vecs+idz*2];
dy[row+num_rows*idz*2+num_rows] =
(shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha
+ beta*dy [row*num_vecs+idz*2+1];
}
}
}
}
#endif
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
template<bool betazero>
__global__ void
zmgesellptmv_kernel_32_3D_tex(
int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
float alpha,
float * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
hipTextureObject_t texdx,
float beta,
float * dy)
{
#if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300)
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int idz = threadIdx.z; // vector
int ldx = idx * blocksize + idy;
int ldz = idz * blocksize * T + idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
int sv = num_vecs/2 * blocksize * T;
extern __shared__ float shared[];
if (row < num_rows ) {
float dot1 = MAGMA_S_MAKE(0.0, 0.0);
float dot2 = MAGMA_S_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
float val =
dval[ offset + ldx + block*k ];
int col =
num_vecs * dcolind[ offset + ldx + block*k ];
int4 v = tex1Dfetch<int4>(texdx, col/2 + idz );
dot1 += val * __hiloint2float(v.y, v.x);
dot2 += val * __hiloint2float(v.w, v.z);
}
shared[ldz] = dot1;
shared[ldz+sv] = dot2;
__syncthreads();
if ( idx < 16 ) {
shared[ldz]+=shared[ldz+blocksize*16];
shared[ldz+sv]+=shared[ldz+sv+blocksize*16];
__syncthreads();
if ( idx < 8 ) {
shared[ldz]+=shared[ldz+blocksize*8];
shared[ldz+sv]+=shared[ldz+sv+blocksize*8];
}
if ( idx < 4 ) {
shared[ldz]+=shared[ldz+blocksize*4];
shared[ldz+sv]+=shared[ldz+sv+blocksize*4];
}
if ( idx < 2 ) {
shared[ldz]+=shared[ldz+blocksize*2];
shared[ldz+sv]+=shared[ldz+sv+blocksize*2];
}
__syncthreads();
if ( idx == 0 ) {
if (betazero) {
dy[row+num_rows*idz*2] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha;
dy[row+num_rows*idz*2+num_rows] =
(shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha;
} else {
dy[row+num_rows*idz*2] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha
+ beta*dy [row*num_vecs+idz*2];
dy[row+num_rows*idz*2+num_rows] =
(shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha
+ beta*dy [row*num_vecs+idz*2+1];
}
}
}
}
#endif
}
/**
Purpose
-------
This routine computes Y = alpha * A^t * X + beta * Y on the GPU.
Input format is SELLP. Note, that the input format for X is row-major
while the output format for Y is column major!
Arguments
---------
@param[in]
transA magma_trans_t
transpose A?
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
num_vecs magma_int_t
number of columns in X and Y
@param[in]
blocksize magma_int_t
number of rows in one ELL-slice
@param[in]
slices magma_int_t
number of slices in matrix
@param[in]
alignment magma_int_t
number of threads assigned to one row
@param[in]
alpha float
scalar multiplier
@param[in]
dval magmaFloat_ptr
array containing values of A in SELLP
@param[in]
dcolind magmaIndex_ptr
columnindices of A in SELLP
@param[in]
drowptr magmaIndex_ptr
rowpointer of SELLP
@param[in]
dx magmaFloat_ptr
input vector x
@param[in]
beta float
scalar multiplier
@param[out]
dy magmaFloat_ptr
input/output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_sblas
********************************************************************/
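/*
    Illustrative call (added; the concrete values are assumptions, not taken
    from this file): multiply a SELLP matrix against num_vecs = 4 packed
    right-hand sides with 8 threads assigned to each row:

        magma_smgesellpmv( MagmaNoTrans, m, n, 4, blocksize, slices, 8,
                           alpha, dval, dcolind, drowptr, dx, beta, dy, queue );

    num_vecs must be a multiple of 2, and the resulting threads-per-block
    count must not exceed 1024 (both are checked below).
*/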
extern "C" magma_int_t
magma_smgesellpmv(
magma_trans_t transA,
magma_int_t m, magma_int_t n,
magma_int_t num_vecs,
magma_int_t blocksize,
magma_int_t slices,
magma_int_t alignment,
float alpha,
magmaFloat_ptr dval,
magmaIndex_ptr dcolind,
magmaIndex_ptr drowptr,
magmaFloat_ptr dx,
float beta,
magmaFloat_ptr dy,
magma_queue_t queue )
{
// using a 3D thread grid for small num_vecs, a 2D grid otherwise
int texture=0, kepler=0, precision=0;
magma_int_t arch = magma_getdevice_arch();
if ( arch > 300 )
kepler = 1;
#if defined(PRECISION_d)
precision = 1;
#endif
#if defined(TEXTURE)
texture = 1;
#endif
if ( (texture==1) && (precision==1) && (kepler==1) ) {
// Create channel.
hipChannelFormatDesc channel_desc;
channel_desc = hipCreateChannelDesc(32, 32, 32, 32,
hipChannelFormatKindSigned);
// Create resource descriptor.
struct hipResourceDesc resDescdx;
memset(&resDescdx, 0, sizeof(resDescdx));
resDescdx.resType = hipResourceTypeLinear;
resDescdx.res.linear.devPtr = (void*)dx;
resDescdx.res.linear.desc = channel_desc;
resDescdx.res.linear.sizeInBytes = m * num_vecs * sizeof(float);
// Specify texture object parameters.
struct hipTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.addressMode[0] = hipAddressModeClamp;
texDesc.filterMode = hipFilterModePoint;
texDesc.readMode = hipReadModeElementType;
// Create texture object.
hipTextureObject_t texdx = 0;
hipCreateTextureObject(&texdx, &resDescdx, &texDesc, NULL);
hipDeviceSetSharedMemConfig(hipSharedMemBankSizeEightByte);
if ( num_vecs%2 ==1 ) { // only multiple of 2 can be processed
printf("error: number of vectors has to be multiple of 2.\n");
return MAGMA_ERR_NOT_SUPPORTED;
}
if ( num_vecs > 8 ) // avoid running into memory problems
alignment = 1;
int num_threads = (num_vecs/2) * blocksize*alignment;
// every thread handles two vectors
        if ( num_threads > 1024 ) {
            printf("error: too many threads requested.\n");
            return MAGMA_ERR_NOT_SUPPORTED;
        }
dim3 block( blocksize, alignment, num_vecs/2 );
int dimgrid1 = int( sqrt( float( slices )));
int dimgrid2 = magma_ceildiv( slices, dimgrid1 );
dim3 grid( dimgrid1, dimgrid2, 1);
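        // the slice index is decomposed over a 2D grid; the kernels rebuild it
        // as bdx = blockIdx.y * gridDim.x + blockIdx.x, so large slice counts
        // do not exceed the one-dimensional grid size limit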
int Ms = num_vecs * blocksize*alignment * sizeof( float );
if ( alignment == 1) {
dim3 block( blocksize, num_vecs/2, 1 );
if ( beta == MAGMA_S_MAKE( 0.0, 0.0 ) )
hipLaunchKernelGGL(( zmgesellptmv_kernel_1_3D_tex<true>), dim3(grid), dim3(block), 0, queue->cuda_stream() ,
m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
else
hipLaunchKernelGGL(( zmgesellptmv_kernel_1_3D_tex<false>), dim3(grid), dim3(block), 0, queue->cuda_stream() ,
m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
}
else if ( alignment == 4) {
dim3 block( blocksize, alignment, num_vecs/2 );
if ( beta == MAGMA_S_MAKE( 0.0, 0.0 ) )
hipLaunchKernelGGL(( zmgesellptmv_kernel_4_3D_tex<true>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
else
hipLaunchKernelGGL(( zmgesellptmv_kernel_4_3D_tex<false>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
}
else if ( alignment == 8) {
dim3 block( blocksize, alignment, num_vecs/2 );
if ( beta == MAGMA_S_MAKE( 0.0, 0.0 ) )
hipLaunchKernelGGL(( zmgesellptmv_kernel_8_3D_tex<true>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
else
hipLaunchKernelGGL(( zmgesellptmv_kernel_8_3D_tex<false>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
}
else if ( alignment == 16) {
dim3 block( blocksize, alignment, num_vecs/2 );
if ( beta == MAGMA_S_MAKE( 0.0, 0.0 ) )
hipLaunchKernelGGL(( zmgesellptmv_kernel_16_3D_tex<true>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
else
hipLaunchKernelGGL(( zmgesellptmv_kernel_16_3D_tex<false>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
}
else if ( alignment == 32) {
dim3 block( blocksize, alignment, num_vecs/2 );
if ( beta == MAGMA_S_MAKE( 0.0, 0.0 ) )
hipLaunchKernelGGL(( zmgesellptmv_kernel_32_3D_tex<true>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
else
hipLaunchKernelGGL(( zmgesellptmv_kernel_32_3D_tex<false>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
}
else {
printf("error: alignment %d not supported.\n", int(alignment) );
return MAGMA_ERR_NOT_SUPPORTED;
}
} else {
if ( num_vecs%2 ==1 ) { // only multiple of 2 can be processed
printf("error: number of vectors has to be multiple of 2.\n");
return MAGMA_ERR_NOT_SUPPORTED;
}
if ( num_vecs > 8 ) // avoid running into memory problems
alignment = 1;
int num_threads = num_vecs * blocksize*alignment;
// every thread handles two vectors
        if ( num_threads > 1024 ) {
            printf("error: too many threads requested.\n");
            return MAGMA_ERR_NOT_SUPPORTED;
        }
int dimgrid1 = int( sqrt( float( slices )));
int dimgrid2 = magma_ceildiv( slices, dimgrid1 );
dim3 grid( dimgrid1, dimgrid2, 1);
int Ms = num_threads * sizeof( float );
if ( alignment == 1) {
dim3 block( blocksize, num_vecs/2, 1 );
if ( beta == MAGMA_S_MAKE( 0.0, 0.0 ) )
hipLaunchKernelGGL(( zmgesellptmv_kernel_1_3D<true>), dim3(grid), dim3(block), 0, queue->cuda_stream() ,
m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
else
hipLaunchKernelGGL(( zmgesellptmv_kernel_1_3D<false>), dim3(grid), dim3(block), 0, queue->cuda_stream() ,
m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
}
else if ( alignment == 4) {
dim3 block( blocksize, alignment, num_vecs/2 );
if ( beta == MAGMA_S_MAKE( 0.0, 0.0 ) )
hipLaunchKernelGGL(( zmgesellptmv_kernel_4_3D<true>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
else
hipLaunchKernelGGL(( zmgesellptmv_kernel_4_3D<false>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
}
else if ( alignment == 8) {
dim3 block( blocksize, alignment, num_vecs/2 );
if ( beta == MAGMA_S_MAKE( 0.0, 0.0 ) )
hipLaunchKernelGGL(( zmgesellptmv_kernel_8_3D<true>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
else
hipLaunchKernelGGL(( zmgesellptmv_kernel_8_3D<false>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
}
else if ( alignment == 16) {
dim3 block( blocksize, alignment, num_vecs/2 );
if ( beta == MAGMA_S_MAKE( 0.0, 0.0 ) )
hipLaunchKernelGGL(( zmgesellptmv_kernel_16_3D<true>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
else
hipLaunchKernelGGL(( zmgesellptmv_kernel_16_3D<false>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
}
else if ( alignment == 32) {
dim3 block( blocksize, alignment, num_vecs/2 );
if ( beta == MAGMA_S_MAKE( 0.0, 0.0 ) )
hipLaunchKernelGGL(( zmgesellptmv_kernel_32_3D<true>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
else
hipLaunchKernelGGL(( zmgesellptmv_kernel_32_3D<false>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
}
else {
printf("error: alignment %d not supported.\n", int(alignment) );
return MAGMA_ERR_NOT_SUPPORTED;
}
}
return MAGMA_SUCCESS;
}
| d262fef6aa8abd3eaf595ac7e800717df0d63038.cu | /*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@generated from sparse-iter/blas/zmgesellcmmv.cu normal z -> s, Tue Feb 9 16:05:44 2016
*/
#include "magmasparse_internal.h"
#define PRECISION_s
//#define TEXTURE
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning one thread to each row - 1D kernel
template<bool betazero>
__global__ void
zmgesellptmv_kernel_1_3D(
int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
float alpha,
float * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
float * dx,
float beta,
float * dy)
{
// T threads assigned to each row
int idx = threadIdx.x; // local row
int idy = threadIdx.y; // vector
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idx; // global row index
if (row < num_rows ) {
float dot = MAGMA_S_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int max_ = (drowptr[ bdx+1 ]-offset)/blocksize;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
float val =
dval[ offset + idx + blocksize*k ];
int col =
dcolind[ offset + idx + blocksize*k ];
dot += val * dx[ col*num_vecs+idy ];
}
if (betazero) {
dy[ row+idy*num_rows ] = dot*alpha;
} else {
dy[ row+idy*num_rows ] = dot*alpha + beta*dy [ row+idy*num_rows ];
}
}
}
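// Illustration of the SELLP addressing used above (added comment; the layout
// itself follows from the kernel code): within a slice of `blocksize` rows the
// nonzeros are stored column-major, so for blocksize=2 and slice offset `o`
//   dval[o+0], dval[o+1] hold the 1st stored entry of row 0 and row 1,
//   dval[o+2], dval[o+3] hold the 2nd stored entry, and so on,
// which is why each thread walks its row with stride `blocksize`
// (dval[ offset + idx + blocksize*k ]).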
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
template<bool betazero>
__global__ void
zmgesellptmv_kernel_4_3D(
int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
float alpha,
float * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
float * dx,
float beta,
float * dy)
{
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int idz = threadIdx.z; // vector
int ldx = idx * blocksize + idy;
int ldz = idz * blocksize * T + idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
int vec = idz*num_rows;
extern __shared__ float shared[];
if (row < num_rows ) {
float dot = MAGMA_S_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
float val =
dval[ offset + ldx + block*k ];
int col =
dcolind[ offset + ldx + block*k ];
dot += val * dx[ col+vec ];
}
shared[ldz] = dot;
__syncthreads();
if ( idx < 2 ) {
shared[ldz]+=shared[ldz+blocksize*2];
__syncthreads();
if ( idx == 0 ) {
if (betazero) {
dy[row+vec] = (shared[ldz]+shared[ldz+blocksize*1])*alpha;
} else {
dy[row+vec] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha
+ beta*dy [row+vec];
}
}
}
}
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
template<bool betazero>
__global__ void
zmgesellptmv_kernel_8_3D(
int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
float alpha,
float * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
const float * __restrict__ dx,
float beta,
float * dy)
{
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int idz = threadIdx.z; // vector
int ldx = idx * blocksize + idy;
int ldz = idz * blocksize * T + idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
int vec = idz*num_rows;
extern __shared__ float shared[];
if (row < num_rows ) {
float dot = MAGMA_S_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
float val =
dval[ offset + ldx + block*k ];
int col =
dcolind[ offset + ldx + block*k ];
dot += val * dx[ col+vec ];
}
shared[ldz] = dot;
__syncthreads();
if ( idx < 4 ) {
shared[ldz]+=shared[ldz+blocksize*4];
__syncthreads();
if ( idx < 2 ) shared[ldz]+=shared[ldz+blocksize*2];
__syncthreads();
if ( idx == 0 ) {
if (betazero) {
dy[row+vec] = (shared[ldz]+shared[ldz+blocksize*1])*alpha;
} else {
dy[row+vec] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha
+ beta*dy [row+vec];
}
}
}
}
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
template<bool betazero>
__global__ void
zmgesellptmv_kernel_16_3D(
int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
float alpha,
float * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
float * dx,
float beta,
float * dy)
{
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int idz = threadIdx.z; // vector
int ldx = idx * blocksize + idy;
int ldz = idz * blocksize * T + idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
int vec = idz*num_rows;
extern __shared__ float shared[];
if (row < num_rows ) {
float dot = MAGMA_S_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
float val =
dval[ offset + ldx + block*k ];
int col =
dcolind[ offset + ldx + block*k ];
dot += val * dx[ col+vec ];
}
shared[ldz] = dot;
__syncthreads();
if ( idx < 8 ) {
shared[ldz]+=shared[ldz+blocksize*8];
__syncthreads();
if ( idx < 4 ) shared[ldz]+=shared[ldz+blocksize*4];
__syncthreads();
if ( idx < 2 ) shared[ldz]+=shared[ldz+blocksize*2];
__syncthreads();
if ( idx == 0 ) {
if (betazero) {
dy[row+vec] = (shared[ldz]+shared[ldz+blocksize*1])*alpha;
} else {
dy[row+vec] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha
+ beta*dy [row+vec];
}
}
}
}
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
template<bool betazero>
__global__ void
zmgesellptmv_kernel_32_3D(
int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
float alpha,
float * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
float * dx,
float beta,
float * dy)
{
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int idz = threadIdx.z; // vector
int ldx = idx * blocksize + idy;
int ldz = idz * blocksize * T + idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
int vec = idz*num_rows;
extern __shared__ float shared[];
if (row < num_rows ) {
float dot = MAGMA_S_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
float val =
dval[ offset + ldx + block*k ];
int col =
dcolind[ offset + ldx + block*k ];
dot += val * dx[ col+vec ];
}
shared[ldz] = dot;
__syncthreads();
if ( idx < 16 ) {
shared[ldz]+=shared[ldz+blocksize*16];
__syncthreads();
if ( idx < 8 ) shared[ldz]+=shared[ldz+blocksize*8];
__syncthreads();
if ( idx < 4 ) shared[ldz]+=shared[ldz+blocksize*4];
__syncthreads();
if ( idx < 2 ) shared[ldz]+=shared[ldz+blocksize*2];
__syncthreads();
if ( idx == 0 ) {
if (betazero) {
dy[row+vec] = (shared[ldz]+shared[ldz+blocksize*1])*alpha;
} else {
dy[row+vec] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha
+ beta*dy [row+vec];
}
}
}
}
}
/************************* same but using texture mem *************************/
// SELLP SpMV kernel 2D grid - for large number of vectors
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
template<bool betazero>
__global__ void
zmgesellptmv_kernel_1_3D_tex(
int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
float alpha,
float * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
cudaTextureObject_t texdx,
float beta,
float * dy)
{
#if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300)
int idx = threadIdx.x; // local row
int idy = threadIdx.y; // vector
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idx; // global row index
if (row < num_rows ) {
float dot1 = MAGMA_S_MAKE(0.0, 0.0);
float dot2 = MAGMA_S_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int max_ = (drowptr[ bdx+1 ]-offset)/blocksize;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
float val =
dval[ offset + idx + blocksize*k ];
int col =
num_vecs * dcolind[ offset + idx + blocksize*k ];
int4 v = tex1Dfetch<int4>(texdx, col/2 + idy );
dot1 += val * __hiloint2float(v.y, v.x);
dot2 += val * __hiloint2float(v.w, v.z);
}
if (betazero) {
dy[row+num_rows*idy*2] =
dot1*alpha;
dy[row+num_rows*idy*2+num_rows] =
dot2*alpha;
} else {
dy[row+num_rows*idy*2] =
dot1*alpha
+ beta*dy [row*num_vecs+idy*2];
dy[row+num_rows*idy*2+num_rows] =
dot2*alpha
+ beta*dy [row*num_vecs+idy*2+1];
}
}
#endif
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
template<bool betazero>
__global__ void
zmgesellptmv_kernel_4_3D_tex(
int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
float alpha,
float * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
cudaTextureObject_t texdx,
float beta,
float * dy)
{
#if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300)
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int idz = threadIdx.z; // vector
int ldx = idx * blocksize + idy;
int ldz = idz * blocksize * T + idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
int sv = num_vecs/2 * blocksize * T;
extern __shared__ float shared[];
if (row < num_rows ) {
float dot1 = MAGMA_S_MAKE(0.0, 0.0);
float dot2 = MAGMA_S_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
float val =
dval[ offset + ldx + block*k ];
int col =
num_vecs * dcolind[ offset + ldx + block*k ];
int4 v = tex1Dfetch<int4>(texdx, col/2 + idz );
dot1 += val * __hiloint2float(v.y, v.x);
dot2 += val * __hiloint2float(v.w, v.z);
}
shared[ldz] = dot1;
shared[ldz+sv] = dot2;
__syncthreads();
if ( idx < 2 ) {
shared[ldz]+=shared[ldz+blocksize*2];
shared[ldz+sv]+=shared[ldz+sv+blocksize*2];
__syncthreads();
if ( idx == 0 ) {
if (betazero) {
dy[row+num_rows*idz*2] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha;
dy[row+num_rows*idz*2+num_rows] =
(shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha;
} else {
dy[row+num_rows*idz*2] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha
+ beta*dy [row*num_vecs+idz*2];
dy[row+num_rows*idz*2+num_rows] =
(shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha
+ beta*dy [row*num_vecs+idz*2+1];
}
}
}
}
#endif
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
template<bool betazero>
__global__ void
zmgesellptmv_kernel_8_3D_tex(
int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
float alpha,
float * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
cudaTextureObject_t texdx,
float beta,
float * dy)
{
#if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300)
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int idz = threadIdx.z; // vector
int ldx = idx * blocksize + idy;
int ldz = idz * blocksize * T + idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
int sv = num_vecs/2 * blocksize * T;
extern __shared__ float shared[];
if (row < num_rows ) {
float dot1 = MAGMA_S_MAKE(0.0, 0.0);
float dot2 = MAGMA_S_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
float val =
dval[ offset + ldx + block*k ];
int col =
num_vecs * dcolind[ offset + ldx + block*k ];
int4 v = tex1Dfetch<int4>(texdx, col/2 + idz );
dot1 += val * __hiloint2float(v.y, v.x);
dot2 += val * __hiloint2float(v.w, v.z);
}
shared[ldz] = dot1;
shared[ldz+sv] = dot2;
__syncthreads();
if ( idx < 4 ) {
shared[ldz]+=shared[ldz+blocksize*4];
shared[ldz+sv]+=shared[ldz+sv+blocksize*4];
__syncthreads();
if ( idx < 2 ) {
shared[ldz]+=shared[ldz+blocksize*2];
shared[ldz+sv]+=shared[ldz+sv+blocksize*2];
}
__syncthreads();
if ( idx == 0 ) {
if (betazero) {
dy[row+num_rows*idz*2] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha;
dy[row+num_rows*idz*2+num_rows] =
(shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha;
} else {
dy[row+num_rows*idz*2] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha
+ beta*dy [row*num_vecs+idz*2];
dy[row+num_rows*idz*2+num_rows] =
(shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha
+ beta*dy [row*num_vecs+idz*2+1];
}
}
}
}
#endif
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
template<bool betazero>
__global__ void
zmgesellptmv_kernel_16_3D_tex(
int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
float alpha,
float * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
cudaTextureObject_t texdx,
float beta,
float * dy)
{
#if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300)
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int idz = threadIdx.z; // vector
int ldx = idx * blocksize + idy;
int ldz = idz * blocksize * T + idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
int sv = num_vecs/2 * blocksize * T;
extern __shared__ float shared[];
if (row < num_rows ) {
float dot1 = MAGMA_S_MAKE(0.0, 0.0);
float dot2 = MAGMA_S_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
float val =
dval[ offset + ldx + block*k ];
int col =
num_vecs * dcolind[ offset + ldx + block*k ];
int4 v = tex1Dfetch<int4>(texdx, col/2 + idz );
dot1 += val * __hiloint2float(v.y, v.x);
dot2 += val * __hiloint2float(v.w, v.z);
}
shared[ldz] = dot1;
shared[ldz+sv] = dot2;
__syncthreads();
if ( idx < 8 ) {
shared[ldz]+=shared[ldz+blocksize*8];
shared[ldz+sv]+=shared[ldz+sv+blocksize*8];
__syncthreads();
if ( idx < 4 ) {
shared[ldz]+=shared[ldz+blocksize*4];
shared[ldz+sv]+=shared[ldz+sv+blocksize*4];
}
if ( idx < 2 ) {
shared[ldz]+=shared[ldz+blocksize*2];
shared[ldz+sv]+=shared[ldz+sv+blocksize*2];
}
__syncthreads();
if ( idx == 0 ) {
if (betazero) {
dy[row+num_rows*idz*2] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha;
dy[row+num_rows*idz*2+num_rows] =
(shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha;
} else {
dy[row+num_rows*idz*2] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha
+ beta*dy [row*num_vecs+idz*2];
dy[row+num_rows*idz*2+num_rows] =
(shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha
+ beta*dy [row*num_vecs+idz*2+1];
}
}
}
}
#endif
}
// SELLP SpMV kernel 3D grid
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
template<bool betazero>
__global__ void
zmgesellptmv_kernel_32_3D_tex(
int num_rows,
int num_cols,
int num_vecs,
int blocksize,
int T,
float alpha,
float * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
cudaTextureObject_t texdx,
float beta,
float * dy)
{
#if defined(PRECISION_d) && defined(TEXTURE) && (__CUDA_ARCH__ >= 300)
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int idz = threadIdx.z; // vector
int ldx = idx * blocksize + idy;
int ldz = idz * blocksize * T + idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
int sv = num_vecs/2 * blocksize * T;
extern __shared__ float shared[];
if (row < num_rows ) {
float dot1 = MAGMA_S_MAKE(0.0, 0.0);
float dot2 = MAGMA_S_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
float val =
dval[ offset + ldx + block*k ];
int col =
num_vecs * dcolind[ offset + ldx + block*k ];
int4 v = tex1Dfetch<int4>(texdx, col/2 + idz );
dot1 += val * __hiloint2float(v.y, v.x);
dot2 += val * __hiloint2float(v.w, v.z);
}
shared[ldz] = dot1;
shared[ldz+sv] = dot2;
__syncthreads();
if ( idx < 16 ) {
shared[ldz]+=shared[ldz+blocksize*16];
shared[ldz+sv]+=shared[ldz+sv+blocksize*16];
__syncthreads();
if ( idx < 8 ) {
shared[ldz]+=shared[ldz+blocksize*8];
shared[ldz+sv]+=shared[ldz+sv+blocksize*8];
}
if ( idx < 4 ) {
shared[ldz]+=shared[ldz+blocksize*4];
shared[ldz+sv]+=shared[ldz+sv+blocksize*4];
}
if ( idx < 2 ) {
shared[ldz]+=shared[ldz+blocksize*2];
shared[ldz+sv]+=shared[ldz+sv+blocksize*2];
}
__syncthreads();
if ( idx == 0 ) {
if (betazero) {
dy[row+num_rows*idz*2] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha;
dy[row+num_rows*idz*2+num_rows] =
(shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha;
} else {
dy[row+num_rows*idz*2] =
(shared[ldz]+shared[ldz+blocksize*1])*alpha
+ beta*dy [row*num_vecs+idz*2];
dy[row+num_rows*idz*2+num_rows] =
(shared[ldz+sv]+shared[ldz+sv+blocksize*1])*alpha
+ beta*dy [row*num_vecs+idz*2+1];
}
}
}
}
#endif
}
/**
Purpose
-------
This routine computes Y = alpha * A^t * X + beta * Y on the GPU.
Input format is SELLP. Note that the input format for X is row-major
while the output format for Y is column-major!
Arguments
---------
@param[in]
transA magma_trans_t
transpose A?
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
num_vecs magma_int_t
number of columns in X and Y
@param[in]
blocksize magma_int_t
number of rows in one ELL-slice
@param[in]
slices magma_int_t
number of slices in matrix
@param[in]
alignment magma_int_t
number of threads assigned to one row
@param[in]
alpha float
scalar multiplier
@param[in]
dval magmaFloat_ptr
array containing values of A in SELLP
@param[in]
dcolind magmaIndex_ptr
column indices of A in SELLP
@param[in]
drowptr magmaIndex_ptr
row pointer of SELLP
@param[in]
dx magmaFloat_ptr
input vector x
@param[in]
beta float
scalar multiplier
@param[in,out]
dy magmaFloat_ptr
input/output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_sblas
********************************************************************/
extern "C" magma_int_t
magma_smgesellpmv(
magma_trans_t transA,
magma_int_t m, magma_int_t n,
magma_int_t num_vecs,
magma_int_t blocksize,
magma_int_t slices,
magma_int_t alignment,
float alpha,
magmaFloat_ptr dval,
magmaIndex_ptr dcolind,
magmaIndex_ptr drowptr,
magmaFloat_ptr dx,
float beta,
magmaFloat_ptr dy,
magma_queue_t queue )
{
// using a 3D thread grid for small num_vecs, a 2D grid otherwise
int texture=0, kepler=0, precision=0;
magma_int_t arch = magma_getdevice_arch();
if ( arch > 300 )
kepler = 1;
#if defined(PRECISION_d)
precision = 1;
#endif
#if defined(TEXTURE)
texture = 1;
#endif
if ( (texture==1) && (precision==1) && (kepler==1) ) {
// Create channel.
cudaChannelFormatDesc channel_desc;
channel_desc = cudaCreateChannelDesc(32, 32, 32, 32,
cudaChannelFormatKindSigned);
// Create resource descriptor.
struct cudaResourceDesc resDescdx;
memset(&resDescdx, 0, sizeof(resDescdx));
resDescdx.resType = cudaResourceTypeLinear;
resDescdx.res.linear.devPtr = (void*)dx;
resDescdx.res.linear.desc = channel_desc;
resDescdx.res.linear.sizeInBytes = m * num_vecs * sizeof(float);
// Specify texture object parameters.
struct cudaTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.addressMode[0] = cudaAddressModeClamp;
texDesc.filterMode = cudaFilterModePoint;
texDesc.readMode = cudaReadModeElementType;
// Create texture object.
cudaTextureObject_t texdx = 0;
cudaCreateTextureObject(&texdx, &resDescdx, &texDesc, NULL);
cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeEightByte);
if ( num_vecs%2 ==1 ) { // only multiple of 2 can be processed
printf("error: number of vectors has to be multiple of 2.\n");
return MAGMA_ERR_NOT_SUPPORTED;
}
if ( num_vecs > 8 ) // avoid running into memory problems
alignment = 1;
int num_threads = (num_vecs/2) * blocksize*alignment;
// every thread handles two vectors
        if ( num_threads > 1024 ) {
            printf("error: too many threads requested.\n");
            return MAGMA_ERR_NOT_SUPPORTED;
        }
dim3 block( blocksize, alignment, num_vecs/2 );
int dimgrid1 = int( sqrt( float( slices )));
int dimgrid2 = magma_ceildiv( slices, dimgrid1 );
dim3 grid( dimgrid1, dimgrid2, 1);
int Ms = num_vecs * blocksize*alignment * sizeof( float );
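        // Ms: each of the (num_vecs/2) * blocksize * alignment threads keeps
        // two partial sums (dot1 and dot2) in shared memory, i.e.
        // num_vecs * blocksize * alignment floats in total.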
if ( alignment == 1) {
dim3 block( blocksize, num_vecs/2, 1 );
if ( beta == MAGMA_S_MAKE( 0.0, 0.0 ) )
zmgesellptmv_kernel_1_3D_tex<true><<< grid, block, 0, queue->cuda_stream() >>>
( m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
else
zmgesellptmv_kernel_1_3D_tex<false><<< grid, block, 0, queue->cuda_stream() >>>
( m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
}
else if ( alignment == 4) {
dim3 block( blocksize, alignment, num_vecs/2 );
if ( beta == MAGMA_S_MAKE( 0.0, 0.0 ) )
zmgesellptmv_kernel_4_3D_tex<true><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
else
zmgesellptmv_kernel_4_3D_tex<false><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
}
else if ( alignment == 8) {
dim3 block( blocksize, alignment, num_vecs/2 );
if ( beta == MAGMA_S_MAKE( 0.0, 0.0 ) )
zmgesellptmv_kernel_8_3D_tex<true><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
else
zmgesellptmv_kernel_8_3D_tex<false><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
}
else if ( alignment == 16) {
dim3 block( blocksize, alignment, num_vecs/2 );
if ( beta == MAGMA_S_MAKE( 0.0, 0.0 ) )
zmgesellptmv_kernel_16_3D_tex<true><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
else
zmgesellptmv_kernel_16_3D_tex<false><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
}
else if ( alignment == 32) {
dim3 block( blocksize, alignment, num_vecs/2 );
if ( beta == MAGMA_S_MAKE( 0.0, 0.0 ) )
zmgesellptmv_kernel_32_3D_tex<true><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
else
zmgesellptmv_kernel_32_3D_tex<false><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
}
else {
printf("error: alignment %d not supported.\n", int(alignment) );
return MAGMA_ERR_NOT_SUPPORTED;
}
} else {
if ( num_vecs%2 ==1 ) { // only multiple of 2 can be processed
printf("error: number of vectors has to be multiple of 2.\n");
return MAGMA_ERR_NOT_SUPPORTED;
}
if ( num_vecs > 8 ) // avoid running into memory problems
alignment = 1;
int num_threads = num_vecs * blocksize*alignment;
// every thread handles two vectors
        if ( num_threads > 1024 ) {
            printf("error: too many threads requested.\n");
            return MAGMA_ERR_NOT_SUPPORTED;
        }
int dimgrid1 = int( sqrt( float( slices )));
int dimgrid2 = magma_ceildiv( slices, dimgrid1 );
dim3 grid( dimgrid1, dimgrid2, 1);
int Ms = num_threads * sizeof( float );
if ( alignment == 1) {
dim3 block( blocksize, num_vecs/2, 1 );
if ( beta == MAGMA_S_MAKE( 0.0, 0.0 ) )
zmgesellptmv_kernel_1_3D<true><<< grid, block, 0, queue->cuda_stream() >>>
( m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
else
zmgesellptmv_kernel_1_3D<false><<< grid, block, 0, queue->cuda_stream() >>>
( m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
}
else if ( alignment == 4) {
dim3 block( blocksize, alignment, num_vecs/2 );
if ( beta == MAGMA_S_MAKE( 0.0, 0.0 ) )
zmgesellptmv_kernel_4_3D<true><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
else
zmgesellptmv_kernel_4_3D<false><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
}
else if ( alignment == 8) {
dim3 block( blocksize, alignment, num_vecs/2 );
if ( beta == MAGMA_S_MAKE( 0.0, 0.0 ) )
zmgesellptmv_kernel_8_3D<true><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
else
zmgesellptmv_kernel_8_3D<false><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
}
else if ( alignment == 16) {
dim3 block( blocksize, alignment, num_vecs/2 );
if ( beta == MAGMA_S_MAKE( 0.0, 0.0 ) )
zmgesellptmv_kernel_16_3D<true><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
else
zmgesellptmv_kernel_16_3D<false><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
}
else if ( alignment == 32) {
dim3 block( blocksize, alignment, num_vecs/2 );
if ( beta == MAGMA_S_MAKE( 0.0, 0.0 ) )
zmgesellptmv_kernel_32_3D<true><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
else
zmgesellptmv_kernel_32_3D<false><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, num_vecs, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
}
else {
printf("error: alignment %d not supported.\n", int(alignment) );
return MAGMA_ERR_NOT_SUPPORTED;
}
}
return MAGMA_SUCCESS;
}
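/*
    Minimal usage sketch (not part of the original file; the variable names,
    sizes, and setup are illustrative assumptions). It assumes the SELLP
    arrays dval, dcolind, drowptr and the dense blocks dx, dy were already
    allocated and filled on the device:

        magma_queue_t queue;
        magma_queue_create( 0, &queue );
        magma_int_t stat = magma_smgesellpmv(
            MagmaNoTrans, m, n,
            num_vecs,              // must be a multiple of 2
            blocksize, slices,
            alignment,             // threads per row: 1, 4, 8, 16 or 32
            1.0f, dval, dcolind, drowptr,
            dx, 0.0f, dy, queue );
        magma_queue_destroy( queue );
*/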
|
ab3e19efbdf779464d52a146241ab0402c0f1f46.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/operators/math/math_function.h"
#include "paddle/operators/math/selected_rows_functor.h"
#include "paddle/platform/cuda_helper.h"
namespace paddle {
namespace operators {
namespace math {
template <typename T>
struct SelectedRowsAdd<platform::CUDADeviceContext, T> {
void operator()(const platform::CUDADeviceContext& context,
const framework::SelectedRows& input1,
const framework::SelectedRows& input2,
framework::SelectedRows* output) {
auto in1_height = input1.height();
PADDLE_ENFORCE_EQ(in1_height, input2.height());
output->set_height(in1_height);
auto& in1_rows = input1.rows();
auto& in2_rows = input2.rows();
std::vector<int64_t> out_rows;
out_rows.reserve(in1_rows.size() + in2_rows.size());
// concat rows
out_rows.insert(out_rows.end(), in1_rows.begin(), in1_rows.end());
out_rows.insert(out_rows.end(), in2_rows.begin(), in2_rows.end());
output->set_rows(out_rows);
auto* out_value = output->mutable_value();
auto& in1_value = input1.value();
auto& in2_value = input2.value();
auto in1_row_numel = in1_value.numel() / in1_rows.size();
PADDLE_ENFORCE_EQ(in1_row_numel, in2_value.numel() / in2_rows.size());
PADDLE_ENFORCE_EQ(in1_row_numel, out_value->numel() / out_rows.size());
auto* out_data = out_value->data<T>();
auto* in1_data = in1_value.data<T>();
auto in1_place = input1.place();
PADDLE_ENFORCE(platform::is_gpu_place(in1_place));
auto in2_place = input2.place();
PADDLE_ENFORCE(platform::is_gpu_place(in2_place));
auto out_place = context.GetPlace();
PADDLE_ENFORCE(platform::is_gpu_place(out_place));
memory::Copy(
boost::get<platform::CUDAPlace>(out_place), out_data,
boost::get<platform::CUDAPlace>(in1_place), in1_data,
in1_value.numel() * sizeof(T),
reinterpret_cast<const platform::CUDADeviceContext&>(context).stream());
auto* in2_data = in2_value.data<T>();
memory::Copy(boost::get<platform::CUDAPlace>(out_place),
out_data + in1_value.numel(),
boost::get<platform::CUDAPlace>(in2_place), in2_data,
in2_value.numel() * sizeof(T), context.stream());
}
};
template struct SelectedRowsAdd<platform::CUDADeviceContext, float>;
template struct SelectedRowsAdd<platform::CUDADeviceContext, double>;
namespace {
template <typename T, int block_size>
__global__ void SelectedRowsAddTensorKernel(const T* selected_rows,
const int64_t* rows, T* tensor_out,
int64_t row_numel) {
const int ty = blockIdx.y;
int tid = threadIdx.x;
selected_rows += ty * row_numel;
tensor_out += rows[ty] * row_numel;
for (int index = tid; index < row_numel; index += block_size) {
// Since index in rows of SelectedRows can be duplicate, we can not use
// tensor_out[index] += selected_rows[index]; Instead, we have to use
// AtomicAdd to avoid concurrent write error.
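    // For example, with rows = {0, 0, 2}, the blocks ty = 0 and ty = 1 both
    // write into tensor_out row 0, so a plain "+=" issued from different
    // blocks would race.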
paddle::platform::CudaAtomicAdd(tensor_out + index, selected_rows[index]);
}
}
} // namespace
template <typename T>
struct SelectedRowsAddTensor<platform::CUDADeviceContext, T> {
void operator()(const platform::CUDADeviceContext& context,
const framework::SelectedRows& input1,
const framework::Tensor& input2, framework::Tensor* output) {
auto in1_height = input1.height();
auto in2_dims = input2.dims();
auto out_dims = output->dims();
PADDLE_ENFORCE_EQ(in1_height, in2_dims[0]);
PADDLE_ENFORCE_EQ(in1_height, out_dims[0]);
auto& in1_value = input1.value();
auto& in1_rows = input1.rows();
int64_t in1_row_numel = in1_value.numel() / in1_rows.size();
PADDLE_ENFORCE_EQ(in1_row_numel, input2.numel() / in1_height);
PADDLE_ENFORCE_EQ(in1_row_numel, output->numel() / in1_height);
auto* in1_data = in1_value.data<T>();
auto* in2_data = input2.data<T>();
auto* out_data = output->data<T>();
SetConstant<platform::CUDADeviceContext, T> functor;
functor(context, output, 0.0);
const int block_size = 256;
dim3 threads(block_size, 1);
dim3 grid(1, in1_rows.size());
hipLaunchKernelGGL(( SelectedRowsAddTensorKernel<
T, block_size>), dim3(grid), dim3(threads), 0, context.stream(),
in1_data, in1_rows.data(), out_data, in1_row_numel);
auto out_eigen = framework::EigenVector<T>::Flatten(*output);
auto in2_eigen = framework::EigenVector<T>::Flatten(input2);
out_eigen.device(*context.eigen_device()) = out_eigen + in2_eigen;
}
};
template struct SelectedRowsAddTensor<platform::CUDADeviceContext, float>;
template struct SelectedRowsAddTensor<platform::CUDADeviceContext, double>;
template <typename T>
struct SelectedRowsAddTo<platform::CUDADeviceContext, T> {
void operator()(const platform::CUDADeviceContext& context,
const framework::SelectedRows& input1,
const int64_t input2_offset,
framework::SelectedRows* input2) {
auto in1_height = input1.height();
PADDLE_ENFORCE_EQ(in1_height, input2->height());
auto& in1_rows = input1.rows();
auto& in2_rows = *(input2->mutable_rows());
auto& in1_value = input1.value();
auto* in2_value = input2->mutable_value();
// concat rows
in2_rows.insert(in2_rows.end(), in1_rows.begin(), in1_rows.end());
auto in1_place = input1.place();
PADDLE_ENFORCE(platform::is_gpu_place(in1_place));
auto in2_place = input2->place();
PADDLE_ENFORCE(platform::is_gpu_place(in2_place));
auto* in1_data = in1_value.data<T>();
auto* in2_data = in2_value->data<T>();
memory::Copy(boost::get<platform::CUDAPlace>(in2_place),
in2_data + input2_offset,
boost::get<platform::CUDAPlace>(in1_place), in1_data,
in1_value.numel() * sizeof(T), context.stream());
}
};
template struct SelectedRowsAddTo<platform::CUDADeviceContext, float>;
template struct SelectedRowsAddTo<platform::CUDADeviceContext, double>;
template struct SelectedRowsAddTo<platform::CUDADeviceContext, int>;
template struct SelectedRowsAddTo<platform::CUDADeviceContext, int64_t>;
namespace {
template <typename T, int block_size>
__global__ void SelectedRowsAddToTensorKernel(const T* selected_rows,
const int64_t* rows,
T* tensor_out,
int64_t row_numel) {
const int ty = blockIdx.y;
int tid = threadIdx.x;
selected_rows += ty * row_numel;
tensor_out += rows[ty] * row_numel;
for (int index = tid; index < row_numel; index += block_size) {
// Since index in rows of SelectedRows can be duplicate, we have to use
// Atomic Operation to avoid concurrent write error.
paddle::platform::CudaAtomicAdd(tensor_out + index, selected_rows[index]);
}
}
} // namespace
template <typename T>
struct SelectedRowsAddToTensor<platform::CUDADeviceContext, T> {
void operator()(const platform::CUDADeviceContext& context,
const framework::SelectedRows& input1,
framework::Tensor* input2) {
auto in1_height = input1.height();
auto in2_dims = input2->dims();
PADDLE_ENFORCE_EQ(in1_height, in2_dims[0]);
auto& in1_value = input1.value();
auto& in1_rows = input1.rows();
int64_t in1_row_numel = in1_value.numel() / in1_rows.size();
PADDLE_ENFORCE_EQ(in1_row_numel, input2->numel() / in1_height);
auto* in1_data = in1_value.data<T>();
auto* in2_data = input2->data<T>();
const int block_size = 256;
dim3 threads(block_size, 1);
dim3 grid(1, in1_rows.size());
hipLaunchKernelGGL(( SelectedRowsAddToTensorKernel<
T, block_size>), dim3(grid), dim3(threads), 0, context.stream(),
in1_data, in1_rows.data(), in2_data, in1_row_numel);
}
};
template struct SelectedRowsAddToTensor<platform::CUDADeviceContext, float>;
template struct SelectedRowsAddToTensor<platform::CUDADeviceContext, double>;
template struct SelectedRowsAddToTensor<platform::CUDADeviceContext, int>;
template struct SelectedRowsAddToTensor<platform::CUDADeviceContext, int64_t>;
} // namespace math
} // namespace operators
} // namespace paddle
| ab3e19efbdf779464d52a146241ab0402c0f1f46.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/operators/math/math_function.h"
#include "paddle/operators/math/selected_rows_functor.h"
#include "paddle/platform/cuda_helper.h"
namespace paddle {
namespace operators {
namespace math {
template <typename T>
struct SelectedRowsAdd<platform::CUDADeviceContext, T> {
void operator()(const platform::CUDADeviceContext& context,
const framework::SelectedRows& input1,
const framework::SelectedRows& input2,
framework::SelectedRows* output) {
auto in1_height = input1.height();
PADDLE_ENFORCE_EQ(in1_height, input2.height());
output->set_height(in1_height);
auto& in1_rows = input1.rows();
auto& in2_rows = input2.rows();
std::vector<int64_t> out_rows;
out_rows.reserve(in1_rows.size() + in2_rows.size());
// concat rows
out_rows.insert(out_rows.end(), in1_rows.begin(), in1_rows.end());
out_rows.insert(out_rows.end(), in2_rows.begin(), in2_rows.end());
output->set_rows(out_rows);
auto* out_value = output->mutable_value();
auto& in1_value = input1.value();
auto& in2_value = input2.value();
auto in1_row_numel = in1_value.numel() / in1_rows.size();
PADDLE_ENFORCE_EQ(in1_row_numel, in2_value.numel() / in2_rows.size());
PADDLE_ENFORCE_EQ(in1_row_numel, out_value->numel() / out_rows.size());
auto* out_data = out_value->data<T>();
auto* in1_data = in1_value.data<T>();
auto in1_place = input1.place();
PADDLE_ENFORCE(platform::is_gpu_place(in1_place));
auto in2_place = input2.place();
PADDLE_ENFORCE(platform::is_gpu_place(in2_place));
auto out_place = context.GetPlace();
PADDLE_ENFORCE(platform::is_gpu_place(out_place));
memory::Copy(
boost::get<platform::CUDAPlace>(out_place), out_data,
boost::get<platform::CUDAPlace>(in1_place), in1_data,
in1_value.numel() * sizeof(T),
reinterpret_cast<const platform::CUDADeviceContext&>(context).stream());
auto* in2_data = in2_value.data<T>();
memory::Copy(boost::get<platform::CUDAPlace>(out_place),
out_data + in1_value.numel(),
boost::get<platform::CUDAPlace>(in2_place), in2_data,
in2_value.numel() * sizeof(T), context.stream());
}
};
template struct SelectedRowsAdd<platform::CUDADeviceContext, float>;
template struct SelectedRowsAdd<platform::CUDADeviceContext, double>;
namespace {
template <typename T, int block_size>
__global__ void SelectedRowsAddTensorKernel(const T* selected_rows,
const int64_t* rows, T* tensor_out,
int64_t row_numel) {
const int ty = blockIdx.y;
int tid = threadIdx.x;
selected_rows += ty * row_numel;
tensor_out += rows[ty] * row_numel;
for (int index = tid; index < row_numel; index += block_size) {
// Since index in rows of SelectedRows can be duplicate, we can not use
// tensor_out[index] += selected_rows[index]; Instead, we have to use
// AtomicAdd to avoid concurrent write error.
paddle::platform::CudaAtomicAdd(tensor_out + index, selected_rows[index]);
}
}
} // namespace
template <typename T>
struct SelectedRowsAddTensor<platform::CUDADeviceContext, T> {
void operator()(const platform::CUDADeviceContext& context,
const framework::SelectedRows& input1,
const framework::Tensor& input2, framework::Tensor* output) {
auto in1_height = input1.height();
auto in2_dims = input2.dims();
auto out_dims = output->dims();
PADDLE_ENFORCE_EQ(in1_height, in2_dims[0]);
PADDLE_ENFORCE_EQ(in1_height, out_dims[0]);
auto& in1_value = input1.value();
auto& in1_rows = input1.rows();
int64_t in1_row_numel = in1_value.numel() / in1_rows.size();
PADDLE_ENFORCE_EQ(in1_row_numel, input2.numel() / in1_height);
PADDLE_ENFORCE_EQ(in1_row_numel, output->numel() / in1_height);
auto* in1_data = in1_value.data<T>();
auto* in2_data = input2.data<T>();
auto* out_data = output->data<T>();
SetConstant<platform::CUDADeviceContext, T> functor;
functor(context, output, 0.0);
const int block_size = 256;
dim3 threads(block_size, 1);
dim3 grid(1, in1_rows.size());
SelectedRowsAddTensorKernel<
T, block_size><<<grid, threads, 0, context.stream()>>>(
in1_data, in1_rows.data(), out_data, in1_row_numel);
auto out_eigen = framework::EigenVector<T>::Flatten(*output);
auto in2_eigen = framework::EigenVector<T>::Flatten(input2);
out_eigen.device(*context.eigen_device()) = out_eigen + in2_eigen;
}
};
template struct SelectedRowsAddTensor<platform::CUDADeviceContext, float>;
template struct SelectedRowsAddTensor<platform::CUDADeviceContext, double>;
template <typename T>
struct SelectedRowsAddTo<platform::CUDADeviceContext, T> {
void operator()(const platform::CUDADeviceContext& context,
const framework::SelectedRows& input1,
const int64_t input2_offset,
framework::SelectedRows* input2) {
auto in1_height = input1.height();
PADDLE_ENFORCE_EQ(in1_height, input2->height());
auto& in1_rows = input1.rows();
auto& in2_rows = *(input2->mutable_rows());
auto& in1_value = input1.value();
auto* in2_value = input2->mutable_value();
// concat rows
in2_rows.insert(in2_rows.end(), in1_rows.begin(), in1_rows.end());
auto in1_place = input1.place();
PADDLE_ENFORCE(platform::is_gpu_place(in1_place));
auto in2_place = input2->place();
PADDLE_ENFORCE(platform::is_gpu_place(in2_place));
auto* in1_data = in1_value.data<T>();
auto* in2_data = in2_value->data<T>();
memory::Copy(boost::get<platform::CUDAPlace>(in2_place),
in2_data + input2_offset,
boost::get<platform::CUDAPlace>(in1_place), in1_data,
in1_value.numel() * sizeof(T), context.stream());
}
};
template struct SelectedRowsAddTo<platform::CUDADeviceContext, float>;
template struct SelectedRowsAddTo<platform::CUDADeviceContext, double>;
template struct SelectedRowsAddTo<platform::CUDADeviceContext, int>;
template struct SelectedRowsAddTo<platform::CUDADeviceContext, int64_t>;
namespace {
template <typename T, int block_size>
__global__ void SelectedRowsAddToTensorKernel(const T* selected_rows,
const int64_t* rows,
T* tensor_out,
int64_t row_numel) {
const int ty = blockIdx.y;
int tid = threadIdx.x;
selected_rows += ty * row_numel;
tensor_out += rows[ty] * row_numel;
for (int index = tid; index < row_numel; index += block_size) {
// Since index in rows of SelectedRows can be duplicate, we have to use
// Atomic Operation to avoid concurrent write error.
paddle::platform::CudaAtomicAdd(tensor_out + index, selected_rows[index]);
}
}
} // namespace
template <typename T>
struct SelectedRowsAddToTensor<platform::CUDADeviceContext, T> {
void operator()(const platform::CUDADeviceContext& context,
const framework::SelectedRows& input1,
framework::Tensor* input2) {
auto in1_height = input1.height();
auto in2_dims = input2->dims();
PADDLE_ENFORCE_EQ(in1_height, in2_dims[0]);
auto& in1_value = input1.value();
auto& in1_rows = input1.rows();
int64_t in1_row_numel = in1_value.numel() / in1_rows.size();
PADDLE_ENFORCE_EQ(in1_row_numel, input2->numel() / in1_height);
auto* in1_data = in1_value.data<T>();
auto* in2_data = input2->data<T>();
const int block_size = 256;
dim3 threads(block_size, 1);
dim3 grid(1, in1_rows.size());
SelectedRowsAddToTensorKernel<
T, block_size><<<grid, threads, 0, context.stream()>>>(
in1_data, in1_rows.data(), in2_data, in1_row_numel);
}
};
template struct SelectedRowsAddToTensor<platform::CUDADeviceContext, float>;
template struct SelectedRowsAddToTensor<platform::CUDADeviceContext, double>;
template struct SelectedRowsAddToTensor<platform::CUDADeviceContext, int>;
template struct SelectedRowsAddToTensor<platform::CUDADeviceContext, int64_t>;
} // namespace math
} // namespace operators
} // namespace paddle
|
0ff486c712af1411a0217882539d45cc6fbfe1c7.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include "freshman.h"
#define N_REPEAT 10
#define N_SEGMENT 4
void sumArrays(float * a,float * b,float * res,const int size)
{
for(int i=0;i<size;i+=4)
{
res[i]=a[i]+b[i];
res[i+1]=a[i+1]+b[i+1];
res[i+2]=a[i+2]+b[i+2];
res[i+3]=a[i+3]+b[i+3];
}
}
__global__ void sumArraysGPU(float*a,float*b,float*res,int N)
{
int idx=blockIdx.x*blockDim.x+threadIdx.x;
  if(idx < N)
  {
    // repeat the sum N_REPEAT times to create an artificial delay
for(int j=0;j<N_REPEAT;j++)
res[idx]=a[idx]+b[idx];
}
}
int main(int argc,char **argv)
{
// set up device
initDevice(0);
double iStart,iElaps;
iStart=cpuSecond();
int nElem=1<<24;
printf("Vector size:%d\n",nElem);
int nByte=sizeof(float)*nElem;
float * a_h,*b_h,*res_h,*res_from_gpu_h;
CHECK(hipHostMalloc((float**)&a_h,nByte,hipHostMallocDefault));
CHECK(hipHostMalloc((float**)&b_h,nByte,hipHostMallocDefault));
CHECK(hipHostMalloc((float**)&res_h,nByte,hipHostMallocDefault));
CHECK(hipHostMalloc((float**)&res_from_gpu_h,nByte,hipHostMallocDefault));
hipMemset(res_h,0,nByte);
hipMemset(res_from_gpu_h,0,nByte);
float *a_d,*b_d,*res_d;
CHECK(hipMalloc((float**)&a_d,nByte));
CHECK(hipMalloc((float**)&b_d,nByte));
CHECK(hipMalloc((float**)&res_d,nByte));
initialData(a_h,nElem);
initialData(b_h,nElem);
sumArrays(a_h,b_h,res_h,nElem);
dim3 block(512);
dim3 grid((nElem-1)/block.x+1);
//asynchronous calculation
int iElem=nElem/N_SEGMENT;
hipStream_t stream[N_SEGMENT];
for(int i=0;i<N_SEGMENT;i++)
{
CHECK(hipStreamCreate(&stream[i]));
}
hipEvent_t start,stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start,0);
for(int i=0;i<N_SEGMENT;i++)
{
int ioffset=i*iElem;
CHECK(hipMemcpyAsync(&a_d[ioffset],&a_h[ioffset],nByte/N_SEGMENT,hipMemcpyHostToDevice,stream[i]));
CHECK(hipMemcpyAsync(&b_d[ioffset],&b_h[ioffset],nByte/N_SEGMENT,hipMemcpyHostToDevice,stream[i]));
}
for(int i=0;i<N_SEGMENT;i++)
{
int ioffset=i*iElem;
hipLaunchKernelGGL(( sumArraysGPU), dim3(grid),dim3(block),0,stream[i], &a_d[ioffset],&b_d[ioffset],&res_d[ioffset],iElem);
}
for(int i=0;i<N_SEGMENT;i++)
{
int ioffset=i*iElem;
CHECK(hipMemcpyAsync(&res_from_gpu_h[ioffset],&res_d[ioffset],nByte/N_SEGMENT,hipMemcpyDeviceToHost,stream[i]));
}
//timer
CHECK(hipEventRecord(stop, 0));
CHECK(hipEventSynchronize(stop));
  float msec = 0.0f;
  CHECK(hipEventElapsedTime(&msec, start, stop));
  iElaps = msec / 1000.0;
printf("Asynchronous Execution configuration<<<%d,%d>>> Time elapsed %f sec\n",grid.x,block.x,iElaps);
checkResult(res_h,res_from_gpu_h,nElem);
for(int i=0;i<N_SEGMENT;i++)
{
CHECK(hipStreamDestroy(stream[i]));
}
  hipFree(a_d);
  hipFree(b_d);
  hipFree(res_d);
  // pinned host memory must be released with hipHostFree, not hipFree
  hipHostFree(a_h);
  hipHostFree(b_h);
  hipHostFree(res_h);
  hipHostFree(res_from_gpu_h);
hipEventDestroy(start);
hipEventDestroy(stop);
return 0;
}
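/*
    Why the segmentation helps (illustrative; assumes the device can overlap
    copies and kernel execution): with N_SEGMENT streams, the transfers and
    kernel of segment i+1 can run while segment i is still in flight, roughly

        stream 0: H2D0 | K0 | D2H0
        stream 1:        H2D1 | K1 | D2H1
        stream 2:               H2D2 | K2 | D2H2

    This is also why the host buffers use hipHostMalloc: hipMemcpyAsync from
    pageable memory would silently degrade to synchronous copies.
*/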
| 0ff486c712af1411a0217882539d45cc6fbfe1c7.cu | #include <cuda_runtime.h>
#include <stdio.h>
#include "freshman.h"
#define N_REPEAT 10
#define N_SEGMENT 4
void sumArrays(float * a,float * b,float * res,const int size)
{
for(int i=0;i<size;i+=4)
{
res[i]=a[i]+b[i];
res[i+1]=a[i+1]+b[i+1];
res[i+2]=a[i+2]+b[i+2];
res[i+3]=a[i+3]+b[i+3];
}
}
__global__ void sumArraysGPU(float*a,float*b,float*res,int N)
{
int idx=blockIdx.x*blockDim.x+threadIdx.x;
  if(idx < N)
  {
    // repeat the sum N_REPEAT times to create an artificial delay
for(int j=0;j<N_REPEAT;j++)
res[idx]=a[idx]+b[idx];
}
}
int main(int argc,char **argv)
{
// set up device
initDevice(0);
double iStart,iElaps;
iStart=cpuSecond();
int nElem=1<<24;
printf("Vector size:%d\n",nElem);
int nByte=sizeof(float)*nElem;
float * a_h,*b_h,*res_h,*res_from_gpu_h;
CHECK(cudaHostAlloc((float**)&a_h,nByte,cudaHostAllocDefault));
CHECK(cudaHostAlloc((float**)&b_h,nByte,cudaHostAllocDefault));
CHECK(cudaHostAlloc((float**)&res_h,nByte,cudaHostAllocDefault));
CHECK(cudaHostAlloc((float**)&res_from_gpu_h,nByte,cudaHostAllocDefault));
cudaMemset(res_h,0,nByte);
cudaMemset(res_from_gpu_h,0,nByte);
float *a_d,*b_d,*res_d;
CHECK(cudaMalloc((float**)&a_d,nByte));
CHECK(cudaMalloc((float**)&b_d,nByte));
CHECK(cudaMalloc((float**)&res_d,nByte));
initialData(a_h,nElem);
initialData(b_h,nElem);
sumArrays(a_h,b_h,res_h,nElem);
dim3 block(512);
dim3 grid((nElem-1)/block.x+1);
//asynchronous calculation
int iElem=nElem/N_SEGMENT;
cudaStream_t stream[N_SEGMENT];
for(int i=0;i<N_SEGMENT;i++)
{
CHECK(cudaStreamCreate(&stream[i]));
}
cudaEvent_t start,stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
for(int i=0;i<N_SEGMENT;i++)
{
int ioffset=i*iElem;
CHECK(cudaMemcpyAsync(&a_d[ioffset],&a_h[ioffset],nByte/N_SEGMENT,cudaMemcpyHostToDevice,stream[i]));
CHECK(cudaMemcpyAsync(&b_d[ioffset],&b_h[ioffset],nByte/N_SEGMENT,cudaMemcpyHostToDevice,stream[i]));
}
for(int i=0;i<N_SEGMENT;i++)
{
int ioffset=i*iElem;
sumArraysGPU<<<grid,block,0,stream[i]>>>(&a_d[ioffset],&b_d[ioffset],&res_d[ioffset],iElem);
}
for(int i=0;i<N_SEGMENT;i++)
{
int ioffset=i*iElem;
CHECK(cudaMemcpyAsync(&res_from_gpu_h[ioffset],&res_d[ioffset],nByte/N_SEGMENT,cudaMemcpyDeviceToHost,stream[i]));
}
//timer
CHECK(cudaEventRecord(stop, 0));
CHECK(cudaEventSynchronize(stop));
  float msec = 0.0f;
  CHECK(cudaEventElapsedTime(&msec, start, stop));
  iElaps = msec / 1000.0;
printf("Asynchronous Execution configuration<<<%d,%d>>> Time elapsed %f sec\n",grid.x,block.x,iElaps);
checkResult(res_h,res_from_gpu_h,nElem);
for(int i=0;i<N_SEGMENT;i++)
{
CHECK(cudaStreamDestroy(stream[i]));
}
  cudaFree(a_d);
  cudaFree(b_d);
  cudaFree(res_d);
  // pinned host memory must be released with cudaFreeHost, not cudaFree
  cudaFreeHost(a_h);
  cudaFreeHost(b_h);
  cudaFreeHost(res_h);
  cudaFreeHost(res_from_gpu_h);
cudaEventDestroy(start);
cudaEventDestroy(stop);
return 0;
}
|
17acc56b1a52a339aa8bce1e6d624899e6df9691.hip | // !!! This is a file automatically generated by hipify!!!
/*******************************************************
* Copyright (c) 2014, ArrayFire
* All rights reserved.
*
* This file is distributed under 3-clause BSD license.
* The complete license agreement can be obtained at:
* http://arrayfire.com/licenses/BSD-3-Clause
********************************************************/
#include <err_common.hpp>
#include <solve.hpp>
#include <platform.hpp>
#include <rocblas.h>
#include <identity.hpp>
#include <iostream>
#include <memory.hpp>
#include <copy.hpp>
#include <transpose.hpp>
#include <math.hpp>
#include <err_common.hpp>
#include <blas.hpp>
#include <lu.hpp>
#include <qr.hpp>
#include <cstdio>
namespace cuda
{
//cusolverStatus_t cusolverDn<>getrs(
// hipsolverDnHandle_t handle,
// hipblasOperation_t trans,
// int n, int nrhs,
// const <> *A, int lda,
// const int *devIpiv,
// <> *B, int ldb,
// int *devInfo );
template<typename T>
struct getrs_func_def_t
{
typedef cusolverStatus_t (*getrs_func_def) (
hipsolverDnHandle_t,
hipblasOperation_t,
int, int,
const T *, int,
const int *,
T *, int,
int *);
};
#define SOLVE_FUNC_DEF( FUNC ) \
template<typename T> \
typename FUNC##_func_def_t<T>::FUNC##_func_def \
FUNC##_func();
#define SOLVE_FUNC( FUNC, TYPE, PREFIX ) \
template<> typename FUNC##_func_def_t<TYPE>::FUNC##_func_def FUNC##_func<TYPE>() \
{ return (FUNC##_func_def_t<TYPE>::FUNC##_func_def)&cusolverDn##PREFIX##FUNC; } \
SOLVE_FUNC_DEF( getrs )
SOLVE_FUNC(getrs , float , S)
SOLVE_FUNC(getrs , double , D)
SOLVE_FUNC(getrs , cfloat , C)
SOLVE_FUNC(getrs , cdouble, Z)
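// For example, SOLVE_FUNC(getrs, float, S) specializes getrs_func<float>() to
// return &cusolverDnSgetrs, cast to the common getrs_func_def signature.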
//cusolverStatus_t cusolverDn<>geqrf_bufferSize(
// hipsolverDnHandle_t handle,
// int m, int n,
// <> *A,
// int lda,
// int *Lwork );
//
//cusolverStatus_t cusolverDn<>geqrf(
// hipsolverDnHandle_t handle,
// int m, int n,
// <> *A, int lda,
// <> *TAU,
// <> *Workspace,
// int Lwork, int *devInfo );
//
//cusolverStatus_t cusolverDn<>mqr(
// hipsolverDnHandle_t handle,
// hipblasSideMode_t side, hipblasOperation_t trans,
// int m, int n, int k,
// const double *A, int lda,
// const double *tau,
// double *C, int ldc,
// double *work,
// int lwork, int *devInfo);
template<typename T>
struct geqrf_solve_func_def_t
{
typedef cusolverStatus_t (*geqrf_solve_func_def) (
hipsolverDnHandle_t, int, int,
T *, int,
T *,
T *,
int, int *);
};
template<typename T>
struct geqrf_solve_buf_func_def_t
{
typedef cusolverStatus_t (*geqrf_solve_buf_func_def) (
hipsolverDnHandle_t, int, int,
T *, int, int *);
};
template<typename T>
struct mqr_solve_func_def_t
{
typedef cusolverStatus_t (*mqr_solve_func_def) (
hipsolverDnHandle_t,
hipblasSideMode_t, hipblasOperation_t,
int, int, int,
const T *, int,
const T *,
T *, int,
T *, int,
int *);
};
#define QR_FUNC_DEF( FUNC ) \
template<typename T> \
static typename FUNC##_solve_func_def_t<T>::FUNC##_solve_func_def \
FUNC##_solve_func(); \
\
template<typename T> \
static typename FUNC##_solve_buf_func_def_t<T>::FUNC##_solve_buf_func_def \
FUNC##_solve_buf_func(); \
#define QR_FUNC( FUNC, TYPE, PREFIX ) \
template<> typename FUNC##_solve_func_def_t<TYPE>::FUNC##_solve_func_def FUNC##_solve_func<TYPE>() \
{ return (FUNC##_solve_func_def_t<TYPE>::FUNC##_solve_func_def)&cusolverDn##PREFIX##FUNC; } \
\
template<> typename FUNC##_solve_buf_func_def_t<TYPE>::FUNC##_solve_buf_func_def FUNC##_solve_buf_func<TYPE>() \
{ return (FUNC##_solve_buf_func_def_t<TYPE>::FUNC##_solve_buf_func_def)& cusolverDn##PREFIX##FUNC##_bufferSize; }
QR_FUNC_DEF( geqrf )
QR_FUNC(geqrf , float , S)
QR_FUNC(geqrf , double , D)
QR_FUNC(geqrf , cfloat , C)
QR_FUNC(geqrf , cdouble, Z)
#define MQR_FUNC_DEF( FUNC ) \
template<typename T> \
static typename FUNC##_solve_func_def_t<T>::FUNC##_solve_func_def \
FUNC##_solve_func();
#define MQR_FUNC( FUNC, TYPE, PREFIX ) \
template<> typename FUNC##_solve_func_def_t<TYPE>::FUNC##_solve_func_def \
FUNC##_solve_func<TYPE>() \
{ return (FUNC##_solve_func_def_t<TYPE>::FUNC##_solve_func_def)&cusolverDn##PREFIX; } \
MQR_FUNC_DEF( mqr )
MQR_FUNC(mqr , float , Sormqr)
MQR_FUNC(mqr , double , Dormqr)
MQR_FUNC(mqr , cfloat , Cunmqr)
MQR_FUNC(mqr , cdouble, Zunmqr)
template<typename T>
Array<T> solveLU(const Array<T> &A, const Array<int> &pivot,
const Array<T> &b, const af_mat_prop options)
{
int N = A.dims()[0];
int NRHS = b.dims()[1];
Array< T > B = copyArray<T>(b);
int *info = memAlloc<int>(1);
CUSOLVER_CHECK(getrs_func<T>()(solverDnHandle(),
HIPBLAS_OP_N,
N, NRHS,
A.get(), A.strides()[1],
pivot.get(),
B.get(), B.strides()[1],
info));
memFree(info);
return B;
}
template<typename T>
Array<T> generalSolve(const Array<T> &a, const Array<T> &b)
{
int M = a.dims()[0];
int N = a.dims()[1];
int K = b.dims()[1];
Array<T> A = copyArray<T>(a);
Array<T> B = copyArray<T>(b);
Array<int> pivot = lu_inplace(A, false);
int *info = memAlloc<int>(1);
CUSOLVER_CHECK(getrs_func<T>()(solverDnHandle(),
HIPBLAS_OP_N,
N, K,
A.get(), A.strides()[1],
pivot.get(),
B.get(), B.strides()[1],
info));
memFree(info);
return B;
}
template<typename T>
hipblasOperation_t trans() { return HIPBLAS_OP_T; }
template<> hipblasOperation_t trans<cfloat>() { return HIPBLAS_OP_C; }
template<> hipblasOperation_t trans<cdouble>() { return HIPBLAS_OP_C; }
template<typename T>
Array<T> leastSquares(const Array<T> &a, const Array<T> &b)
{
int M = a.dims()[0];
int N = a.dims()[1];
int K = b.dims()[1];
Array<T> B = createEmptyArray<T>(dim4());
if (M < N) {
// Least squares for this case is solved using the following
// solve(A, B) == matmul(Q, Xpad);
// Where:
// Xpad == pad(Xt, N - M, 1);
// Xt == tri_solve(R1, B);
// R1 == R(seq(M), seq(M));
// transpose(A) == matmul(Q, R);
// QR is performed on the transpose of A
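            // Equivalently: from transpose(A) = Q R we get A = R^H Q^H, so
            // A x = b becomes R1^H (Q^H x) = b; solve the M x M triangular
            // system for y = Q^H x, pad y with N - M zeros, and recover the
            // minimum-norm solution x = Q * ypad.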
Array<T> A = transpose<T>(a, true);
B = padArray<T, T>(b, dim4(N, K), scalar<T>(0));
int lwork = 0;
// Get workspace needed for QR
CUSOLVER_CHECK(geqrf_solve_buf_func<T>()(solverDnHandle(),
A.dims()[0], A.dims()[1],
A.get(), A.strides()[1],
&lwork));
T *workspace = memAlloc<T>(lwork);
Array<T> t = createEmptyArray<T>(af::dim4(min(M, N), 1, 1, 1));
int *info = memAlloc<int>(1);
// Perform the QR factorization in place
CUSOLVER_CHECK(geqrf_solve_func<T>()(solverDnHandle(),
A.dims()[0], A.dims()[1],
A.get(), A.strides()[1],
t.get(),
workspace, lwork,
info));
// R1 = R(seq(M), seq(M));
A.resetDims(dim4(M, M));
// Bt = tri_solve(R1, B);
B.resetDims(dim4(M, K));
trsm<T>(A, B, AF_MAT_CTRANS, true, true, false);
// Bpad = pad(Bt, ..)
B.resetDims(dim4(N, K));
// matmul(Q, Bpad)
CUSOLVER_CHECK(mqr_solve_func<T>()(solverDnHandle(),
HIPBLAS_SIDE_LEFT, HIPBLAS_OP_N,
B.dims()[0],
B.dims()[1],
A.dims()[0],
A.get(), A.strides()[1],
t.get(),
B.get(), B.strides()[1],
workspace, lwork,
info));
memFree(workspace);
memFree(info);
} else if (M > N) {
// Least squares for this case is solved using the following
// solve(A, B) == tri_solve(R1, Bt);
// Where:
// R1 == R(seq(N), seq(N));
// Bt == matmul(transpose(Q1), B);
// Q1 == Q(span, seq(N));
// A == matmul(Q, R);
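            // Equivalently: from A = Q R the least-squares solution satisfies
            // R1 x = Q1^H b, so apply Q^H to b (mqr below) and back-solve the
            // N x N upper-triangular block R1 (trsm below).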
Array<T> A = copyArray<T>(a);
B = copyArray(b);
int lwork = 0;
// Get workspace needed for QR
CUSOLVER_CHECK(geqrf_solve_buf_func<T>()(solverDnHandle(),
A.dims()[0], A.dims()[1],
A.get(), A.strides()[1],
&lwork));
T *workspace = memAlloc<T>(lwork);
Array<T> t = createEmptyArray<T>(af::dim4(min(M, N), 1, 1, 1));
int *info = memAlloc<int>(1);
// Perform the QR factorization in place
CUSOLVER_CHECK(geqrf_solve_func<T>()(solverDnHandle(),
A.dims()[0], A.dims()[1],
A.get(), A.strides()[1],
t.get(),
workspace, lwork,
info));
// matmul(Q1, B)
CUSOLVER_CHECK(mqr_solve_func<T>()(solverDnHandle(),
HIPBLAS_SIDE_LEFT,
trans<T>(),
M, K, N,
A.get(), A.strides()[1],
t.get(),
B.get(), B.strides()[1],
workspace, lwork,
info));
// tri_solve(R1, Bt)
A.resetDims(dim4(N, N));
B.resetDims(dim4(N, K));
trsm(A, B, AF_MAT_NONE, true, true, false);
memFree(workspace);
memFree(info);
}
return B;
}
template<typename T>
Array<T> triangleSolve(const Array<T> &A, const Array<T> &b, const af_mat_prop options)
{
Array<T> B = copyArray<T>(b);
trsm(A, B,
AF_MAT_NONE, // transpose flag
options & AF_MAT_UPPER ? true : false,
true, // is_left
options & AF_MAT_DIAG_UNIT ? true : false);
return B;
}
template<typename T>
Array<T> solve(const Array<T> &a, const Array<T> &b, const af_mat_prop options)
{
if (options & AF_MAT_UPPER ||
options & AF_MAT_LOWER) {
return triangleSolve<T>(a, b, options);
}
if(a.dims()[0] == a.dims()[1]) {
return generalSolve<T>(a, b);
} else {
return leastSquares<T>(a, b);
}
}
#define INSTANTIATE_SOLVE(T) \
template Array<T> solve<T>(const Array<T> &a, const Array<T> &b, \
const af_mat_prop options); \
template Array<T> solveLU<T>(const Array<T> &A, const Array<int> &pivot, \
const Array<T> &b, const af_mat_prop options); \
INSTANTIATE_SOLVE(float)
INSTANTIATE_SOLVE(cfloat)
INSTANTIATE_SOLVE(double)
INSTANTIATE_SOLVE(cdouble)
}
| 17acc56b1a52a339aa8bce1e6d624899e6df9691.cu | /*******************************************************
* Copyright (c) 2014, ArrayFire
* All rights reserved.
*
* This file is distributed under 3-clause BSD license.
* The complete license agreement can be obtained at:
* http://arrayfire.com/licenses/BSD-3-Clause
********************************************************/
#include <err_common.hpp>
#include <solve.hpp>
#include <platform.hpp>
#include <cublas_v2.h>
#include <identity.hpp>
#include <iostream>
#include <memory.hpp>
#include <copy.hpp>
#include <transpose.hpp>
#include <math.hpp>
#include <err_common.hpp>
#include <blas.hpp>
#include <lu.hpp>
#include <qr.hpp>
#include <cstdio>
namespace cuda
{
//cusolverStatus_t cusolverDn<>getrs(
// cusolverDnHandle_t handle,
// cublasOperation_t trans,
// int n, int nrhs,
// const <> *A, int lda,
// const int *devIpiv,
// <> *B, int ldb,
// int *devInfo );
template<typename T>
struct getrs_func_def_t
{
typedef cusolverStatus_t (*getrs_func_def) (
cusolverDnHandle_t,
cublasOperation_t,
int, int,
const T *, int,
const int *,
T *, int,
int *);
};
#define SOLVE_FUNC_DEF( FUNC ) \
template<typename T> \
typename FUNC##_func_def_t<T>::FUNC##_func_def \
FUNC##_func();
#define SOLVE_FUNC( FUNC, TYPE, PREFIX ) \
template<> typename FUNC##_func_def_t<TYPE>::FUNC##_func_def FUNC##_func<TYPE>() \
{ return (FUNC##_func_def_t<TYPE>::FUNC##_func_def)&cusolverDn##PREFIX##FUNC; } \
SOLVE_FUNC_DEF( getrs )
SOLVE_FUNC(getrs , float , S)
SOLVE_FUNC(getrs , double , D)
SOLVE_FUNC(getrs , cfloat , C)
SOLVE_FUNC(getrs , cdouble, Z)
//cusolverStatus_t cusolverDn<>geqrf_bufferSize(
// cusolverDnHandle_t handle,
// int m, int n,
// <> *A,
// int lda,
// int *Lwork );
//
//cusolverStatus_t cusolverDn<>geqrf(
// cusolverDnHandle_t handle,
// int m, int n,
// <> *A, int lda,
// <> *TAU,
// <> *Workspace,
// int Lwork, int *devInfo );
//
//cusolverStatus_t cusolverDn<>mqr(
// cusolverDnHandle_t handle,
// cublasSideMode_t side, cublasOperation_t trans,
// int m, int n, int k,
// const double *A, int lda,
// const double *tau,
// double *C, int ldc,
// double *work,
// int lwork, int *devInfo);
template<typename T>
struct geqrf_solve_func_def_t
{
typedef cusolverStatus_t (*geqrf_solve_func_def) (
cusolverDnHandle_t, int, int,
T *, int,
T *,
T *,
int, int *);
};
template<typename T>
struct geqrf_solve_buf_func_def_t
{
typedef cusolverStatus_t (*geqrf_solve_buf_func_def) (
cusolverDnHandle_t, int, int,
T *, int, int *);
};
template<typename T>
struct mqr_solve_func_def_t
{
typedef cusolverStatus_t (*mqr_solve_func_def) (
cusolverDnHandle_t,
cublasSideMode_t, cublasOperation_t,
int, int, int,
const T *, int,
const T *,
T *, int,
T *, int,
int *);
};
#define QR_FUNC_DEF( FUNC ) \
template<typename T> \
static typename FUNC##_solve_func_def_t<T>::FUNC##_solve_func_def \
FUNC##_solve_func(); \
\
template<typename T> \
static typename FUNC##_solve_buf_func_def_t<T>::FUNC##_solve_buf_func_def \
FUNC##_solve_buf_func(); \
#define QR_FUNC( FUNC, TYPE, PREFIX ) \
template<> typename FUNC##_solve_func_def_t<TYPE>::FUNC##_solve_func_def FUNC##_solve_func<TYPE>() \
{ return (FUNC##_solve_func_def_t<TYPE>::FUNC##_solve_func_def)&cusolverDn##PREFIX##FUNC; } \
\
template<> typename FUNC##_solve_buf_func_def_t<TYPE>::FUNC##_solve_buf_func_def FUNC##_solve_buf_func<TYPE>() \
{ return (FUNC##_solve_buf_func_def_t<TYPE>::FUNC##_solve_buf_func_def)& cusolverDn##PREFIX##FUNC##_bufferSize; }
QR_FUNC_DEF( geqrf )
QR_FUNC(geqrf , float , S)
QR_FUNC(geqrf , double , D)
QR_FUNC(geqrf , cfloat , C)
QR_FUNC(geqrf , cdouble, Z)
#define MQR_FUNC_DEF( FUNC ) \
template<typename T> \
static typename FUNC##_solve_func_def_t<T>::FUNC##_solve_func_def \
FUNC##_solve_func();
#define MQR_FUNC( FUNC, TYPE, PREFIX ) \
template<> typename FUNC##_solve_func_def_t<TYPE>::FUNC##_solve_func_def \
FUNC##_solve_func<TYPE>() \
{ return (FUNC##_solve_func_def_t<TYPE>::FUNC##_solve_func_def)&cusolverDn##PREFIX; } \
MQR_FUNC_DEF( mqr )
MQR_FUNC(mqr , float , Sormqr)
MQR_FUNC(mqr , double , Dormqr)
MQR_FUNC(mqr , cfloat , Cunmqr)
MQR_FUNC(mqr , cdouble, Zunmqr)
template<typename T>
Array<T> solveLU(const Array<T> &A, const Array<int> &pivot,
const Array<T> &b, const af_mat_prop options)
{
int N = A.dims()[0];
int NRHS = b.dims()[1];
Array< T > B = copyArray<T>(b);
int *info = memAlloc<int>(1);
CUSOLVER_CHECK(getrs_func<T>()(solverDnHandle(),
CUBLAS_OP_N,
N, NRHS,
A.get(), A.strides()[1],
pivot.get(),
B.get(), B.strides()[1],
info));
memFree(info);
return B;
}
template<typename T>
Array<T> generalSolve(const Array<T> &a, const Array<T> &b)
{
int M = a.dims()[0];
int N = a.dims()[1];
int K = b.dims()[1];
Array<T> A = copyArray<T>(a);
Array<T> B = copyArray<T>(b);
Array<int> pivot = lu_inplace(A, false);
int *info = memAlloc<int>(1);
CUSOLVER_CHECK(getrs_func<T>()(solverDnHandle(),
CUBLAS_OP_N,
N, K,
A.get(), A.strides()[1],
pivot.get(),
B.get(), B.strides()[1],
info));
memFree(info);
return B;
}
template<typename T>
cublasOperation_t trans() { return CUBLAS_OP_T; }
template<> cublasOperation_t trans<cfloat>() { return CUBLAS_OP_C; }
template<> cublasOperation_t trans<cdouble>() { return CUBLAS_OP_C; }
template<typename T>
Array<T> leastSquares(const Array<T> &a, const Array<T> &b)
{
int M = a.dims()[0];
int N = a.dims()[1];
int K = b.dims()[1];
Array<T> B = createEmptyArray<T>(dim4());
if (M < N) {
// Least squares for this case is solved using the following
// solve(A, B) == matmul(Q, Xpad);
// Where:
// Xpad == pad(Xt, N - M, 1);
// Xt == tri_solve(R1, B);
// R1 == R(seq(M), seq(M));
// transpose(A) == matmul(Q, R);
// QR is performed on the transpose of A
Array<T> A = transpose<T>(a, true);
B = padArray<T, T>(b, dim4(N, K), scalar<T>(0));
int lwork = 0;
// Get workspace needed for QR
CUSOLVER_CHECK(geqrf_solve_buf_func<T>()(solverDnHandle(),
A.dims()[0], A.dims()[1],
A.get(), A.strides()[1],
&lwork));
T *workspace = memAlloc<T>(lwork);
Array<T> t = createEmptyArray<T>(af::dim4(min(M, N), 1, 1, 1));
int *info = memAlloc<int>(1);
// Perform the QR factorization in place
CUSOLVER_CHECK(geqrf_solve_func<T>()(solverDnHandle(),
A.dims()[0], A.dims()[1],
A.get(), A.strides()[1],
t.get(),
workspace, lwork,
info));
// R1 = R(seq(M), seq(M));
A.resetDims(dim4(M, M));
// Bt = tri_solve(R1, B);
B.resetDims(dim4(M, K));
trsm<T>(A, B, AF_MAT_CTRANS, true, true, false);
// Bpad = pad(Bt, ..)
B.resetDims(dim4(N, K));
// matmul(Q, Bpad)
CUSOLVER_CHECK(mqr_solve_func<T>()(solverDnHandle(),
CUBLAS_SIDE_LEFT, CUBLAS_OP_N,
B.dims()[0],
B.dims()[1],
A.dims()[0],
A.get(), A.strides()[1],
t.get(),
B.get(), B.strides()[1],
workspace, lwork,
info));
memFree(workspace);
memFree(info);
} else if (M > N) {
// Least squares for this case is solved using the following
// solve(A, B) == tri_solve(R1, Bt);
// Where:
// R1 == R(seq(N), seq(N));
// Bt == matmul(transpose(Q1), B);
// Q1 == Q(span, seq(N));
// A == matmul(Q, R);
Array<T> A = copyArray<T>(a);
B = copyArray(b);
int lwork = 0;
// Get workspace needed for QR
CUSOLVER_CHECK(geqrf_solve_buf_func<T>()(solverDnHandle(),
A.dims()[0], A.dims()[1],
A.get(), A.strides()[1],
&lwork));
T *workspace = memAlloc<T>(lwork);
Array<T> t = createEmptyArray<T>(af::dim4(min(M, N), 1, 1, 1));
int *info = memAlloc<int>(1);
// Perform the QR factorization in place
CUSOLVER_CHECK(geqrf_solve_func<T>()(solverDnHandle(),
A.dims()[0], A.dims()[1],
A.get(), A.strides()[1],
t.get(),
workspace, lwork,
info));
// matmul(Q1, B)
CUSOLVER_CHECK(mqr_solve_func<T>()(solverDnHandle(),
CUBLAS_SIDE_LEFT,
trans<T>(),
M, K, N,
A.get(), A.strides()[1],
t.get(),
B.get(), B.strides()[1],
workspace, lwork,
info));
// tri_solve(R1, Bt)
A.resetDims(dim4(N, N));
B.resetDims(dim4(N, K));
trsm(A, B, AF_MAT_NONE, true, true, false);
memFree(workspace);
memFree(info);
}
return B;
}
template<typename T>
Array<T> triangleSolve(const Array<T> &A, const Array<T> &b, const af_mat_prop options)
{
Array<T> B = copyArray<T>(b);
trsm(A, B,
AF_MAT_NONE, // transpose flag
options & AF_MAT_UPPER ? true : false,
true, // is_left
options & AF_MAT_DIAG_UNIT ? true : false);
return B;
}
template<typename T>
Array<T> solve(const Array<T> &a, const Array<T> &b, const af_mat_prop options)
{
if (options & AF_MAT_UPPER ||
options & AF_MAT_LOWER) {
return triangleSolve<T>(a, b, options);
}
if(a.dims()[0] == a.dims()[1]) {
return generalSolve<T>(a, b);
} else {
return leastSquares<T>(a, b);
}
}
#define INSTANTIATE_SOLVE(T) \
template Array<T> solve<T>(const Array<T> &a, const Array<T> &b, \
const af_mat_prop options); \
template Array<T> solveLU<T>(const Array<T> &A, const Array<int> &pivot, \
const Array<T> &b, const af_mat_prop options); \
INSTANTIATE_SOLVE(float)
INSTANTIATE_SOLVE(cfloat)
INSTANTIATE_SOLVE(double)
INSTANTIATE_SOLVE(cdouble)
}
|
46b79536f5d99552d6f29ed8805dd49703162ff9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by op2.py
//
//user function
__device__ void dotPV_gpu( const double *p, const double *v, double *c) { *c += (*p) * (*v);
}
// CUDA kernel function
__global__ void op_cuda_dotPV(
const double *__restrict arg0,
const double *__restrict arg1,
double *arg2,
int set_size ) {
double arg2_l[1];
for ( int d=0; d<1; d++ ){
arg2_l[d]=ZERO_double;
}
//process set elements
for ( int n=threadIdx.x+blockIdx.x*blockDim.x; n<set_size; n+=blockDim.x*gridDim.x ){
//user-supplied kernel call
dotPV_gpu(arg0+n*1,
arg1+n*1,
arg2_l);
}
//global reductions
for ( int d=0; d<1; d++ ){
op_reduction<OP_INC>(&arg2[d+blockIdx.x*1],arg2_l[d]);
}
}
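// Reduction strategy: each thread first accumulates a private partial dot
// product in arg2_l; op_reduction then combines the partials of one block
// into arg2[blockIdx.x], and the host-side loop over maxblocks in the stub
// below adds up the per-block results.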
//host stub function
void op_par_loop_dotPV(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2){
double*arg2h = (double *)arg2.data;
int nargs = 3;
op_arg args[3];
args[0] = arg0;
args[1] = arg1;
args[2] = arg2;
// initialise timers
double cpu_t1, cpu_t2, wall_t1, wall_t2;
op_timing_realloc(4);
op_timers_core(&cpu_t1, &wall_t1);
OP_kernels[4].name = name;
OP_kernels[4].count += 1;
if (OP_diags>2) {
printf(" kernel routine w/o indirection: dotPV");
}
int set_size = op_mpi_halo_exchanges_grouped(set, nargs, args, 2);
if (set_size > 0) {
//set CUDA execution parameters
#ifdef OP_BLOCK_SIZE_4
int nthread = OP_BLOCK_SIZE_4;
#else
int nthread = OP_block_size;
#endif
int nblocks = 200;
//transfer global reduction data to GPU
int maxblocks = nblocks;
int reduct_bytes = 0;
int reduct_size = 0;
reduct_bytes += ROUND_UP(maxblocks*1*sizeof(double));
reduct_size = MAX(reduct_size,sizeof(double));
reallocReductArrays(reduct_bytes);
reduct_bytes = 0;
arg2.data = OP_reduct_h + reduct_bytes;
arg2.data_d = OP_reduct_d + reduct_bytes;
for ( int b=0; b<maxblocks; b++ ){
for ( int d=0; d<1; d++ ){
((double *)arg2.data)[d+b*1] = ZERO_double;
}
}
reduct_bytes += ROUND_UP(maxblocks*1*sizeof(double));
mvReductArraysToDevice(reduct_bytes);
int nshared = reduct_size*nthread;
hipLaunchKernelGGL(( op_cuda_dotPV), dim3(nblocks),dim3(nthread),nshared, 0,
(double *) arg0.data_d,
(double *) arg1.data_d,
(double *) arg2.data_d,
set->size );
//transfer global reduction data back to CPU
mvReductArraysToHost(reduct_bytes);
for ( int b=0; b<maxblocks; b++ ){
for ( int d=0; d<1; d++ ){
arg2h[d] = arg2h[d] + ((double *)arg2.data)[d+b*1];
}
}
arg2.data = (char *)arg2h;
op_mpi_reduce(&arg2,arg2h);
}
op_mpi_set_dirtybit_cuda(nargs, args);
cutilSafeCall(hipDeviceSynchronize());
//update kernel record
op_timers_core(&cpu_t2, &wall_t2);
OP_kernels[4].time += wall_t2 - wall_t1;
OP_kernels[4].transfer += (float)set->size * arg0.size;
OP_kernels[4].transfer += (float)set->size * arg1.size;
}
| 46b79536f5d99552d6f29ed8805dd49703162ff9.cu | //
// auto-generated by op2.py
//
//user function
__device__ void dotPV_gpu( const double *p, const double *v, double *c) { *c += (*p) * (*v);
}
// CUDA kernel function
__global__ void op_cuda_dotPV(
const double *__restrict arg0,
const double *__restrict arg1,
double *arg2,
int set_size ) {
double arg2_l[1];
for ( int d=0; d<1; d++ ){
arg2_l[d]=ZERO_double;
}
//process set elements
for ( int n=threadIdx.x+blockIdx.x*blockDim.x; n<set_size; n+=blockDim.x*gridDim.x ){
//user-supplied kernel call
dotPV_gpu(arg0+n*1,
arg1+n*1,
arg2_l);
}
//global reductions
for ( int d=0; d<1; d++ ){
op_reduction<OP_INC>(&arg2[d+blockIdx.x*1],arg2_l[d]);
}
}
//host stub function
void op_par_loop_dotPV(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2){
double*arg2h = (double *)arg2.data;
int nargs = 3;
op_arg args[3];
args[0] = arg0;
args[1] = arg1;
args[2] = arg2;
// initialise timers
double cpu_t1, cpu_t2, wall_t1, wall_t2;
op_timing_realloc(4);
op_timers_core(&cpu_t1, &wall_t1);
OP_kernels[4].name = name;
OP_kernels[4].count += 1;
if (OP_diags>2) {
printf(" kernel routine w/o indirection: dotPV");
}
int set_size = op_mpi_halo_exchanges_grouped(set, nargs, args, 2);
if (set_size > 0) {
//set CUDA execution parameters
#ifdef OP_BLOCK_SIZE_4
int nthread = OP_BLOCK_SIZE_4;
#else
int nthread = OP_block_size;
#endif
int nblocks = 200;
//transfer global reduction data to GPU
int maxblocks = nblocks;
int reduct_bytes = 0;
int reduct_size = 0;
reduct_bytes += ROUND_UP(maxblocks*1*sizeof(double));
reduct_size = MAX(reduct_size,sizeof(double));
reallocReductArrays(reduct_bytes);
reduct_bytes = 0;
arg2.data = OP_reduct_h + reduct_bytes;
arg2.data_d = OP_reduct_d + reduct_bytes;
for ( int b=0; b<maxblocks; b++ ){
for ( int d=0; d<1; d++ ){
((double *)arg2.data)[d+b*1] = ZERO_double;
}
}
reduct_bytes += ROUND_UP(maxblocks*1*sizeof(double));
mvReductArraysToDevice(reduct_bytes);
int nshared = reduct_size*nthread;
op_cuda_dotPV<<<nblocks,nthread,nshared>>>(
(double *) arg0.data_d,
(double *) arg1.data_d,
(double *) arg2.data_d,
set->size );
//transfer global reduction data back to CPU
mvReductArraysToHost(reduct_bytes);
for ( int b=0; b<maxblocks; b++ ){
for ( int d=0; d<1; d++ ){
arg2h[d] = arg2h[d] + ((double *)arg2.data)[d+b*1];
}
}
arg2.data = (char *)arg2h;
op_mpi_reduce(&arg2,arg2h);
}
op_mpi_set_dirtybit_cuda(nargs, args);
cutilSafeCall(cudaDeviceSynchronize());
//update kernel record
op_timers_core(&cpu_t2, &wall_t2);
OP_kernels[4].time += wall_t2 - wall_t1;
OP_kernels[4].transfer += (float)set->size * arg0.size;
OP_kernels[4].transfer += (float)set->size * arg1.size;
}
|
99f251539f1f6b99b1df4457343f685b286b957c.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column.hpp>
#include <cudf/detail/binaryop.hpp>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/unary.hpp>
#include <cudf/fixed_point/fixed_point.hpp>
#include <cudf/null_mask.hpp>
#include <cudf/scalar/scalar_factories.hpp>
#include <cudf/unary.hpp>
#include <cudf/utilities/traits.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
namespace cudf {
namespace detail {
namespace { // anonymous namespace
template <typename _TargetT>
struct unary_cast {
template <typename SourceT,
typename TargetT = _TargetT,
typename std::enable_if_t<(cudf::is_numeric<SourceT>() &&
cudf::is_numeric<TargetT>())>* = nullptr>
CUDA_DEVICE_CALLABLE TargetT operator()(SourceT const element)
{
return static_cast<TargetT>(element);
}
template <typename SourceT,
typename TargetT = _TargetT,
typename std::enable_if_t<(cudf::is_timestamp<SourceT>() &&
cudf::is_timestamp<TargetT>())>* = nullptr>
CUDA_DEVICE_CALLABLE TargetT operator()(SourceT const element)
{
// Convert source tick counts into target tick counts without blindly truncating them
// by dividing the respective duration time periods (which may not work for time before
// UNIX epoch)
    return TargetT{cuda::std::chrono::floor<typename TargetT::duration>(element.time_since_epoch())};
}
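  // Example of why floor matters for pre-epoch times: a timestamp of
  // -1500 ms cast to seconds yields floor(-1.5 s) = -2 s, whereas plain
  // integer truncation would give -1 s and shift the value forward in time.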
template <typename SourceT,
typename TargetT = _TargetT,
typename std::enable_if_t<(cudf::is_duration<SourceT>() &&
cudf::is_duration<TargetT>())>* = nullptr>
CUDA_DEVICE_CALLABLE TargetT operator()(SourceT const element)
{
return TargetT{cuda::std::chrono::floor<TargetT>(element)};
}
template <typename SourceT,
typename TargetT = _TargetT,
typename std::enable_if_t<cudf::is_numeric<SourceT>() &&
cudf::is_duration<TargetT>()>* = nullptr>
CUDA_DEVICE_CALLABLE TargetT operator()(SourceT const element)
{
return TargetT{static_cast<typename TargetT::rep>(element)};
}
template <typename SourceT,
typename TargetT = _TargetT,
typename std::enable_if_t<(cudf::is_timestamp<SourceT>() &&
cudf::is_duration<TargetT>())>* = nullptr>
CUDA_DEVICE_CALLABLE TargetT operator()(SourceT const element)
{
return TargetT{cuda::std::chrono::floor<TargetT>(element.time_since_epoch())};
}
template <typename SourceT,
typename TargetT = _TargetT,
typename std::enable_if_t<cudf::is_duration<SourceT>() &&
cudf::is_numeric<TargetT>()>* = nullptr>
CUDA_DEVICE_CALLABLE TargetT operator()(SourceT const element)
{
return static_cast<TargetT>(element.count());
}
template <typename SourceT,
typename TargetT = _TargetT,
typename std::enable_if_t<(cudf::is_duration<SourceT>() &&
cudf::is_timestamp<TargetT>())>* = nullptr>
CUDA_DEVICE_CALLABLE TargetT operator()(SourceT const element)
{
    return TargetT{cuda::std::chrono::floor<typename TargetT::duration>(element)};
}
};
template <typename _SourceT, typename _TargetT>
struct fixed_point_unary_cast {
numeric::scale_type scale;
using FixedPointT = std::conditional_t<cudf::is_fixed_point<_SourceT>(), _SourceT, _TargetT>;
using DeviceT = device_storage_type_t<FixedPointT>;
template <typename SourceT = _SourceT,
typename TargetT = _TargetT,
typename std::enable_if_t<(cudf::is_fixed_point<_SourceT>() &&
cudf::is_numeric<TargetT>())>* = nullptr>
CUDA_DEVICE_CALLABLE TargetT operator()(DeviceT const element)
{
auto const fp = SourceT{numeric::scaled_integer<DeviceT>{element, scale}};
return static_cast<TargetT>(fp);
}
template <typename SourceT = _SourceT,
typename TargetT = _TargetT,
typename std::enable_if_t<(cudf::is_numeric<_SourceT>() &&
cudf::is_fixed_point<TargetT>())>* = nullptr>
CUDA_DEVICE_CALLABLE DeviceT operator()(SourceT const element)
{
return TargetT{element, scale}.value();
}
};
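// Example of the two directions above (illustrative values): a decimal32 column
// with scale -2 stores 1.23 as the int32 rep 123, so fixed_point -> numeric
// wraps the rep in scaled_integer{123, scale} and converts it to 1.23;
// numeric -> fixed_point runs the construction in reverse and stores the
// underlying rep 123 back.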
template <typename From, typename To>
constexpr inline auto is_supported_non_fixed_point_cast()
{
return cudf::is_fixed_width<To>() &&
// Disallow fixed_point here (requires different specialization)
!(cudf::is_fixed_point<From>() || cudf::is_fixed_point<To>()) &&
// Disallow conversions between timestamps and numeric
!(cudf::is_timestamp<From>() && is_numeric<To>()) &&
!(cudf::is_timestamp<To>() && is_numeric<From>());
}
template <typename From, typename To>
constexpr inline auto is_supported_fixed_point_cast()
{
return (cudf::is_fixed_point<From>() && cudf::is_numeric<To>()) ||
(cudf::is_numeric<From>() && cudf::is_fixed_point<To>()) ||
(cudf::is_fixed_point<From>() && cudf::is_fixed_point<To>());
}
template <typename From, typename To>
constexpr inline auto is_supported_cast()
{
return is_supported_non_fixed_point_cast<From, To>() || is_supported_fixed_point_cast<From, To>();
}
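// For instance, is_supported_cast<timestamp_ms, int32_t>() is false (the
// timestamp <-> numeric pairing is excluded above), while
// is_supported_cast<duration_ms, int32_t>() is true via the non-fixed-point branch.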
template <typename From, typename To>
struct device_cast {
__device__ To operator()(From element) { return static_cast<To>(element); }
};
/**
* @brief Takes a `fixed_point` column_view as @p input and returns a `fixed_point` column with new
* @p scale
*
* @tparam T Type of the `fixed_point` column_view (`decimal32` or `decimal64`)
* @param input Input `column_view`
* @param scale `scale` of the returned `column`
 * @param stream CUDA stream used for device memory operations and kernel launches
 * @param mr Device memory resource used to allocate the returned column's device memory
*
* @return std::unique_ptr<column> Returned column with new @p scale
*/
template <typename T, typename std::enable_if_t<is_fixed_point<T>()>* = nullptr>
std::unique_ptr<column> rescale(column_view input,
numeric::scale_type scale,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
using namespace numeric;
if (input.type().scale() > scale) {
auto const scalar = make_fixed_point_scalar<T>(0, scale_type{scale});
auto const type = cudf::data_type{cudf::type_to_id<T>(), scale};
return detail::binary_operation(input, *scalar, binary_operator::ADD, type, stream, mr);
} else {
auto const diff = input.type().scale() - scale;
auto const scalar = make_fixed_point_scalar<T>(::pow(10, -diff), scale_type{diff});
auto const type = cudf::data_type{cudf::type_to_id<T>(), scale};
return detail::binary_operation(input, *scalar, binary_operator::DIV, type, stream, mr);
}
}
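// Worked example of the two branches above (illustrative values): rescaling
// decimal32 1.23 (scale -2) to scale -3 takes the ADD branch, 1.23 + 0.000 ->
// 1.230; rescaling 1.234 (scale -3) to scale -2 takes the DIV branch with
// diff = -1, dividing by the scalar 10 * 10^-1 = 1 held at scale -1 and
// dropping the digit below the coarser scale (result 1.23).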
template <typename _SourceT>
struct dispatch_unary_cast_to {
column_view input;
dispatch_unary_cast_to(column_view inp) : input(inp) {}
template <
typename TargetT,
typename SourceT = _SourceT,
typename std::enable_if_t<is_supported_non_fixed_point_cast<SourceT, TargetT>()>* = nullptr>
std::unique_ptr<column> operator()(data_type type,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto const size = input.size();
auto output =
std::make_unique<column>(type,
size,
rmm::device_buffer{size * cudf::size_of(type), stream, mr},
detail::copy_bitmask(input, stream, mr),
input.null_count());
mutable_column_view output_mutable = *output;
thrust::transform(rmm::exec_policy(stream),
input.begin<SourceT>(),
input.end<SourceT>(),
output_mutable.begin<TargetT>(),
unary_cast<TargetT>{});
return output;
}
template <typename TargetT,
typename SourceT = _SourceT,
typename std::enable_if_t<cudf::is_fixed_point<SourceT>() &&
cudf::is_numeric<TargetT>()>* = nullptr>
std::unique_ptr<column> operator()(data_type type,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto const size = input.size();
auto output =
std::make_unique<column>(type,
size,
rmm::device_buffer{size * cudf::size_of(type), stream, mr},
copy_bitmask(input, stream, mr),
input.null_count());
mutable_column_view output_mutable = *output;
using DeviceT = device_storage_type_t<SourceT>;
auto const scale = numeric::scale_type{input.type().scale()};
thrust::transform(rmm::exec_policy(stream),
input.begin<DeviceT>(),
input.end<DeviceT>(),
output_mutable.begin<TargetT>(),
fixed_point_unary_cast<SourceT, TargetT>{scale});
return output;
}
template <typename TargetT,
typename SourceT = _SourceT,
typename std::enable_if_t<cudf::is_numeric<SourceT>() &&
cudf::is_fixed_point<TargetT>()>* = nullptr>
std::unique_ptr<column> operator()(data_type type,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto const size = input.size();
auto output =
std::make_unique<column>(type,
size,
rmm::device_buffer{size * cudf::size_of(type), stream, mr},
copy_bitmask(input, stream, mr),
input.null_count());
mutable_column_view output_mutable = *output;
using DeviceT = device_storage_type_t<TargetT>;
auto const scale = numeric::scale_type{type.scale()};
thrust::transform(rmm::exec_policy(stream),
input.begin<SourceT>(),
input.end<SourceT>(),
output_mutable.begin<DeviceT>(),
fixed_point_unary_cast<SourceT, TargetT>{scale});
return output;
}
template <
typename TargetT,
typename SourceT = _SourceT,
typename std::enable_if_t<cudf::is_fixed_point<SourceT>() && cudf::is_fixed_point<TargetT>() &&
std::is_same<SourceT, TargetT>::value>* = nullptr>
std::unique_ptr<column> operator()(data_type type,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (input.type() == type) return std::make_unique<column>(input); // TODO add test for this
return detail::rescale<TargetT>(input, numeric::scale_type{type.scale()}, stream, mr);
}
template <
typename TargetT,
typename SourceT = _SourceT,
typename std::enable_if_t<cudf::is_fixed_point<SourceT>() && cudf::is_fixed_point<TargetT>() &&
not std::is_same<SourceT, TargetT>::value>* = nullptr>
std::unique_ptr<column> operator()(data_type type,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
using namespace numeric;
auto const size = input.size();
auto temporary =
std::make_unique<column>(cudf::data_type{type.id(), input.type().scale()},
size,
rmm::device_buffer{size * cudf::size_of(type), stream},
copy_bitmask(input, stream),
input.null_count());
using SourceDeviceT = device_storage_type_t<SourceT>;
using TargetDeviceT = device_storage_type_t<TargetT>;
mutable_column_view output_mutable = *temporary;
thrust::transform(rmm::exec_policy(stream),
input.begin<SourceDeviceT>(),
input.end<SourceDeviceT>(),
output_mutable.begin<TargetDeviceT>(),
device_cast<SourceDeviceT, TargetDeviceT>{});
// clearly there is a more efficient way to do this, can optimize in the future
return rescale<TargetT>(*temporary, numeric::scale_type{type.scale()}, stream, mr);
}
template <typename TargetT,
typename SourceT = _SourceT,
typename std::enable_if_t<not is_supported_cast<SourceT, TargetT>()>* = nullptr>
std::unique_ptr<column> operator()(data_type,
rmm::cuda_stream_view,
rmm::mr::device_memory_resource*)
{
if (!cudf::is_fixed_width<TargetT>())
CUDF_FAIL("Column type must be numeric or chrono or decimal32/64");
else if (cudf::is_fixed_point<SourceT>())
CUDF_FAIL("Currently only decimal32/64 to floating point/integral is supported");
else if (cudf::is_timestamp<SourceT>() && is_numeric<TargetT>())
CUDF_FAIL("Timestamps can be created only from duration");
else
CUDF_FAIL("Timestamps cannot be converted to numeric without converting it to a duration");
}
};
struct dispatch_unary_cast_from {
column_view input;
dispatch_unary_cast_from(column_view inp) : input(inp) {}
template <typename T, typename std::enable_if_t<cudf::is_fixed_width<T>()>* = nullptr>
std::unique_ptr<column> operator()(data_type type,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return type_dispatcher(type, dispatch_unary_cast_to<T>{input}, type, stream, mr);
}
template <typename T, typename... Args>
std::enable_if_t<!cudf::is_fixed_width<T>(), std::unique_ptr<column>> operator()(Args&&...)
{
CUDF_FAIL("Column type must be numeric or chrono or decimal32/64");
}
};
} // anonymous namespace
std::unique_ptr<column> cast(column_view const& input,
data_type type,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(is_fixed_width(type), "Unary cast type must be fixed-width.");
return type_dispatcher(input.type(), detail::dispatch_unary_cast_from{input}, type, stream, mr);
}
} // namespace detail
std::unique_ptr<column> cast(column_view const& input,
data_type type,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::cast(input, type, rmm::cuda_stream_default, mr);
}
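// Minimal usage sketch (the column `col` and resource `mr` are hypothetical):
//   auto doubles = cudf::cast(col->view(), data_type{type_id::FLOAT64}, mr);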
} // namespace cudf
| 99f251539f1f6b99b1df4457343f685b286b957c.cu | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column.hpp>
#include <cudf/detail/binaryop.hpp>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/unary.hpp>
#include <cudf/fixed_point/fixed_point.hpp>
#include <cudf/null_mask.hpp>
#include <cudf/scalar/scalar_factories.hpp>
#include <cudf/unary.hpp>
#include <cudf/utilities/traits.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
namespace cudf {
namespace detail {
namespace { // anonymous namespace
template <typename _TargetT>
struct unary_cast {
template <typename SourceT,
typename TargetT = _TargetT,
typename std::enable_if_t<(cudf::is_numeric<SourceT>() &&
cudf::is_numeric<TargetT>())>* = nullptr>
CUDA_DEVICE_CALLABLE TargetT operator()(SourceT const element)
{
return static_cast<TargetT>(element);
}
template <typename SourceT,
typename TargetT = _TargetT,
typename std::enable_if_t<(cudf::is_timestamp<SourceT>() &&
cudf::is_timestamp<TargetT>())>* = nullptr>
CUDA_DEVICE_CALLABLE TargetT operator()(SourceT const element)
{
    // Convert source tick counts into target tick counts using chrono::floor
    // rather than a plain division of the duration periods, since truncation
    // toward zero rounds the wrong way for times before the UNIX epoch
return TargetT{cuda::std::chrono::floor<TargetT::duration>(element.time_since_epoch())};
}
template <typename SourceT,
typename TargetT = _TargetT,
typename std::enable_if_t<(cudf::is_duration<SourceT>() &&
cudf::is_duration<TargetT>())>* = nullptr>
CUDA_DEVICE_CALLABLE TargetT operator()(SourceT const element)
{
return TargetT{cuda::std::chrono::floor<TargetT>(element)};
}
template <typename SourceT,
typename TargetT = _TargetT,
typename std::enable_if_t<cudf::is_numeric<SourceT>() &&
cudf::is_duration<TargetT>()>* = nullptr>
CUDA_DEVICE_CALLABLE TargetT operator()(SourceT const element)
{
return TargetT{static_cast<typename TargetT::rep>(element)};
}
template <typename SourceT,
typename TargetT = _TargetT,
typename std::enable_if_t<(cudf::is_timestamp<SourceT>() &&
cudf::is_duration<TargetT>())>* = nullptr>
CUDA_DEVICE_CALLABLE TargetT operator()(SourceT const element)
{
return TargetT{cuda::std::chrono::floor<TargetT>(element.time_since_epoch())};
}
template <typename SourceT,
typename TargetT = _TargetT,
typename std::enable_if_t<cudf::is_duration<SourceT>() &&
cudf::is_numeric<TargetT>()>* = nullptr>
CUDA_DEVICE_CALLABLE TargetT operator()(SourceT const element)
{
return static_cast<TargetT>(element.count());
}
template <typename SourceT,
typename TargetT = _TargetT,
typename std::enable_if_t<(cudf::is_duration<SourceT>() &&
cudf::is_timestamp<TargetT>())>* = nullptr>
CUDA_DEVICE_CALLABLE TargetT operator()(SourceT const element)
{
return TargetT{cuda::std::chrono::floor<TargetT::duration>(element)};
}
};
template <typename _SourceT, typename _TargetT>
struct fixed_point_unary_cast {
numeric::scale_type scale;
using FixedPointT = std::conditional_t<cudf::is_fixed_point<_SourceT>(), _SourceT, _TargetT>;
using DeviceT = device_storage_type_t<FixedPointT>;
template <typename SourceT = _SourceT,
typename TargetT = _TargetT,
typename std::enable_if_t<(cudf::is_fixed_point<_SourceT>() &&
cudf::is_numeric<TargetT>())>* = nullptr>
CUDA_DEVICE_CALLABLE TargetT operator()(DeviceT const element)
{
auto const fp = SourceT{numeric::scaled_integer<DeviceT>{element, scale}};
return static_cast<TargetT>(fp);
}
template <typename SourceT = _SourceT,
typename TargetT = _TargetT,
typename std::enable_if_t<(cudf::is_numeric<_SourceT>() &&
cudf::is_fixed_point<TargetT>())>* = nullptr>
CUDA_DEVICE_CALLABLE DeviceT operator()(SourceT const element)
{
return TargetT{element, scale}.value();
}
};
template <typename From, typename To>
constexpr inline auto is_supported_non_fixed_point_cast()
{
return cudf::is_fixed_width<To>() &&
// Disallow fixed_point here (requires different specialization)
!(cudf::is_fixed_point<From>() || cudf::is_fixed_point<To>()) &&
// Disallow conversions between timestamps and numeric
!(cudf::is_timestamp<From>() && is_numeric<To>()) &&
!(cudf::is_timestamp<To>() && is_numeric<From>());
}
template <typename From, typename To>
constexpr inline auto is_supported_fixed_point_cast()
{
return (cudf::is_fixed_point<From>() && cudf::is_numeric<To>()) ||
(cudf::is_numeric<From>() && cudf::is_fixed_point<To>()) ||
(cudf::is_fixed_point<From>() && cudf::is_fixed_point<To>());
}
template <typename From, typename To>
constexpr inline auto is_supported_cast()
{
return is_supported_non_fixed_point_cast<From, To>() || is_supported_fixed_point_cast<From, To>();
}
template <typename From, typename To>
struct device_cast {
__device__ To operator()(From element) { return static_cast<To>(element); }
};
/**
* @brief Takes a `fixed_point` column_view as @p input and returns a `fixed_point` column with new
* @p scale
*
* @tparam T Type of the `fixed_point` column_view (`decimal32` or `decimal64`)
* @param input Input `column_view`
* @param scale `scale` of the returned `column`
 * @param stream CUDA stream used for device memory operations and kernel launches
 * @param mr Device memory resource used to allocate the returned column's device memory
*
* @return std::unique_ptr<column> Returned column with new @p scale
*/
template <typename T, typename std::enable_if_t<is_fixed_point<T>()>* = nullptr>
std::unique_ptr<column> rescale(column_view input,
numeric::scale_type scale,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
using namespace numeric;
if (input.type().scale() > scale) {
auto const scalar = make_fixed_point_scalar<T>(0, scale_type{scale});
auto const type = cudf::data_type{cudf::type_to_id<T>(), scale};
return detail::binary_operation(input, *scalar, binary_operator::ADD, type, stream, mr);
} else {
auto const diff = input.type().scale() - scale;
auto const scalar = make_fixed_point_scalar<T>(std::pow(10, -diff), scale_type{diff});
auto const type = cudf::data_type{cudf::type_to_id<T>(), scale};
return detail::binary_operation(input, *scalar, binary_operator::DIV, type, stream, mr);
}
}
template <typename _SourceT>
struct dispatch_unary_cast_to {
column_view input;
dispatch_unary_cast_to(column_view inp) : input(inp) {}
template <
typename TargetT,
typename SourceT = _SourceT,
typename std::enable_if_t<is_supported_non_fixed_point_cast<SourceT, TargetT>()>* = nullptr>
std::unique_ptr<column> operator()(data_type type,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto const size = input.size();
auto output =
std::make_unique<column>(type,
size,
rmm::device_buffer{size * cudf::size_of(type), stream, mr},
detail::copy_bitmask(input, stream, mr),
input.null_count());
mutable_column_view output_mutable = *output;
thrust::transform(rmm::exec_policy(stream),
input.begin<SourceT>(),
input.end<SourceT>(),
output_mutable.begin<TargetT>(),
unary_cast<TargetT>{});
return output;
}
template <typename TargetT,
typename SourceT = _SourceT,
typename std::enable_if_t<cudf::is_fixed_point<SourceT>() &&
cudf::is_numeric<TargetT>()>* = nullptr>
std::unique_ptr<column> operator()(data_type type,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto const size = input.size();
auto output =
std::make_unique<column>(type,
size,
rmm::device_buffer{size * cudf::size_of(type), stream, mr},
copy_bitmask(input, stream, mr),
input.null_count());
mutable_column_view output_mutable = *output;
using DeviceT = device_storage_type_t<SourceT>;
auto const scale = numeric::scale_type{input.type().scale()};
thrust::transform(rmm::exec_policy(stream),
input.begin<DeviceT>(),
input.end<DeviceT>(),
output_mutable.begin<TargetT>(),
fixed_point_unary_cast<SourceT, TargetT>{scale});
return output;
}
template <typename TargetT,
typename SourceT = _SourceT,
typename std::enable_if_t<cudf::is_numeric<SourceT>() &&
cudf::is_fixed_point<TargetT>()>* = nullptr>
std::unique_ptr<column> operator()(data_type type,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto const size = input.size();
auto output =
std::make_unique<column>(type,
size,
rmm::device_buffer{size * cudf::size_of(type), stream, mr},
copy_bitmask(input, stream, mr),
input.null_count());
mutable_column_view output_mutable = *output;
using DeviceT = device_storage_type_t<TargetT>;
auto const scale = numeric::scale_type{type.scale()};
thrust::transform(rmm::exec_policy(stream),
input.begin<SourceT>(),
input.end<SourceT>(),
output_mutable.begin<DeviceT>(),
fixed_point_unary_cast<SourceT, TargetT>{scale});
return output;
}
template <
typename TargetT,
typename SourceT = _SourceT,
typename std::enable_if_t<cudf::is_fixed_point<SourceT>() && cudf::is_fixed_point<TargetT>() &&
std::is_same<SourceT, TargetT>::value>* = nullptr>
std::unique_ptr<column> operator()(data_type type,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (input.type() == type) return std::make_unique<column>(input); // TODO add test for this
return detail::rescale<TargetT>(input, numeric::scale_type{type.scale()}, stream, mr);
}
template <
typename TargetT,
typename SourceT = _SourceT,
typename std::enable_if_t<cudf::is_fixed_point<SourceT>() && cudf::is_fixed_point<TargetT>() &&
not std::is_same<SourceT, TargetT>::value>* = nullptr>
std::unique_ptr<column> operator()(data_type type,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
using namespace numeric;
auto const size = input.size();
auto temporary =
std::make_unique<column>(cudf::data_type{type.id(), input.type().scale()},
size,
rmm::device_buffer{size * cudf::size_of(type), stream},
copy_bitmask(input, stream),
input.null_count());
using SourceDeviceT = device_storage_type_t<SourceT>;
using TargetDeviceT = device_storage_type_t<TargetT>;
mutable_column_view output_mutable = *temporary;
thrust::transform(rmm::exec_policy(stream),
input.begin<SourceDeviceT>(),
input.end<SourceDeviceT>(),
output_mutable.begin<TargetDeviceT>(),
device_cast<SourceDeviceT, TargetDeviceT>{});
// clearly there is a more efficient way to do this, can optimize in the future
return rescale<TargetT>(*temporary, numeric::scale_type{type.scale()}, stream, mr);
}
template <typename TargetT,
typename SourceT = _SourceT,
typename std::enable_if_t<not is_supported_cast<SourceT, TargetT>()>* = nullptr>
std::unique_ptr<column> operator()(data_type,
rmm::cuda_stream_view,
rmm::mr::device_memory_resource*)
{
if (!cudf::is_fixed_width<TargetT>())
CUDF_FAIL("Column type must be numeric or chrono or decimal32/64");
else if (cudf::is_fixed_point<SourceT>())
CUDF_FAIL("Currently only decimal32/64 to floating point/integral is supported");
else if (cudf::is_timestamp<SourceT>() && is_numeric<TargetT>())
CUDF_FAIL("Timestamps can be created only from duration");
else
CUDF_FAIL("Timestamps cannot be converted to numeric without converting it to a duration");
}
};
struct dispatch_unary_cast_from {
column_view input;
dispatch_unary_cast_from(column_view inp) : input(inp) {}
template <typename T, typename std::enable_if_t<cudf::is_fixed_width<T>()>* = nullptr>
std::unique_ptr<column> operator()(data_type type,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return type_dispatcher(type, dispatch_unary_cast_to<T>{input}, type, stream, mr);
}
template <typename T, typename... Args>
std::enable_if_t<!cudf::is_fixed_width<T>(), std::unique_ptr<column>> operator()(Args&&...)
{
CUDF_FAIL("Column type must be numeric or chrono or decimal32/64");
}
};
} // anonymous namespace
std::unique_ptr<column> cast(column_view const& input,
data_type type,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(is_fixed_width(type), "Unary cast type must be fixed-width.");
return type_dispatcher(input.type(), detail::dispatch_unary_cast_from{input}, type, stream, mr);
}
} // namespace detail
std::unique_ptr<column> cast(column_view const& input,
data_type type,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::cast(input, type, rmm::cuda_stream_default, mr);
}
} // namespace cudf
|
e95fac1b0077d2c99ac502a16813db0498054211.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "..\include\cuda_sobel.h"
enum GRADIENT_DIRECTION { X, Y };
/*
for instance, the Sobel operator can be represented as sobelKernelXC[3][3] = [1; 2; 1]*[-1, 0, 1], reducing O(m*m) work to O(m+m),
so we can convolve the image with a row-wise and a column-wise kernel;
of course there are lots of filters that can (or cannot) be separated this way.
This link lists some common filters: https://dsp.stackexchange.com/questions/7586/common-use-cases-for-2d-nonseparable-convolution-filters
*/
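/*
quick sanity check of the separation claimed above: the outer product
[1;2;1]*[-1,0,1] gives the rows 1*[-1,0,1], 2*[-1,0,1], 1*[-1,0,1], i.e.
exactly sobelKernelXC below; swapping the factors ([-1;0;1]*[1,2,1]) yields
sobelKernelYC.
*/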
__constant__ float sobelKernelXC[3][3] = { { -1.0f,0.0f,1.0f },{ -2.0f,0.0f,2.0f },{ -1.0f,0.0f,1.0f } };
__constant__ float sobelKernelYC[3][3] = { { -1.0f,-2.0f,-1.0f },{ 0.0f,0.0f,0.0f },{ 1.0f,2.0f,1.0f } };
template<GRADIENT_DIRECTION direction, int RADIUS>
__global__ void sobel_gradient(uchar *d_input, int height, int width, uchar *d_output)
{
int row = blockDim.y*blockIdx.y + threadIdx.y;
	int col = 4 * blockDim.x*blockIdx.x + threadIdx.x; // each thread handles 4 pixels
static __shared__ float smem[6 + 2 * RADIUS][32 * 4 + 2 * RADIUS];
for (int i = row; i < height; i += blockDim.y*gridDim.y)
{
int index = i*width + col;
smem[threadIdx.y + RADIUS][threadIdx.x + RADIUS] = d_input[index];
smem[threadIdx.y + RADIUS][threadIdx.x + RADIUS + 32] = d_input[index + 32];
smem[threadIdx.y + RADIUS][threadIdx.x + RADIUS + 64] = d_input[index + 64];
smem[threadIdx.y + RADIUS][threadIdx.x + RADIUS + 96] = d_input[index + 96];
		// top and bottom halo rows
if (threadIdx.y < RADIUS)
{
int global_index = (i - RADIUS)*width + col;
smem[threadIdx.y][threadIdx.x + RADIUS] = d_input[global_index];
smem[threadIdx.y][threadIdx.x + RADIUS + 32] = d_input[global_index + 32];
smem[threadIdx.y][threadIdx.x + RADIUS + 64] = d_input[global_index + 64];
smem[threadIdx.y][threadIdx.x + RADIUS + 96] = d_input[global_index + 96];
}
if (threadIdx.y + RADIUS >= 6)
{
int global_index = (i + RADIUS)*width + col;
smem[threadIdx.y + 2 * RADIUS][threadIdx.x + RADIUS] = d_input[global_index];
smem[threadIdx.y + 2 * RADIUS][threadIdx.x + RADIUS + 32] = d_input[global_index + 32];
smem[threadIdx.y + 2 * RADIUS][threadIdx.x + RADIUS + 64] = d_input[global_index + 64];
smem[threadIdx.y + 2 * RADIUS][threadIdx.x + RADIUS + 96] = d_input[global_index + 96];
}
// left and right column
if (threadIdx.x < RADIUS)
smem[threadIdx.y + RADIUS][threadIdx.x] = d_input[i*width + (col - RADIUS)];
if (threadIdx.x + RADIUS >= 32)
smem[threadIdx.y + RADIUS][threadIdx.x + 2 * RADIUS + 96] = d_input[i*width + col + RADIUS + 96];
// load upper-left corner
if (threadIdx.x < RADIUS && threadIdx.y < RADIUS)
smem[threadIdx.y][threadIdx.x] = d_input[(i - RADIUS)*width + (col - RADIUS)];
// load upper-right
if (threadIdx.x + RADIUS >= 32 && threadIdx.y < RADIUS)
smem[threadIdx.y][threadIdx.x + 2 * RADIUS + 96] = d_input[(i - RADIUS)*width + (col + RADIUS + 96)];
// bottom-left
if (threadIdx.y + RADIUS >= 6 && threadIdx.x < RADIUS)
smem[threadIdx.y + 2 * RADIUS][threadIdx.x] = d_input[(i + RADIUS)*width + (col - RADIUS)];
// bottom-right
if (threadIdx.y + RADIUS >= 6 && threadIdx.x + RADIUS >= 32)
smem[threadIdx.y + 2 * RADIUS][threadIdx.x + 2 * RADIUS + 96] = d_input[(i + RADIUS)*width + (col + RADIUS + 96)];
__syncthreads();
float sum = 0.0f, sum_32 = 0.0f, sum_64 = 0.0f, sum_96 = 0.0f;
for (int i = -RADIUS; i <= RADIUS; i++)
for (int j = -RADIUS; j <= RADIUS; j++)
{
if (direction)
{
// y direction
sum = fmaf(sobelKernelYC[RADIUS + i][RADIUS + j], smem[threadIdx.y + RADIUS - i][threadIdx.x + RADIUS - j], sum);
sum_32 = fmaf(sobelKernelYC[RADIUS + i][RADIUS + j], smem[threadIdx.y + RADIUS - i][threadIdx.x + RADIUS - j + 32], sum_32);
sum_64 = fmaf(sobelKernelYC[RADIUS + i][RADIUS + j], smem[threadIdx.y + RADIUS - i][threadIdx.x + RADIUS - j + 64], sum_64);
sum_96 = fmaf(sobelKernelYC[RADIUS + i][RADIUS + j], smem[threadIdx.y + RADIUS - i][threadIdx.x + RADIUS - j + 96], sum_96);
}
else
{
// x direction
sum = fmaf(sobelKernelXC[RADIUS + i][RADIUS + j], smem[threadIdx.y + RADIUS - i][threadIdx.x + RADIUS - j], sum);
sum_32 = fmaf(sobelKernelXC[RADIUS + i][RADIUS + j], smem[threadIdx.y + RADIUS - i][threadIdx.x + RADIUS - j + 32], sum_32);
sum_64 = fmaf(sobelKernelXC[RADIUS + i][RADIUS + j], smem[threadIdx.y + RADIUS - i][threadIdx.x + RADIUS - j + 64], sum_64);
sum_96 = fmaf(sobelKernelXC[RADIUS + i][RADIUS + j], smem[threadIdx.y + RADIUS - i][threadIdx.x + RADIUS - j + 96], sum_96);
}
}
d_output[index] = sum;
d_output[index + 32] = sum_32;
d_output[index + 64] = sum_64;
d_output[index + 96] = sum_96;
}
}
__global__ void sobel_get_amplitude(uchar *gradient_x, uchar *gradient_y, int height, int width, uchar *amplitude)
{
int row = blockDim.y*blockIdx.y + threadIdx.y;
int col = blockDim.x*blockIdx.x + threadIdx.x;
auto saturate_uchar = [](const float & p)
{
uchar q = (uchar)(p > 255.0f ? 255 : p);
return q;
};
auto calc_amplitude = [](const uchar4 *x, const uchar4 *y)
{
float amp_x = __powf(x->x, 2.0f) + __powf(y->x, 2.0f);
float amp_y = __powf(x->y, 2.0f) + __powf(y->y, 2.0f);
float amp_z = __powf(x->z, 2.0f) + __powf(y->z, 2.0f);
float amp_w = __powf(x->w, 2.0f) + __powf(y->w, 2.0f);
amp_x = sqrtf(amp_x);
amp_y = sqrtf(amp_y);
amp_z = sqrtf(amp_z);
amp_w = sqrtf(amp_w);
uchar u_x = (uchar)(amp_x > 255.0f ? 255 : amp_x);
uchar u_y = (uchar)(amp_y > 255.0f ? 255 : amp_y);
uchar u_z = (uchar)(amp_z > 255.0f ? 255 : amp_z);
uchar u_w = (uchar)(amp_w > 255.0f ? 255 : amp_w);
return make_uchar4(u_x, u_y, u_z, u_w);
};
	for (int i = row; i < height / 4; i += blockDim.y*gridDim.y) // each uchar4 packs 4 pixels, hence height / 4
for (int j = col; j < width; j += blockDim.x*gridDim.x)
{
int index = i*width + j;
auto x = reinterpret_cast<uchar4*>(gradient_x)[index];
auto y = reinterpret_cast<uchar4*>(gradient_y)[index];
reinterpret_cast<uchar4*>(amplitude)[index] = calc_amplitude(&x, &y);
}
}
void cudaSobel(const cv::Mat & input, cv::Mat & output)
{
if (input.channels() != 1)return;
output = cv::Mat(input.size(), input.type(), cv::Scalar(0));
hipStream_t stream_x, stream_y;
CUDA_CALL(hipStreamCreate(&stream_x)); CUDA_CALL(hipStreamCreate(&stream_y));
uchar *d_input, *d_output;
CUDA_CALL(hipMalloc(&d_input, sizeof(uchar)*input.rows*input.cols));
CUDA_CALL(hipMemcpyAsync(d_input, input.data, sizeof(uchar)*input.rows*input.cols, hipMemcpyHostToDevice, stream_x));
CUDA_CALL(hipMalloc(&d_output, sizeof(uchar)*input.rows*input.cols));
// gradient matrix
uchar *gradient_x, *gradient_y;
CUDA_CALL(hipMalloc(&gradient_x, sizeof(uchar)*input.rows*input.cols));
CUDA_CALL(hipMalloc(&gradient_y, sizeof(uchar)*input.rows*input.cols));
	// define block size and grid size
dim3 block_size(THREAD_MULTIPLE, 6);
	// launch 1/16 as many blocks as a one-tile-per-block grid; each thread handles 4 pixels in x and each block strides over rows in y, so fewer, busier blocks improve performance a lot.
dim3 grid_size(input.cols / (4 * block_size.x), input.rows / (4 * block_size.y));
hipLaunchKernelGGL(( sobel_gradient<X, 1>) , dim3(grid_size), dim3(block_size), 0, stream_x, d_input, input.rows, input.cols, gradient_x);
hipLaunchKernelGGL(( sobel_gradient<Y, 1>) , dim3(grid_size), dim3(block_size), 0, stream_y, d_input, input.rows, input.cols, gradient_y);
CUDA_CALL(hipStreamSynchronize(stream_y));
hipLaunchKernelGGL(( sobel_get_amplitude) , dim3(grid_size), dim3(block_size), 0, stream_x, gradient_x, gradient_y, input.rows, input.cols, d_output);
CUDA_CALL(hipDeviceSynchronize());
CUDA_CALL(hipMemcpy(output.data, d_output, sizeof(uchar)*input.rows*input.cols, hipMemcpyDeviceToHost));
CUDA_CALL(hipFree(d_input)); CUDA_CALL(hipFree(d_output)); CUDA_CALL(hipFree(gradient_x)); CUDA_CALL(hipFree(gradient_y));
CUDA_CALL(hipStreamDestroy(stream_x)); CUDA_CALL(hipStreamDestroy(stream_y));
} | e95fac1b0077d2c99ac502a16813db0498054211.cu | #include "..\include\cuda_sobel.h"
enum GRADIENT_DIRECTION { X, Y };
/*
for instance, the Sobel operator can be represented as sobelKernelXC[3][3] = [1; 2; 1]*[-1, 0, 1], reducing O(m*m) work to O(m+m),
so we can convolve the image with a row-wise and a column-wise kernel;
of course there are lots of filters that can (or cannot) be separated this way.
This link lists some common filters: https://dsp.stackexchange.com/questions/7586/common-use-cases-for-2d-nonseparable-convolution-filters
*/
__constant__ float sobelKernelXC[3][3] = { { -1.0f,0.0f,1.0f },{ -2.0f,0.0f,2.0f },{ -1.0f,0.0f,1.0f } };
__constant__ float sobelKernelYC[3][3] = { { -1.0f,-2.0f,-1.0f },{ 0.0f,0.0f,0.0f },{ 1.0f,2.0f,1.0f } };
template<GRADIENT_DIRECTION direction, int RADIUS>
__global__ void sobel_gradient(uchar *d_input, int height, int width, uchar *d_output)
{
int row = blockDim.y*blockIdx.y + threadIdx.y;
	int col = 4 * blockDim.x*blockIdx.x + threadIdx.x; // each thread handles 4 pixels
static __shared__ float smem[6 + 2 * RADIUS][32 * 4 + 2 * RADIUS];
for (int i = row; i < height; i += blockDim.y*gridDim.y)
{
int index = i*width + col;
smem[threadIdx.y + RADIUS][threadIdx.x + RADIUS] = d_input[index];
smem[threadIdx.y + RADIUS][threadIdx.x + RADIUS + 32] = d_input[index + 32];
smem[threadIdx.y + RADIUS][threadIdx.x + RADIUS + 64] = d_input[index + 64];
smem[threadIdx.y + RADIUS][threadIdx.x + RADIUS + 96] = d_input[index + 96];
		// top and bottom halo rows
if (threadIdx.y < RADIUS)
{
int global_index = (i - RADIUS)*width + col;
smem[threadIdx.y][threadIdx.x + RADIUS] = d_input[global_index];
smem[threadIdx.y][threadIdx.x + RADIUS + 32] = d_input[global_index + 32];
smem[threadIdx.y][threadIdx.x + RADIUS + 64] = d_input[global_index + 64];
smem[threadIdx.y][threadIdx.x + RADIUS + 96] = d_input[global_index + 96];
}
if (threadIdx.y + RADIUS >= 6)
{
int global_index = (i + RADIUS)*width + col;
smem[threadIdx.y + 2 * RADIUS][threadIdx.x + RADIUS] = d_input[global_index];
smem[threadIdx.y + 2 * RADIUS][threadIdx.x + RADIUS + 32] = d_input[global_index + 32];
smem[threadIdx.y + 2 * RADIUS][threadIdx.x + RADIUS + 64] = d_input[global_index + 64];
smem[threadIdx.y + 2 * RADIUS][threadIdx.x + RADIUS + 96] = d_input[global_index + 96];
}
// left and right column
if (threadIdx.x < RADIUS)
smem[threadIdx.y + RADIUS][threadIdx.x] = d_input[i*width + (col - RADIUS)];
if (threadIdx.x + RADIUS >= 32)
smem[threadIdx.y + RADIUS][threadIdx.x + 2 * RADIUS + 96] = d_input[i*width + col + RADIUS + 96];
// load upper-left corner
if (threadIdx.x < RADIUS && threadIdx.y < RADIUS)
smem[threadIdx.y][threadIdx.x] = d_input[(i - RADIUS)*width + (col - RADIUS)];
// load upper-right
if (threadIdx.x + RADIUS >= 32 && threadIdx.y < RADIUS)
smem[threadIdx.y][threadIdx.x + 2 * RADIUS + 96] = d_input[(i - RADIUS)*width + (col + RADIUS + 96)];
// bottom-left
if (threadIdx.y + RADIUS >= 6 && threadIdx.x < RADIUS)
smem[threadIdx.y + 2 * RADIUS][threadIdx.x] = d_input[(i + RADIUS)*width + (col - RADIUS)];
// bottom-right
if (threadIdx.y + RADIUS >= 6 && threadIdx.x + RADIUS >= 32)
smem[threadIdx.y + 2 * RADIUS][threadIdx.x + 2 * RADIUS + 96] = d_input[(i + RADIUS)*width + (col + RADIUS + 96)];
__syncthreads();
float sum = 0.0f, sum_32 = 0.0f, sum_64 = 0.0f, sum_96 = 0.0f;
for (int i = -RADIUS; i <= RADIUS; i++)
for (int j = -RADIUS; j <= RADIUS; j++)
{
if (direction)
{
// y direction
sum = fmaf(sobelKernelYC[RADIUS + i][RADIUS + j], smem[threadIdx.y + RADIUS - i][threadIdx.x + RADIUS - j], sum);
sum_32 = fmaf(sobelKernelYC[RADIUS + i][RADIUS + j], smem[threadIdx.y + RADIUS - i][threadIdx.x + RADIUS - j + 32], sum_32);
sum_64 = fmaf(sobelKernelYC[RADIUS + i][RADIUS + j], smem[threadIdx.y + RADIUS - i][threadIdx.x + RADIUS - j + 64], sum_64);
sum_96 = fmaf(sobelKernelYC[RADIUS + i][RADIUS + j], smem[threadIdx.y + RADIUS - i][threadIdx.x + RADIUS - j + 96], sum_96);
}
else
{
// x direction
sum = fmaf(sobelKernelXC[RADIUS + i][RADIUS + j], smem[threadIdx.y + RADIUS - i][threadIdx.x + RADIUS - j], sum);
sum_32 = fmaf(sobelKernelXC[RADIUS + i][RADIUS + j], smem[threadIdx.y + RADIUS - i][threadIdx.x + RADIUS - j + 32], sum_32);
sum_64 = fmaf(sobelKernelXC[RADIUS + i][RADIUS + j], smem[threadIdx.y + RADIUS - i][threadIdx.x + RADIUS - j + 64], sum_64);
sum_96 = fmaf(sobelKernelXC[RADIUS + i][RADIUS + j], smem[threadIdx.y + RADIUS - i][threadIdx.x + RADIUS - j + 96], sum_96);
}
}
d_output[index] = sum;
d_output[index + 32] = sum_32;
d_output[index + 64] = sum_64;
d_output[index + 96] = sum_96;
}
}
__global__ void sobel_get_amplitude(uchar *gradient_x, uchar *gradient_y, int height, int width, uchar *amplitude)
{
int row = blockDim.y*blockIdx.y + threadIdx.y;
int col = blockDim.x*blockIdx.x + threadIdx.x;
auto saturate_uchar = [](const float & p)
{
uchar q = (uchar)(p > 255.0f ? 255 : p);
return q;
};
auto calc_amplitude = [](const uchar4 *x, const uchar4 *y)
{
float amp_x = __powf(x->x, 2.0f) + __powf(y->x, 2.0f);
float amp_y = __powf(x->y, 2.0f) + __powf(y->y, 2.0f);
float amp_z = __powf(x->z, 2.0f) + __powf(y->z, 2.0f);
float amp_w = __powf(x->w, 2.0f) + __powf(y->w, 2.0f);
amp_x = sqrtf(amp_x);
amp_y = sqrtf(amp_y);
amp_z = sqrtf(amp_z);
amp_w = sqrtf(amp_w);
uchar u_x = (uchar)(amp_x > 255.0f ? 255 : amp_x);
uchar u_y = (uchar)(amp_y > 255.0f ? 255 : amp_y);
uchar u_z = (uchar)(amp_z > 255.0f ? 255 : amp_z);
uchar u_w = (uchar)(amp_w > 255.0f ? 255 : amp_w);
return make_uchar4(u_x, u_y, u_z, u_w);
};
	for (int i = row; i < height / 4; i += blockDim.y*gridDim.y) // each uchar4 packs 4 pixels, hence height / 4
for (int j = col; j < width; j += blockDim.x*gridDim.x)
{
int index = i*width + j;
auto x = reinterpret_cast<uchar4*>(gradient_x)[index];
auto y = reinterpret_cast<uchar4*>(gradient_y)[index];
reinterpret_cast<uchar4*>(amplitude)[index] = calc_amplitude(&x, &y);
}
}
void cudaSobel(const cv::Mat & input, cv::Mat & output)
{
if (input.channels() != 1)return;
output = cv::Mat(input.size(), input.type(), cv::Scalar(0));
cudaStream_t stream_x, stream_y;
CUDA_CALL(cudaStreamCreate(&stream_x)); CUDA_CALL(cudaStreamCreate(&stream_y));
uchar *d_input, *d_output;
CUDA_CALL(cudaMalloc(&d_input, sizeof(uchar)*input.rows*input.cols));
CUDA_CALL(cudaMemcpyAsync(d_input, input.data, sizeof(uchar)*input.rows*input.cols, cudaMemcpyHostToDevice, stream_x));
CUDA_CALL(cudaMalloc(&d_output, sizeof(uchar)*input.rows*input.cols));
// gradient matrix
uchar *gradient_x, *gradient_y;
CUDA_CALL(cudaMalloc(&gradient_x, sizeof(uchar)*input.rows*input.cols));
CUDA_CALL(cudaMalloc(&gradient_y, sizeof(uchar)*input.rows*input.cols));
	// define block size and grid size
dim3 block_size(THREAD_MULTIPLE, 6);
	// launch 1/16 as many blocks as a one-tile-per-block grid; each thread handles 4 pixels in x and each block strides over rows in y, so fewer, busier blocks improve performance a lot.
dim3 grid_size(input.cols / (4 * block_size.x), input.rows / (4 * block_size.y));
sobel_gradient<X, 1> <<<grid_size, block_size, 0, stream_x>>> (d_input, input.rows, input.cols, gradient_x);
sobel_gradient<Y, 1> <<<grid_size, block_size, 0, stream_y>>> (d_input, input.rows, input.cols, gradient_y);
CUDA_CALL(cudaStreamSynchronize(stream_y));
sobel_get_amplitude <<<grid_size, block_size, 0, stream_x>>>(gradient_x, gradient_y, input.rows, input.cols, d_output);
CUDA_CALL(cudaDeviceSynchronize());
CUDA_CALL(cudaMemcpy(output.data, d_output, sizeof(uchar)*input.rows*input.cols, cudaMemcpyDeviceToHost));
CUDA_CALL(cudaFree(d_input)); CUDA_CALL(cudaFree(d_output)); CUDA_CALL(cudaFree(gradient_x)); CUDA_CALL(cudaFree(gradient_y));
CUDA_CALL(cudaStreamDestroy(stream_x)); CUDA_CALL(cudaStreamDestroy(stream_y));
} |
48fe44e7e3c557ff0565bee67be33e9eb197147e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/******************************************************************************
* *
* Distributed Hash Cracker v3.0 *
* *
* Copyright (c) 2009 RPISEC. *
* All rights reserved. *
* *
* Redistribution and use in source and binary forms, with or without modifi- *
* cation, are permitted provided that the following conditions are met: *
* *
* * Redistributions of source code must retain the above copyright notice *
* this list of conditions and the following disclaimer. *
* *
* * Redistributions in binary form must reproduce the above copyright *
* notice, this list of conditions and the following disclaimer in the *
* documentation and/or other materials provided with the distribution. *
* *
* * Neither the name of RPISEC nor the names of its contributors may be *
* used to endorse or promote products derived from this software without *
* specific prior written permission. *
* *
* THIS SOFTWARE IS PROVIDED BY RPISEC "AS IS" AND ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF *
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN *
* NO EVENT SHALL RPISEC BE HELD LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, *
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED *
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR *
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF *
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING *
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS *
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *
* *
******************************************************************************/
/*!
@file sha1_kernel.cu
@brief CUDA implementation of SHA1
*/
//Textures
texture<int, 1, hipReadModeElementType> texCharset;
#define sha1_f1(b,c,d) ( (b & c) | (~b & d) )
#define sha1_f2(b,c,d) (b ^ c ^ d)
#define sha1_f3(b,c,d) ( (b & c) | (b & d) | (c & d) )
#define sha1_f4(b,c,d) (b ^ c ^ d)
#define ROTL(a,shamt) (((a) << shamt) | ((a) >> (32-shamt)))
#define bswap(x) ( (x & 0xFF)<<24 | (x&0xFF00) << 8 | (x&0xFF0000) >> 8 | (x&0xFF000000) >> 24 )
//Fake-array macros
#define AddPadding(num) case num: \
w##num = (w##num & ~paddmask) | padding; \
break
#define InitGuess(num, a,b,c,d) \
{ \
/* Calculate the four indices */ \
LstartInit(lstart3, a); \
LstartInit(lstart2, b); \
LstartInit(lstart1, c); \
LstartInit(lstart0, d); \
/* Pack four elements into the int (if we exceed length, padding will overwrite the garbage) */ \
w##num = \
(charset[lstart3] << 24) | \
(charset[lstart2] << 16) | \
(charset[lstart1] << 8) | \
charset[lstart0]; \
}
#define LstartInit(ls, num) \
{ \
/* Get initial value and apply carry-in */ \
ls = carry + start[num]; \
/* Rightmost value? Bump by index */ \
if(num == lm1) \
ls += index; \
/* Carry out */ \
if(ls >= base && num<len) \
{ \
/* Calculate carry */ \
carry = ls / base; \
/* Update this digit */ \
ls %= base; \
} \
else \
carry = 0; \
}
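/*
Worked example of the carry chain above (illustrative, base = 26 for [a-z]):
with len = 3, start digits {0,0,25} ("aaz") and index = 3, the rightmost digit
(num == lm1) becomes 25 + 3 = 28, which overflows: the digit becomes
28 % 26 = 2 ('c') and carry = 1 flows into the next LstartInit, giving the
guess "abc".
*/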
#define PostShift() \
e=d; \
d=c; \
c=ROTL(b,30); \
b=a; \
a=temp; \
#define PreShift() \
{ \
/* Set w[round] */ \
w16 = ROTL(w13 ^ w8 ^ w2 ^ w0,1); \
/* Shift Ws */\
w0 = w1; \
w1 = w2; \
w2 = w3; \
w3 = w4; \
w4 = w5; \
w5 = w6; \
w6 = w7; \
w7 = w8; \
w8 = w9; \
w9 = w10; \
w10 = w11; \
w11 = w12; \
w12 = w13; \
w13 = w14; \
w14 = w15; \
w15 = w16; \
}
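/*
Sanity check against the SHA-1 message schedule: with w0..w15 holding the
sliding window w[t-16]..w[t-1], the spec's
w[t] = ROTL1(w[t-3] ^ w[t-8] ^ w[t-14] ^ w[t-16]) maps to
ROTL(w13 ^ w8 ^ w2 ^ w0, 1), exactly the w16 computed in PreShift above.
*/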
#define RoundFromBlock1() \
PreShift(); \
temp=ROTL(a,5) + sha1_f1(b,c,d) + e + w16 + 0x5A827999; \
PostShift();
#define RoundFromBlock2() \
PreShift(); \
temp=ROTL(a,5) + sha1_f2(b,c,d) + e + w16 + 0x6ED9EBA1; \
PostShift();
#define RoundFromBlock3() \
PreShift(); \
temp=ROTL(a,5) + e + w16 + 0x8F1BBCDC; \
temp += sha1_f3(b,c,d); \
PostShift();
#define RoundFromBlock4() \
PreShift(); \
temp=ROTL(a,5) + sha1_f4(b,c,d) + e + w16 + 0xCA62C1D6; \
PostShift();
#define SaveOutput(num) \
case num: \
reinterpret_cast<int*>(output)[num] = w##num;
#define SaveOutputBatch(num) \
case num: \
po[num] = w##num;
/*!
@brief CUDA implementation of SHA1
Thread-per-block requirement: minimum 64
@param gtarget Target value (five ints, little endian)
@param gstart Start index in charset (array of 32 ints, data is left aligned, unused values are at right)
@param gsalt Salt (not used)
@param status Set to true by a thread which succeeds in cracking the hash
@param output Set to the collision by a thread which succeeds in cracking the hash
@param base Length of the character set (passed in texCharset texture)
@param len Length of valid data in gstart
@param saltlen Length of salt (not used)
@param hashcount Number of hashes being tested (not used)
*/
extern "C" __global__ void sha1Kernel(int* gtarget, int* gstart, char* gsalt, char* status, char* output, int base, int len, int saltlen, int hashcount)
{
//Get our position in the grid
int index = (blockDim.x * blockIdx.x) + threadIdx.x;
//Cache charset in shmem
__shared__ char charset[256];
if(threadIdx.x < ceil((float)base / 4))
{
int* ccs = (int*)&charset[0];
ccs[threadIdx.x] = tex1Dfetch(texCharset, threadIdx.x);
}
//Cache start value
__shared__ int start[32];
if(threadIdx.x < len)
start[threadIdx.x] = gstart[threadIdx.x];
//Cache target value (do byte-swap here)
__shared__ int target[5];
if(threadIdx.x < 5)
target[threadIdx.x] = bswap(gtarget[threadIdx.x]);
//Wait for all cache filling to finish
__syncthreads();
//Core round functions
#include "sha1_kernel_core.h"
//Test the output
int* pt = (int*)target;
if( (pt[0] == a) && (pt[1] == b) && (pt[2] == c) && (pt[3] == d) && (pt[4] == e))
{
//If we get here, we must be a match! Save the result and quit
*status = 1;
unsigned int lo4 = len/4;
unsigned int lstart0=0, lstart1=0, lstart2=0, lstart3=0;
unsigned int lm1 = len-1;
unsigned int carry = 0; //no initial carry-in
switch(lo4)
{
case 7:
InitGuess(7, 31,30,29,28);
case 6:
InitGuess(6, 27,26,25,24);
case 5:
InitGuess(5, 23,22,21,20);
case 4:
InitGuess(4, 19,18,17,16);
case 3:
InitGuess(3, 15,14,13,12);
case 2:
InitGuess(2, 11,10,9,8);
case 1:
InitGuess(1, 7,6,5,4);
case 0:
default:
InitGuess(0, 3,2,1,0);
}
switch(lo4)
{
SaveOutput(7);
SaveOutput(6);
SaveOutput(5);
SaveOutput(4);
SaveOutput(3);
SaveOutput(2);
SaveOutput(1);
SaveOutput(0);
}
}
}
/*!
@brief CUDA implementation of SHA1 with batch processing support
Thread-per-block requirement: minimum 64
@param gtarget Target value (five ints, little endian)
@param gstart Start index in charset (array of 32 ints, data is left aligned, unused values are at right)
@param gsalt Salt (not used)
@param status Set to true by a thread which succeeds in cracking the hash
@param output Set to the collision by a thread which succeeds in cracking the hash
@param base Length of the character set (passed in texCharset texture)
@param len Length of valid data in gstart
@param saltlen Length of salt (not used)
@param hashcount Number of hashes being tested (not used)
*/
extern "C" __global__ void sha1BatchKernel(int* gtarget, int* gstart, char* gsalt, char* status, char* output, int base, int len, int saltlen, int hashcount)
{
//Get our position in the grid
int index = (blockDim.x * blockIdx.x) + threadIdx.x;
//Cache charset in shmem
__shared__ char charset[256];
if(threadIdx.x < ceil((float)base / 4))
{
int* ccs = (int*)&charset[0];
ccs[threadIdx.x] = tex1Dfetch(texCharset, threadIdx.x);
}
//Cache start value
__shared__ int start[32];
if(threadIdx.x < len)
start[threadIdx.x] = gstart[threadIdx.x];
//Cache target value (do byte-swap here)
__shared__ int target[5 * 128];
if(threadIdx.x < 64)
{
int td = threadIdx.x;
if(td < hashcount)
{
for(int i=0; i<5; i++)
target[5*td + i] = bswap(gtarget[5*td + i]);
}
		//this thread also covers the second half of the batch (targets 64..127)
		td += 64;
		if(td < hashcount)
		{
			for(int i=0; i<5; i++)
				target[5*td + i] = bswap(gtarget[5*td + i]);
		}
}
//Wait for all cache filling to finish
__syncthreads();
//Core round functions
#include "sha1_kernel_core.h"
//Test the output
for(int i=0; i<hashcount; i++)
{
int* xtarget = target + (5*i);
//Check results
if(xtarget[0] == a && xtarget[1] == b && xtarget[2] == c && xtarget[3] == d && xtarget[4] == e)
{
status[i] = 1;
unsigned int lo4 = len/4;
unsigned int lstart0=0, lstart1=0, lstart2=0, lstart3=0;
unsigned int lm1 = len-1;
unsigned int carry = 0; //no initial carry-in
switch(lo4)
{
case 7:
InitGuess(7, 31,30,29,28);
case 6:
InitGuess(6, 27,26,25,24);
case 5:
InitGuess(5, 23,22,21,20);
case 4:
InitGuess(4, 19,18,17,16);
case 3:
InitGuess(3, 15,14,13,12);
case 2:
InitGuess(2, 11,10,9,8);
case 1:
InitGuess(1, 7,6,5,4);
case 0:
default:
InitGuess(0, 3,2,1,0);
}
int* po = (int*)output + (i*8);
switch(lo4)
{
SaveOutputBatch(7);
SaveOutputBatch(6);
SaveOutputBatch(5);
SaveOutputBatch(4);
SaveOutputBatch(3);
SaveOutputBatch(2);
SaveOutputBatch(1);
SaveOutputBatch(0);
}
}
}
}
| 48fe44e7e3c557ff0565bee67be33e9eb197147e.cu | /******************************************************************************
* *
* Distributed Hash Cracker v3.0 *
* *
* Copyright (c) 2009 RPISEC. *
* All rights reserved. *
* *
* Redistribution and use in source and binary forms, with or without modifi- *
* cation, are permitted provided that the following conditions are met: *
* *
* * Redistributions of source code must retain the above copyright notice *
* this list of conditions and the following disclaimer. *
* *
* * Redistributions in binary form must reproduce the above copyright *
* notice, this list of conditions and the following disclaimer in the *
* documentation and/or other materials provided with the distribution. *
* *
* * Neither the name of RPISEC nor the names of its contributors may be *
* used to endorse or promote products derived from this software without *
* specific prior written permission. *
* *
* THIS SOFTWARE IS PROVIDED BY RPISEC "AS IS" AND ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF *
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN *
* NO EVENT SHALL RPISEC BE HELD LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, *
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED *
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR *
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF *
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING *
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS *
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *
* *
******************************************************************************/
/*!
@file sha1_kernel.cu
@brief CUDA implementation of SHA1
*/
//Textures
texture<int, 1, cudaReadModeElementType> texCharset;
#define sha1_f1(b,c,d) ( (b & c) | (~b & d) )
#define sha1_f2(b,c,d) (b ^ c ^ d)
#define sha1_f3(b,c,d) ( (b & c) | (b & d) | (c & d) )
#define sha1_f4(b,c,d) (b ^ c ^ d)
#define ROTL(a,shamt) (((a) << shamt) | ((a) >> (32-shamt)))
#define bswap(x) ( (x & 0xFF)<<24 | (x&0xFF00) << 8 | (x&0xFF0000) >> 8 | (x&0xFF000000) >> 24 )
//Fake-array macros
#define AddPadding(num) case num: \
w##num = (w##num & ~paddmask) | padding; \
break
#define InitGuess(num, a,b,c,d) \
{ \
/* Calculate the four indices */ \
LstartInit(lstart3, a); \
LstartInit(lstart2, b); \
LstartInit(lstart1, c); \
LstartInit(lstart0, d); \
/* Pack four elements into the int (if we exceed length, padding will overwrite the garbage) */ \
w##num = \
(charset[lstart3] << 24) | \
(charset[lstart2] << 16) | \
(charset[lstart1] << 8) | \
charset[lstart0]; \
}
#define LstartInit(ls, num) \
{ \
/* Get initial value and apply carry-in */ \
ls = carry + start[num]; \
/* Rightmost value? Bump by index */ \
if(num == lm1) \
ls += index; \
/* Carry out */ \
if(ls >= base && num<len) \
{ \
/* Calculate carry */ \
carry = ls / base; \
/* Update this digit */ \
ls %= base; \
} \
else \
carry = 0; \
}
#define PostShift() \
e=d; \
d=c; \
c=ROTL(b,30); \
b=a; \
a=temp; \
#define PreShift() \
{ \
/* Set w[round] */ \
w16 = ROTL(w13 ^ w8 ^ w2 ^ w0,1); \
/* Shift Ws */\
w0 = w1; \
w1 = w2; \
w2 = w3; \
w3 = w4; \
w4 = w5; \
w5 = w6; \
w6 = w7; \
w7 = w8; \
w8 = w9; \
w9 = w10; \
w10 = w11; \
w11 = w12; \
w12 = w13; \
w13 = w14; \
w14 = w15; \
w15 = w16; \
}
#define RoundFromBlock1() \
PreShift(); \
temp=ROTL(a,5) + sha1_f1(b,c,d) + e + w16 + 0x5A827999; \
PostShift();
#define RoundFromBlock2() \
PreShift(); \
temp=ROTL(a,5) + sha1_f2(b,c,d) + e + w16 + 0x6ED9EBA1; \
PostShift();
#define RoundFromBlock3() \
PreShift(); \
temp=ROTL(a,5) + e + w16 + 0x8F1BBCDC; \
temp += sha1_f3(b,c,d); \
PostShift();
#define RoundFromBlock4() \
PreShift(); \
temp=ROTL(a,5) + sha1_f4(b,c,d) + e + w16 + 0xCA62C1D6; \
PostShift();
#define SaveOutput(num) \
case num: \
reinterpret_cast<int*>(output)[num] = w##num;
#define SaveOutputBatch(num) \
case num: \
po[num] = w##num;
/*!
@brief CUDA implementation of SHA1
Thread-per-block requirement: minimum 64
@param gtarget Target value (five ints, little endian)
@param gstart Start index in charset (array of 32 ints, data is left aligned, unused values are at right)
@param gsalt Salt (not used)
@param status Set to true by a thread which succeeds in cracking the hash
@param output Set to the collision by a thread which succeeds in cracking the hash
@param base Length of the character set (passed in texCharset texture)
@param len Length of valid data in gstart
@param saltlen Length of salt (not used)
@param hashcount Number of hashes being tested (not used)
*/
extern "C" __global__ void sha1Kernel(int* gtarget, int* gstart, char* gsalt, char* status, char* output, int base, int len, int saltlen, int hashcount)
{
//Get our position in the grid
int index = (blockDim.x * blockIdx.x) + threadIdx.x;
//Cache charset in shmem
__shared__ char charset[256];
if(threadIdx.x < ceil((float)base / 4))
{
int* ccs = (int*)&charset[0];
ccs[threadIdx.x] = tex1Dfetch(texCharset, threadIdx.x);
}
//Cache start value
__shared__ int start[32];
if(threadIdx.x < len)
start[threadIdx.x] = gstart[threadIdx.x];
//Cache target value (do byte-swap here)
__shared__ int target[5];
if(threadIdx.x < 5)
target[threadIdx.x] = bswap(gtarget[threadIdx.x]);
//Wait for all cache filling to finish
__syncthreads();
//Core round functions
#include "sha1_kernel_core.h"
//Test the output
int* pt = (int*)target;
if( (pt[0] == a) && (pt[1] == b) && (pt[2] == c) && (pt[3] == d) && (pt[4] == e))
{
//If we get here, we must be a match! Save the result and quit
*status = 1;
unsigned int lo4 = len/4;
unsigned int lstart0=0, lstart1=0, lstart2=0, lstart3=0;
unsigned int lm1 = len-1;
unsigned int carry = 0; //no initial carry-in
switch(lo4)
{
case 7:
InitGuess(7, 31,30,29,28);
case 6:
InitGuess(6, 27,26,25,24);
case 5:
InitGuess(5, 23,22,21,20);
case 4:
InitGuess(4, 19,18,17,16);
case 3:
InitGuess(3, 15,14,13,12);
case 2:
InitGuess(2, 11,10,9,8);
case 1:
InitGuess(1, 7,6,5,4);
case 0:
default:
InitGuess(0, 3,2,1,0);
}
switch(lo4)
{
SaveOutput(7);
SaveOutput(6);
SaveOutput(5);
SaveOutput(4);
SaveOutput(3);
SaveOutput(2);
SaveOutput(1);
SaveOutput(0);
}
}
}
/*!
@brief CUDA implementation of SHA1 with batch processing support
Thread-per-block requirement: minimum 64
@param gtarget Target value (five ints, little endian)
@param gstart Start index in charset (array of 32 ints, data is left aligned, unused values are at right)
@param gsalt Salt (not used)
@param status Set to true by a thread which succeeds in cracking the hash
@param output Set to the collision by a thread which succeeds in cracking the hash
@param base Length of the character set (passed in texCharset texture)
@param len Length of valid data in gstart
@param saltlen Length of salt (not used)
@param hashcount Number of hashes being tested (not used)
*/
extern "C" __global__ void sha1BatchKernel(int* gtarget, int* gstart, char* gsalt, char* status, char* output, int base, int len, int saltlen, int hashcount)
{
//Get our position in the grid
int index = (blockDim.x * blockIdx.x) + threadIdx.x;
//Cache charset in shmem
__shared__ char charset[256];
if(threadIdx.x < ceil((float)base / 4))
{
int* ccs = (int*)&charset[0];
ccs[threadIdx.x] = tex1Dfetch(texCharset, threadIdx.x);
}
//Cache start value
__shared__ int start[32];
if(threadIdx.x < len)
start[threadIdx.x] = gstart[threadIdx.x];
//Cache target value (do byte-swap here)
__shared__ int target[5 * 128];
if(threadIdx.x < 64)
{
int td = threadIdx.x;
if(td < hashcount)
{
for(int i=0; i<5; i++)
target[5*td + i] = bswap(gtarget[5*td + i]);
}
		//this thread also covers the second half of the batch (targets 64..127)
		td += 64;
		if(td < hashcount)
		{
			for(int i=0; i<5; i++)
				target[5*td + i] = bswap(gtarget[5*td + i]);
		}
}
//Wait for all cache filling to finish
__syncthreads();
//Core round functions
#include "sha1_kernel_core.h"
//Test the output
for(int i=0; i<hashcount; i++)
{
int* xtarget = target + (5*i);
//Check results
if(xtarget[0] == a && xtarget[1] == b && xtarget[2] == c && xtarget[3] == d && xtarget[4] == e)
{
status[i] = 1;
unsigned int lo4 = len/4;
unsigned int lstart0=0, lstart1=0, lstart2=0, lstart3=0;
unsigned int lm1 = len-1;
unsigned int carry = 0; //no initial carry-in
switch(lo4)
{
case 7:
InitGuess(7, 31,30,29,28);
case 6:
InitGuess(6, 27,26,25,24);
case 5:
InitGuess(5, 23,22,21,20);
case 4:
InitGuess(4, 19,18,17,16);
case 3:
InitGuess(3, 15,14,13,12);
case 2:
InitGuess(2, 11,10,9,8);
case 1:
InitGuess(1, 7,6,5,4);
case 0:
default:
InitGuess(0, 3,2,1,0);
}
int* po = (int*)output + (i*8);
switch(lo4)
{
SaveOutputBatch(7);
SaveOutputBatch(6);
SaveOutputBatch(5);
SaveOutputBatch(4);
SaveOutputBatch(3);
SaveOutputBatch(2);
SaveOutputBatch(1);
SaveOutputBatch(0);
}
}
}
}
|
9717a358eb84b64ff6313ccbf285c91f8c2d579d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//xfail:BOOGIE_ERROR
//--gridDim=1 --blockDim=2 --no-inline
//This kernel is racy.
//
//It uses struct-assignment, which is translated into a memcpy by clang and
//dealt with as a series of reads/writes by bugle.
typedef struct {
short x;
short y;
} pair_t;
__global__ void k(pair_t *pairs) {
pair_t fresh;
pairs[42] = fresh;
}
| 9717a358eb84b64ff6313ccbf285c91f8c2d579d.cu | //xfail:BOOGIE_ERROR
//--gridDim=1 --blockDim=2 --no-inline
//This kernel is racy.
//
//It uses struct-assignment, which is translated into a memcpy by clang and
//dealt with as a series of reads/writes by bugle.
typedef struct {
short x;
short y;
} pair_t;
__global__ void k(pair_t *pairs) {
pair_t fresh;
pairs[42] = fresh;
}
|
df6c5ca4ebd586b006780f3636adda664d35d3da.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math.h>
#include "kmeans.h"
#ifdef __DEVICE_EMULATION__
#define EMUSYNC __syncthreads()
#else
#define EMUSYNC (void*)(0)
#endif
void checkCUDAError(const char *msg)
{
hipError_t err = hipGetLastError();
if( hipSuccess != err)
{
fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString( err) );
exit(-1);
}
}
static void vec_print(float *d_vec, int len)
{
float vec[128];
hipMemcpy(vec, d_vec, len*sizeof(float), hipMemcpyDeviceToHost);
for(int i=0; i<len; i++)
printf("%f ", vec[i]);
printf("\n");
}
__device__ void load_center(float *target, float* source, int len)
{
for( int i = 0; i < len+1; i++ )
target[i] = source[i];
}
__device__ float gpu_distance(float *v1, float *v2, int len)
{
float d=0;
for(int i = 1; i < len+1; i++){
float tmp = v2[i] - v1[i];
d += tmp*tmp;
}
return d;
}
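/* Note: this returns the squared Euclidean distance over elements 1..len
   (element 0 is the label slot); the square root is omitted because it is
   monotone and does not change the argmin taken in gpu_update_label. */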
__global__ void gpu_update_label(int k, int n, int len, float* vec, int* label, float* cen)
{
int i= blockIdx.x*blockDim.x + threadIdx.x;
float* vector=vec+i*(len+1);
float min_d=INF, d;
int new_label=1021;
extern __shared__ float center[];
for(int j = 0; j < k; j++){
if(threadIdx.x==0)load_center(center, cen+j*(len+1), len);
__syncthreads();
if(i<n){
d = gpu_distance(center, vector, len);
if(d < min_d){
min_d = d;
new_label = j;
}
}
__syncthreads();
}
__syncthreads();
label[i]=new_label;
}
static void updateCenter(int k, int n, int len, float *vec[],
int* label, float* center)
{
float* tmp_cen;
memset(center, 0, k*(len+1)*sizeof(float));
for(int i=0; i<n; i++){
if(label[i]>=k)printf("label[%d]:%d\n", i, label[i]);
assert(label[i]<k && label[i]>=0);
tmp_cen=center+label[i]*(len+1);
for(int j=0; j<len; j++)
tmp_cen[j+1] += vec[i][j];
tmp_cen[0]++;
}
for(int i=0; i<k; i++){
tmp_cen=center+i*(len+1);
for(int j=1; j<len+1; j++)
tmp_cen[j]/=tmp_cen[0];
}
}
__global__ void gpu_update_center(int k, int n, int len, float* vec, float* cen)
{
int i= blockIdx.x*blockDim.x + threadIdx.x;
float* vector=vec+i*(len+1);
float* tmp_cen;
float* center;
int label;
if(i==0){
for(int j = 0; j < k; j++){
tmp_cen=cen+j*(len+1);
for(int m=0; m<len+1; m++)tmp_cen[m]=0;
}
}
__syncthreads();
if(i<n){
label=*((int*)vector);
center = cen + label*(len+1);
for(int j=1; j<len+1; j++){
center[j] += vector[j];
}
center[0]++;
}
__syncthreads();
if(i==0){
for(int j=0; j<k; j++){
tmp_cen=cen+j*(len+1);
for(int m=1; m<len+1; m++){
tmp_cen[m]/=*tmp_cen;
}
}
}
__syncthreads();
}
static void updateLabel(int k, int n, int len, float *vec,int* label, float* center)
{
int numThreadsPerBlock=256;
int numBlocks=(n+numThreadsPerBlock-1)/numThreadsPerBlock; //round up so tail vectors are labeled too
hipLaunchKernelGGL(( gpu_update_label), dim3(numBlocks), dim3(numThreadsPerBlock), (len+1)*sizeof(float), 0, k, n, len, vec, label, center); //load_center fills len+1 floats of shared memory
checkCUDAError("updateLabel:");
}
/* static void updateCenter(int k, int n, int len, float *vec, float* center) */
/* { */
/* int numThreadsPerBlock=256; */
/* int numBlocks=n/numThreadsPerBlock ; */
/* gpu_update_center<<<numBlocks, numThreadsPerBlock>>>(k, n, len, vec, center); */
/* checkCUDAError("updateCenter:"); */
/* } */
__global__ void gpu_copy_label(int n, int len, float* vec, int* lab)
{
int i= blockIdx.x*blockDim.x + threadIdx.x;
float* vector=vec+i*(len+1);
if(i<n)
lab[i]=*((int*)vector);
}
static void copyLabel(int n, int len, float* vec, int* lab)
{
int numThreadsPerBlock=256;
int numBlocks=(n+numThreadsPerBlock-1)/numThreadsPerBlock; //round up so tail labels are copied too
int* label;
hipMalloc((void**)&label, n*sizeof(int));
hipLaunchKernelGGL(( gpu_copy_label), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, n, len, vec, label);
checkCUDAError("copyLabel:");
hipMemcpy(lab, label, n*sizeof(int), hipMemcpyDeviceToHost);
hipFree(label);
}
static void initVec(int n, int len, int* label, float *vec[], float** d_vec)
{
int i;
hipMalloc((void**)d_vec, n*(len+1)*sizeof(float));
for(i=0; i<n; i++){
hipMemcpy((*d_vec)+i*(len+1), label+i, sizeof(int), hipMemcpyHostToDevice);
hipMemcpy((*d_vec)+i*(len+1)+1, vec[i], len*sizeof(float), hipMemcpyHostToDevice);
}
}
static void freeVec(float *d_vec)
{
hipFree(d_vec);
}
static void initCenter(int k, int len, float** center, float** d_center)
{
*center=(float*)malloc(k*(len+1)*sizeof(float));
if(!*center)panic("no mem!\n");
hipMalloc((void**)d_center, k*(len+1)*sizeof(float));
}
static void freeCenter(float* center)
{
hipFree(center);
}
static bool isChanged(int n, int *label[])
{
int i;
for(i=0; i<n; i++)
if(label[0][i]!=label[1][i])
return true;
return false;
}
void initLabel(int n, int *lab, int* label[])
{
label[0]=(int*)malloc(n*sizeof(int));
label[1]=(int*)malloc(n*sizeof(int));
if(!label[0] || !label[1])panic("no mem!\n");
memcpy(label[0], lab, n*sizeof(int));
memset(label[1], 0, n*sizeof(int));
}
void freeLabel(int* label[])
{
free(label[0]);
free(label[1]);
}
void copyLabel(int n, int* lab, int* label)
{
memcpy(lab, label, n*sizeof(int));
}
void GPUKmeans::kmeansClustering(int k, int n, int len, float *vec[],
int* lab)
{
float *center, *d_center;
int *d_label, *label[2];
float* d_vec;
bool changed=true;
int cur=0, iter;
initLabel(n, lab, label);
initCenter(k, len, &center, &d_center);
initVec(n, len, label[cur], vec, &d_vec);
hipMalloc((void**)&d_label, n*sizeof(int));
for(iter=0; iter<=10 && changed; iter++){
updateCenter(k, n, len, vec, label[cur], center);
hipMemcpy(d_center, center, k*(len+1)*sizeof(float), hipMemcpyHostToDevice);
/* vec_print(d_center, 10); */
updateLabel(k, n, len, d_vec, d_label, d_center);
cur^=1;
hipMemcpy(label[cur], d_label, n*sizeof(int), hipMemcpyDeviceToHost);
/* changed=isChanged(n, label); */
}
copyLabel(n, lab, label[cur]);
freeLabel(label);
freeVec(d_vec);
free(center); //center came from malloc(); hipFree on it would be invalid
freeCenter(d_center);
hipFree(d_label);
printf("iter: %d\n", iter);
}
| df6c5ca4ebd586b006780f3636adda664d35d3da.cu | #include <math.h>
#include "kmeans.h"
#ifdef __DEVICE_EMULATION__
#define EMUSYNC __syncthreads()
#else
#define EMUSYNC (void*)(0)
#endif
void checkCUDAError(const char *msg)
{
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err)
{
fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) );
exit(-1);
}
}
static void vec_print(float *d_vec, int len)
{
float vec[128];
cudaMemcpy(vec, d_vec, len*sizeof(float), cudaMemcpyDeviceToHost);
for(int i=0; i<len; i++)
printf("%f ", vec[i]);
printf("\n");
}
__device__ void load_center(float *target, float* source, int len)
{
for( int i = 0; i < len+1; i++ )
target[i] = source[i];
}
__device__ float gpu_distance(float *v1, float *v2, int len)
{
float d=0;
for(int i = 1; i < len+1; i++){
float tmp = v2[i] - v1[i];
d += tmp*tmp;
}
return d;
}
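//Labeling kernel: for each of the k centroids, thread 0 of a block stages
//the centroid into dynamic shared memory, the block syncs, then every
//thread computes the squared distance from its own vector and keeps the
//argmin as its new label. Element 0 of each (len+1)-float record is the
//label slot, which is why distances start at index 1.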
__global__ void gpu_update_label(int k, int n, int len, float* vec, int* label, float* cen)
{
int i= blockIdx.x*blockDim.x + threadIdx.x;
float* vector=vec+i*(len+1);
float min_d=INF, d;
int new_label=1021;
extern __shared__ float center[];
for(int j = 0; j < k; j++){
if(threadIdx.x==0)load_center(center, cen+j*(len+1), len);
__syncthreads();
if(i<n){
d = gpu_distance(center, vector, len);
if(d < min_d){
min_d = d;
new_label = j;
}
}
__syncthreads();
}
__syncthreads();
if(i<n) label[i]=new_label; //guard: the grid may be rounded up past n
}
static void updateCenter(int k, int n, int len, float *vec[],
int* label, float* center)
{
float* tmp_cen;
memset(center, 0, k*(len+1)*sizeof(float));
for(int i=0; i<n; i++){
if(label[i]>=k)printf("label[%d]:%d\n", i, label[i]);
assert(label[i]<k && label[i]>=0);
tmp_cen=center+label[i]*(len+1);
for(int j=0; j<len; j++)
tmp_cen[j+1] += vec[i][j];
tmp_cen[0]++;
}
for(int i=0; i<k; i++){
tmp_cen=center+i*(len+1);
for(int j=1; j<len+1; j++)
tmp_cen[j]/=tmp_cen[0];
}
}
__global__ void gpu_update_center(int k, int n, int len, float* vec, float* cen)
{
int i= blockIdx.x*blockDim.x + threadIdx.x;
float* vector=vec+i*(len+1);
float* tmp_cen;
float* center;
int label;
if(i==0){
for(int j = 0; j < k; j++){
tmp_cen=cen+j*(len+1);
for(int m=0; m<len+1; m++)tmp_cen[m]=0;
}
}
__syncthreads();
if(i<n){
label=*((int*)vector);
center = cen + label*(len+1);
for(int j=1; j<len+1; j++){
center[j] += vector[j];
}
center[0]++;
}
__syncthreads();
if(i==0){
for(int j=0; j<k; j++){
tmp_cen=cen+j*(len+1);
for(int m=1; m<len+1; m++){
tmp_cen[m]/=*tmp_cen;
}
}
}
__syncthreads();
}
static void updateLabel(int k, int n, int len, float *vec,int* label, float* center)
{
int numThreadsPerBlock=256;
int numBlocks=(n+numThreadsPerBlock-1)/numThreadsPerBlock; //round up so tail vectors are labeled too
gpu_update_label<<<numBlocks, numThreadsPerBlock, (len+1)*sizeof(float)>>>(k, n, len, vec, label, center); //load_center fills len+1 floats of shared memory
checkCUDAError("updateLabel:");
}
/* static void updateCenter(int k, int n, int len, float *vec, float* center) */
/* { */
/* int numThreadsPerBlock=256; */
/* int numBlocks=n/numThreadsPerBlock ; */
/* gpu_update_center<<<numBlocks, numThreadsPerBlock>>>(k, n, len, vec, center); */
/* checkCUDAError("updateCenter:"); */
/* } */
__global__ void gpu_copy_label(int n, int len, float* vec, int* lab)
{
int i= blockIdx.x*blockDim.x + threadIdx.x;
float* vector=vec+i*(len+1);
if(i<n)
lab[i]=*((int*)vector);
}
static void copyLabel(int n, int len, float* vec, int* lab)
{
int numThreadsPerBlock=256;
int numBlocks=(n+numThreadsPerBlock-1)/numThreadsPerBlock; //round up so tail labels are copied too
int* label;
cudaMalloc((void**)&label, n*sizeof(int));
gpu_copy_label<<<numBlocks, numThreadsPerBlock>>>(n, len, vec, label);
checkCUDAError("copyLabel:");
cudaMemcpy(lab, label, n*sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(label);
}
static void initVec(int n, int len, int* label, float *vec[], float** d_vec)
{
int i;
cudaMalloc((void**)d_vec, n*(len+1)*sizeof(float));
for(i=0; i<n; i++){
cudaMemcpy((*d_vec)+i*(len+1), label+i, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy((*d_vec)+i*(len+1)+1, vec[i], len*sizeof(float), cudaMemcpyHostToDevice);
}
}
static void freeVec(float *d_vec)
{
cudaFree(d_vec);
}
static void initCenter(int k, int len, float** center, float** d_center)
{
*center=(float*)malloc(k*(len+1)*sizeof(float));
if(!*center)panic("no mem!\n");
cudaMalloc((void**)d_center, k*(len+1)*sizeof(float));
}
static void freeCenter(float* center)
{
cudaFree(center);
}
static bool isChanged(int n, int *label[])
{
int i;
for(i=0; i<n; i++)
if(label[0][i]!=label[1][i])
return true;
return false;
}
void initLabel(int n, int *lab, int* label[])
{
label[0]=(int*)malloc(n*sizeof(int));
label[1]=(int*)malloc(n*sizeof(int));
if(!label[0] || !label[1])panic("no mem!\n");
memcpy(label[0], lab, n*sizeof(int));
memset(label[1], 0, n*sizeof(int));
}
void freeLabel(int* label[])
{
free(label[0]);
free(label[1]);
}
void copyLabel(int n, int* lab, int* label)
{
memcpy(lab, label, n*sizeof(int));
}
void GPUKmeans::kmeansClustering(int k, int n, int len, float *vec[],
int* lab)
{
float *center, *d_center;
int *d_label, *label[2];
float* d_vec;
bool changed=true;
int cur=0, iter;
initLabel(n, lab, label);
initCenter(k, len, &center, &d_center);
initVec(n, len, label[cur], vec, &d_vec);
cudaMalloc((void**)&d_label, n*sizeof(int));
for(iter=0; iter<=10 && changed; iter++){
updateCenter(k, n, len, vec, label[cur], center);
cudaMemcpy(d_center, center, k*(len+1)*sizeof(float), cudaMemcpyHostToDevice);
/* vec_print(d_center, 10); */
updateLabel(k, n, len, d_vec, d_label, d_center);
cur^=1;
cudaMemcpy(label[cur], d_label, n*sizeof(int), cudaMemcpyDeviceToHost);
/* changed=isChanged(n, label); */
}
copyLabel(n, lab, label[cur]);
freeLabel(label);
freeVec(d_vec);
freeCenter(center);
printf("iter: %d\n", iter);
}
|
537594c14acc76e9807874f71d72b1acc7320cd5.hip | // !!! This is a file automatically generated by hipify!!!
#include <sstream>
#include <stdlib.h>
#include <windows.h>
#include <time.h>
#include "rgb_data.h"
#include "resnet.cuh"
#ifdef INT8x4_EXT_CONFIG
#define IMAGE_FILE "D:\\zhaohengrui\\data\\imagenet\\val_image_NCHW_VECT_C.data"
#elif defined FLOAT_CONFIG
#define IMAGE_FILE "D:\\zhaohengrui\\data\\imagenet\\val_image_NCHW.data"
#endif
#define LABEL_FILE "D:\\zhaohengrui\\data\\imagenet\\val_label.data"
#define RESNET152
#ifdef RESNET18
#ifdef INT8x4_EXT_CONFIG
#define RESNET_MODEL "model\\resnet18_blu35103_q.data"
#elif defined FLOAT_CONFIG
#define RESNET_MODEL "model\\resnet18_blu35103.data"
#endif
#define STEP_FILE "model\\quant_param89.data"
#define BLU_FILE "model\\3sigma.blu"
#define QUANT_FILE "model\\resnet18_blu35103_q.data"
#define BLOCKCLASS 0
#define BLOCK_NUM {2,2,2,2}
#elif defined RESNET50
#define RESNET_MODEL "model\\resnet50.data"
#define BLOCKCLASS 1
#define BLOCK_NUM {3,4,6,3}
#elif defined RESNET152
#ifdef INT8x4_EXT_CONFIG
#define RESNET_MODEL "model\\resnet152_blu100_q.data"
#elif defined FLOAT_CONFIG
#define RESNET_MODEL "model\\resnet152_blu100.data"
#endif
#define STEP_FILE "model\\resnet152_quant_param.data"
#define BLU_FILE "model\\resnet152_blu.data"
#define QUANT_FILE "model\\resnet152_blu100_q.data"
#define BLOCKCLASS 1
#define BLOCK_NUM {3,8,36,3}
#endif
int quantizeNsave(void)
{
int i;
int num_gpus;
cudnnHandle_t cudnnHandle;
hipGetDeviceCount(&num_gpus);
hipSetDevice(0);
cudnnCreate(&cudnnHandle);
int resnet_blocks[4] = BLOCK_NUM;
Resnet resnet(cudnnHandle, 1, IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_CHANNEL, BLOCKCLASS, resnet_blocks);
resnet.load_para(RESNET_MODEL);
resnet.quantizeNsave(STEP_FILE, BLU_FILE, QUANT_FILE);
return 0;
}
int test_data(void)
{
rgb_data val(50, 224, 224, 3);
val.read_frame(IMAGE_FILE, LABEL_FILE, 12);
val.next_batch(IMAGE_FILE, LABEL_FILE);
val.preprocess();
return 0;
}
int test_conv(void)
{
int num_gpus;
cudnnHandle_t cudnnHandle;
rgb_data val(1, IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_CHANNEL);
InputLayer I1;
CovLayer C1;
PoolLayer P1;
BasicBlock B1;
BlockLayer BL1;
FILE*para_fp;
int workspaceSize;
void*workspace;
hipGetDeviceCount(&num_gpus);
hipSetDevice(0);
cudnnCreate(&cudnnHandle);
I1.build(2, 128, 28, 28);
C1.build(cudnnHandle, I1.x_outDesc, 2, 28, 28, 128, 128, 3, 1);
P1.build(CUDNN_POOLING_MAX, 1, 64, C1.outHeight, C1.outWidth, 3, 2);
B1.build(cudnnHandle, P1.uDesc, 1, P1.outHeight, P1.outWidth, 64, 64);
BL1.build(cudnnHandle, P1.uDesc, 1, P1.outHeight, P1.outWidth, 64, 1, 64, 0, 2);
workspaceSize = C1.workspaceSize > B1.workspaceSize ? C1.workspaceSize : B1.workspaceSize;
hipMalloc(&workspace, workspaceSize);
load_tensor((float*)I1.x_out, 2 * 28 * 28 * 128, "block2.data", 0);
load_tensor((float*)C1.w, 3 * 3 * 128 * 128, "block2.data", 2 * 28 * 28 * 128 * 2 * 4);
C1.ConvForward(cudnnHandle, I1.x_outDesc, I1.x_out, workspace, workspaceSize);
mse((float*)C1.u, 2 * 28 * 28 * 128, 1, "block2.data", 2 * 28 * 28 * 128 * 4);
fopen_s(&para_fp, RESNET_MODEL, "rb");
C1.load_para(para_fp);
B1.load_para(para_fp);
fclose(para_fp);
val.next_batch(IMAGE_FILE, LABEL_FILE);
val.preprocess();
I1.load(val.ppro);
//I1.ppro(cudnnHandle);
C1.ConvForward(cudnnHandle, I1.xDesc, I1.x, workspace, C1.workspaceSize);
C1.batch_norm(cudnnHandle);
C1.activate(cudnnHandle);
//C1.viewmem((xwtype*)I1.x);
P1.pool(cudnnHandle, C1.yDesc, C1.y);
//P1.viewmem();
B1.forward(cudnnHandle, &P1, workspace);
return 0;
}
int test_resnet(int batch_size)
{
int i;
int num_gpus;
LARGE_INTEGER StartingTime, EndingTime, ElapsedMicroseconds;
LARGE_INTEGER Frequency;
cudnnHandle_t cudnnHandle;
QueryPerformanceFrequency(&Frequency);
hipGetDeviceCount(&num_gpus);
hipSetDevice(0);
cudnnCreate(&cudnnHandle);
int resnet_blocks[4] = BLOCK_NUM;
rgb_data val(batch_size, IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_CHANNEL);
Resnet resnet(cudnnHandle, batch_size, IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_CHANNEL, BLOCKCLASS,resnet_blocks);
resnet.load_para(RESNET_MODEL);
//val.iter = 999;
for (i = 0;i < NUM_IMAGES/batch_size;i++)
{
val.next_batch(IMAGE_FILE, LABEL_FILE);
val.preprocess();//normalize
resnet.load_data(val.ppro);
hipDeviceSynchronize();
QueryPerformanceCounter(&StartingTime);
resnet.forward(cudnnHandle);
hipDeviceSynchronize();
QueryPerformanceCounter(&EndingTime);
ElapsedMicroseconds.QuadPart = EndingTime.QuadPart - StartingTime.QuadPart;
ElapsedMicroseconds.QuadPart *= 1000000;
ElapsedMicroseconds.QuadPart /= Frequency.QuadPart;
printf("time:%lldus\n", ElapsedMicroseconds.QuadPart);
val.loadPred_GPU(resnet.FC1.u);
val.batch_accuracy(5);
}
val.accuracy();
return 0;
}
int run_all(void)
{
//quantizeNsave();
//test_data();
//test_conv();
test_resnet(50);
return 0;
}
int main(int argc, char**argv)
{
//run_all(ORI_FILE, INPUT_FILE, HEIGHT, WIDTH);
run_all();
//quantizeNsave();
//system("pause");
}
| 537594c14acc76e9807874f71d72b1acc7320cd5.cu | #include <sstream>
#include <stdlib.h>
#include <windows.h>
#include <time.h>
#include "rgb_data.h"
#include "resnet.cuh"
#ifdef INT8x4_EXT_CONFIG
#define IMAGE_FILE "D:\\zhaohengrui\\data\\imagenet\\val_image_NCHW_VECT_C.data"
#elif defined FLOAT_CONFIG
#define IMAGE_FILE "D:\\zhaohengrui\\data\\imagenet\\val_image_NCHW.data"
#endif
#define LABEL_FILE "D:\\zhaohengrui\\data\\imagenet\\val_label.data"
#define RESNET152
#ifdef RESNET18
#ifdef INT8x4_EXT_CONFIG
#define RESNET_MODEL "model\\resnet18_blu35103_q.data"
#elif defined FLOAT_CONFIG
#define RESNET_MODEL "model\\resnet18_blu35103.data"
#endif
#define STEP_FILE "model\\quant_param89.data"
#define BLU_FILE "model\\3sigma.blu"
#define QUANT_FILE "model\\resnet18_blu35103_q.data"
#define BLOCKCLASS 0
#define BLOCK_NUM {2,2,2,2}
#elif defined RESNET50
#define RESNET_MODEL "model\\resnet50.data"
#define BLOCKCLASS 1
#define BLOCK_NUM {3,4,6,3}
#elif defined RESNET152
#ifdef INT8x4_EXT_CONFIG
#define RESNET_MODEL "model\\resnet152_blu100_q.data"
#elif defined FLOAT_CONFIG
#define RESNET_MODEL "model\\resnet152_blu100.data"
#endif
#define STEP_FILE "model\\resnet152_quant_param.data"
#define BLU_FILE "model\\resnet152_blu.data"
#define QUANT_FILE "model\\resnet152_blu100_q.data"
#define BLOCKCLASS 1
#define BLOCK_NUM {3,8,36,3}
#endif
int quantizeNsave(void)
{
int i;
int num_gpus;
cudnnHandle_t cudnnHandle;
cudaGetDeviceCount(&num_gpus);
cudaSetDevice(0);
cudnnCreate(&cudnnHandle);
int resnet_blocks[4] = BLOCK_NUM;
Resnet resnet(cudnnHandle, 1, IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_CHANNEL, BLOCKCLASS, resnet_blocks);
resnet.load_para(RESNET_MODEL);
resnet.quantizeNsave(STEP_FILE, BLU_FILE, QUANT_FILE);
return 0;
}
int test_data(void)
{
rgb_data val(50, 224, 224, 3);
val.read_frame(IMAGE_FILE, LABEL_FILE, 12);
val.next_batch(IMAGE_FILE, LABEL_FILE);
val.preprocess();
return 0;
}
int test_conv(void)
{
int num_gpus;
cudnnHandle_t cudnnHandle;
rgb_data val(1, IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_CHANNEL);
InputLayer I1;
CovLayer C1;
PoolLayer P1;
BasicBlock B1;
BlockLayer BL1;
FILE*para_fp;
int workspaceSize;
void*workspace;
cudaGetDeviceCount(&num_gpus);
cudaSetDevice(0);
cudnnCreate(&cudnnHandle);
I1.build(2, 128, 28, 28);
C1.build(cudnnHandle, I1.x_outDesc, 2, 28, 28, 128, 128, 3, 1);
P1.build(CUDNN_POOLING_MAX, 1, 64, C1.outHeight, C1.outWidth, 3, 2);
B1.build(cudnnHandle, P1.uDesc, 1, P1.outHeight, P1.outWidth, 64, 64);
BL1.build(cudnnHandle, P1.uDesc, 1, P1.outHeight, P1.outWidth, 64, 1, 64, 0, 2);
workspaceSize = C1.workspaceSize > B1.workspaceSize ? C1.workspaceSize : B1.workspaceSize;
cudaMalloc(&workspace, workspaceSize);
load_tensor((float*)I1.x_out, 2 * 28 * 28 * 128, "block2.data", 0);
load_tensor((float*)C1.w, 3 * 3 * 128 * 128, "block2.data", 2 * 28 * 28 * 128 * 2 * 4);
C1.ConvForward(cudnnHandle, I1.x_outDesc, I1.x_out, workspace, workspaceSize);
mse((float*)C1.u, 2 * 28 * 28 * 128, 1, "block2.data", 2 * 28 * 28 * 128 * 4);
fopen_s(¶_fp, RESNET_MODEL, "rb");
C1.load_para(para_fp);
B1.load_para(para_fp);
fclose(para_fp);
val.next_batch(IMAGE_FILE, LABEL_FILE);
val.preprocess();
I1.load(val.ppro);
//I1.ppro(cudnnHandle);
C1.ConvForward(cudnnHandle, I1.xDesc, I1.x, workspace, C1.workspaceSize);
C1.batch_norm(cudnnHandle);
C1.activate(cudnnHandle);
//C1.viewmem((xwtype*)I1.x);
P1.pool(cudnnHandle, C1.yDesc, C1.y);
//P1.viewmem();
B1.forward(cudnnHandle, &P1, workspace);
return 0;
}
int test_resnet(int batch_size)
{
int i;
int num_gpus;
LARGE_INTEGER StartingTime, EndingTime, ElapsedMicroseconds;
LARGE_INTEGER Frequency;
cudnnHandle_t cudnnHandle;
QueryPerformanceFrequency(&Frequency);
cudaGetDeviceCount(&num_gpus);
cudaSetDevice(0);
cudnnCreate(&cudnnHandle);
int resnet_blocks[4] = BLOCK_NUM;
rgb_data val(batch_size, IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_CHANNEL);
Resnet resnet(cudnnHandle, batch_size, IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_CHANNEL, BLOCKCLASS,resnet_blocks);
resnet.load_para(RESNET_MODEL);
//val.iter = 999;
for (i = 0;i < NUM_IMAGES/batch_size;i++)
{
val.next_batch(IMAGE_FILE, LABEL_FILE);
val.preprocess();//normalize
resnet.load_data(val.ppro);
cudaDeviceSynchronize();
QueryPerformanceCounter(&StartingTime);
resnet.forward(cudnnHandle);
cudaDeviceSynchronize();
QueryPerformanceCounter(&EndingTime);
ElapsedMicroseconds.QuadPart = EndingTime.QuadPart - StartingTime.QuadPart;
ElapsedMicroseconds.QuadPart *= 1000000;
ElapsedMicroseconds.QuadPart /= Frequency.QuadPart;
printf("time:%lldus\n", ElapsedMicroseconds.QuadPart);
val.loadPred_GPU(resnet.FC1.u);
val.batch_accuracy(5);
}
val.accuracy();
return 0;
}
int run_all(void)
{
//quantizeNsave();
//test_data();
//test_conv();
test_resnet(50);
return 0;
}
int main(int argc, char**argv)
{
//run_all(ORI_FILE, INPUT_FILE, HEIGHT, WIDTH);
run_all();
//quantizeNsave();
//system("pause");
}
|
6571ffa3ab6c3a216b683309221304d55200b29e.hip | // !!! This is a file automatically generated by hipify!!!
#include "common.h"
#include <hip/hip_runtime_api.h>
#include <stdint.h>
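// Naive baseline: one thread per input element, each issuing an atomicAdd
// into the per-channel accumulator mean[c]. Correct but heavily contended;
// the *_fast* variants below replace this with warp-level partial sums.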
__global__ void estimate_conv_mean_batch_kernel(
const float *src,
int spatial_dim,
int channels,
int batch_size,
float *mean)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
/*int u = idx % spatial_dim;
int c = (idx / spatial_dim) % channels;*/
int c = idx % channels;
int u = (idx / channels) % spatial_dim;
int batch_idx = idx / (channels * spatial_dim);
if (c < channels && u < spatial_dim && batch_idx < batch_size) {
//float dy = src[idx];
int i = u + c * spatial_dim + batch_idx * spatial_dim * channels;
float dy = src[i];
atomicAdd(&mean[c], dy);
}
}
__global__ void estimate_conv_mean_fast_batch_kernel(
const float *src,
int spatial_dim,
int channels,
int batch_size,
float *mean)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int unroll_spatial_dim = (spatial_dim+32-1)/32;
int c = idx % channels;
int u0 = ((idx / channels) % unroll_spatial_dim) * 32;
int batch_idx = idx / (channels * unroll_spatial_dim);
if (c < channels && u0 < spatial_dim && batch_idx < batch_size) {
float dy = 0.0f;
int i0 = c * spatial_dim + batch_idx * spatial_dim * channels;
int u_limit = min(u0+32, spatial_dim);
for (int u = u0; u < u_limit; u++) {
int i = i0 + u;
dy += src[i];
}
atomicAdd(&mean[c], dy);
}
}
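// fast2 variant: each channel gets a 32-thread warp. Every lane strides
// across a 16*32-element chunk of the spatial dimension, partial sums land
// in shared memory (OFFSET_BANK padding avoids bank conflicts), and a
// five-step tree reduction folds the 32 lanes together before lane 0 does
// a single atomicAdd per warp. Unlike the naive kernel, this one writes the
// normalized mean (divided by spatial_dim * batch_size) rather than a raw sum.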
__global__ void estimate_conv_mean_fast2_batch_kernel(
const float *src,
int spatial_dim,
int num_channels,
int batch_size,
float *mean)
{
__shared__ float mean_cache[1024+32];
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int bank_idx = OFFSET_BANK(threadIdx.x);
int block_spatial_dim = (spatial_dim+16*32-1)/(16*32);
int warp_idx = idx % 32;
int c = (idx / 32) % num_channels;
//int u0 = warp_idx + ((idx / (32 * num_channels)) % block_spatial_dim) * (16*32);
int u0 = ((idx / (32 * num_channels)) % block_spatial_dim) * (16*32);
int batch_idx = idx / (32 * num_channels * block_spatial_dim);
if (c < num_channels && u0 < spatial_dim && batch_idx < batch_size) {
float y = 0.0f;
/*int i0 = c * spatial_dim + batch_idx * spatial_dim * num_channels;
int u_limit = min(spatial_dim, u0 + 16*32);
for (int u = u0; u < u_limit; u += 32) {
int i = i0 + u;
y += src[i];
}*/
int i0 = warp_idx + u0 + c * spatial_dim + batch_idx * spatial_dim * num_channels;
int i_limit = i0 + min(spatial_dim - warp_idx - u0, 16*32);
for (int v = 0; v < 16*32; v += 32) {
int i = i0 + v;
if (i < i_limit) {
y += src[i];
}
}
mean_cache[bank_idx] = y;
} else {
mean_cache[bank_idx] = 0.0f;
}
__syncthreads();
if (c < num_channels && batch_idx < batch_size) {
if (threadIdx.x % 2 == 0) {
mean_cache[bank_idx] += mean_cache[bank_idx+1];
}
}
__syncthreads();
if (c < num_channels && batch_idx < batch_size) {
if (threadIdx.x % 4 == 0) {
mean_cache[bank_idx] += mean_cache[bank_idx+2];
}
}
__syncthreads();
if (c < num_channels && batch_idx < batch_size) {
if (threadIdx.x % 8 == 0) {
mean_cache[bank_idx] += mean_cache[bank_idx+4];
}
}
__syncthreads();
if (c < num_channels && batch_idx < batch_size) {
if (threadIdx.x % 16 == 0) {
mean_cache[bank_idx] += mean_cache[bank_idx+8];
}
}
__syncthreads();
if (c < num_channels && batch_idx < batch_size) {
if (threadIdx.x % 32 == 0 && u0 < spatial_dim) {
float y = (mean_cache[bank_idx] + mean_cache[bank_idx+16]) / ((float)(spatial_dim) * (float)(batch_size));
atomicAdd(&mean[c], y);
}
}
}
extern "C" void rembrandt_kernel_estimate_conv_mean_batch(
const float *src,
int spatial_dim,
int channels,
int batch_size,
float *mean,
hipStream_t stream)
{
int n = spatial_dim * channels * batch_size;
hipLaunchKernelGGL(( estimate_conv_mean_batch_kernel), dim3((n+1024-1)/1024), dim3(1024), 0, stream,
src, spatial_dim, channels, batch_size, mean);
}
extern "C" void rembrandt_kernel_estimate_conv_mean_fast_batch(
const float *src,
int spatial_dim,
int num_channels,
int batch_size,
float *mean,
hipStream_t stream)
{
//int n = ((spatial_dim+32-1)/32) * channels * batch_size;
int block_spatial_dim = (spatial_dim+16*32-1)/(16*32);
int n = 32 * num_channels * block_spatial_dim * batch_size;
hipLaunchKernelGGL(( estimate_conv_mean_fast2_batch_kernel), dim3((n+1024-1)/1024), dim3(1024), 0, stream,
src, spatial_dim, num_channels, batch_size, mean);
}
__global__ void estimate_conv_var_batch_kernel(
const float *src,
int spatial_dim,
int channels,
int batch_size,
const float *mean,
float *var)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
/*int u = idx % spatial_dim;
int c = (idx / spatial_dim) % channels;*/
int c = idx % channels;
int u = (idx / channels) % spatial_dim;
int batch_idx = idx / (channels * spatial_dim);
if (c < channels && u < spatial_dim && batch_idx < batch_size) {
int i = u + c * spatial_dim + batch_idx * spatial_dim * channels;
float mean_c = mean[c] / ((float)(batch_size));
//float delta = src[idx] - mean_c;
float delta = src[i] - mean_c;
float dy = delta * delta;
atomicAdd(&var[c], dy);
}
}
__global__ void estimate_conv_var_fast_batch_kernel(
const float *src,
int spatial_dim,
int channels,
int batch_size,
const float *mean,
float *var)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int unroll_spatial_dim = (spatial_dim+32-1)/32;
int c = idx % channels;
int u0 = ((idx / channels) % unroll_spatial_dim) * 32;
int batch_idx = idx / (channels * unroll_spatial_dim);
if (c < channels && u0 < spatial_dim && batch_idx < batch_size) {
float mean_c = mean[c] / ((float)(batch_size));
float dy = 0.0f;
int i0 = c * spatial_dim + batch_idx * spatial_dim * channels;
int u_limit = min(u0+32, spatial_dim);
for (int u = u0; u < u_limit; u++) {
int i = i0 + u;
float delta = src[i] - mean_c;
dy += delta * delta;
}
atomicAdd(&var[c], dy);
}
}
__global__ void estimate_conv_var_fast2_batch_kernel(
const float *src,
int spatial_dim,
int num_channels,
int batch_size,
const float *__restrict__ mean,
float *var)
{
__shared__ float var_cache[1024+32];
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int bank_idx = OFFSET_BANK(threadIdx.x);
int block_spatial_dim = (spatial_dim+16*32-1)/(16*32);
int warp_idx = idx % 32;
int c = (idx / 32) % num_channels;
//int u0 = warp_idx + ((idx / (32 * num_channels)) % block_spatial_dim) * (16*32);
int u0 = ((idx / (32 * num_channels)) % block_spatial_dim) * (16*32);
int batch_idx = idx / (32 * num_channels * block_spatial_dim);
if (c < num_channels && u0 < spatial_dim && batch_idx < batch_size) {
float mean_c = mean[c];
float y = 0.0f;
/*int i0 = c * spatial_dim + batch_idx * spatial_dim * num_channels;
int u_limit = min(spatial_dim, u0 + 16*32);
for (int u = u0; u < u_limit; u += 32) {
int i = i0 + u;
float delta = src[i] - mean_c;
y += delta * delta;
}*/
int i0 = warp_idx + u0 + c * spatial_dim + batch_idx * spatial_dim * num_channels;
int i_limit = i0 + min(spatial_dim - warp_idx - u0, 16*32);
for (int v = 0; v < 16*32; v += 32) {
int i = i0 + v;
if (i < i_limit) {
float delta = src[i] - mean_c;
y += delta * delta;
}
}
var_cache[bank_idx] = y;
} else {
var_cache[bank_idx] = 0.0f;
}
__syncthreads();
if (c < num_channels && batch_idx < batch_size) {
if (threadIdx.x % 2 == 0) {
var_cache[bank_idx] += var_cache[bank_idx+1];
}
}
__syncthreads();
if (c < num_channels && batch_idx < batch_size) {
if (threadIdx.x % 4 == 0) {
var_cache[bank_idx] += var_cache[bank_idx+2];
}
}
__syncthreads();
if (c < num_channels && batch_idx < batch_size) {
if (threadIdx.x % 8 == 0) {
var_cache[bank_idx] += var_cache[bank_idx+4];
}
}
__syncthreads();
if (c < num_channels && batch_idx < batch_size) {
if (threadIdx.x % 16 == 0) {
var_cache[bank_idx] += var_cache[bank_idx+8];
}
}
__syncthreads();
if (c < num_channels && batch_idx < batch_size) {
if (threadIdx.x % 32 == 0 && u0 < spatial_dim) {
float y = (var_cache[bank_idx] + var_cache[bank_idx+16]) / ((float)(spatial_dim-1) * (float)(batch_size-1));
atomicAdd(&var[c], y);
}
}
}
extern "C" void rembrandt_kernel_estimate_conv_var_batch(
const float *src,
int spatial_dim,
int channels,
int batch_size,
const float *mean,
float *var,
hipStream_t stream)
{
int n = spatial_dim * channels * batch_size;
hipLaunchKernelGGL(( estimate_conv_var_batch_kernel), dim3((n+1024-1)/1024), dim3(1024), 0, stream,
src, spatial_dim, channels, batch_size, mean, var);
}
extern "C" void rembrandt_kernel_estimate_conv_var_fast_batch(
const float *src,
int spatial_dim,
int num_channels,
int batch_size,
const float *mean,
float *var,
hipStream_t stream)
{
//int n = ((spatial_dim+32-1)/32) * channels * batch_size;
int block_spatial_dim = (spatial_dim+16*32-1)/(16*32);
int n = 32 * num_channels * block_spatial_dim * batch_size;
hipLaunchKernelGGL(( estimate_conv_var_fast2_batch_kernel), dim3((n+1024-1)/1024), dim3(1024), 0, stream,
src, spatial_dim, num_channels, batch_size, mean, var);
}
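// Folds a per-batch variance estimate into a running one using the pairwise
// update of Chan et al.: M2_ab = M2_a + M2_b + (n_a/(n_a+n_b)) * n_b * delta^2,
// where delta is the difference of the two means. mean_batch and mean_acc
// arrive as unnormalized sums, hence the divisions by batch_size and acc_size.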
__global__ void estimate_online_var_kernel(
const float *mean_batch,
int dim,
const float *var_batch,
const float *mean_acc,
int batch_size,
int acc_size,
float *var_acc)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < dim) {
float var_a = var_acc[idx];
float var_b = var_batch[idx];
float delta = mean_batch[idx] / ((float)(batch_size)) - mean_acc[idx] / ((float)(acc_size));
float y = var_a + var_b + ((float)(acc_size)) / ((float)(acc_size + batch_size)) * ((float)(batch_size)) * delta * delta;
var_acc[idx] = y;
}
}
extern "C" void rembrandt_kernel_estimate_online_var(
const float *mean_batch,
int dim,
const float *var_batch,
const float *mean_acc,
int batch_size,
int acc_size,
float *var_acc,
hipStream_t stream)
{
hipLaunchKernelGGL(( estimate_online_var_kernel), dim3((dim+1024-1)/1024), dim3(1024), 0, stream,
mean_batch, dim, var_batch, mean_acc, batch_size, acc_size, var_acc);
}
__global__ void estimate_invstd_kernel(
const float *var,
int dim,
float epsilon,
float *invstd)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < dim) {
float y = rsqrtf(var[idx] + epsilon);
invstd[idx] = y;
}
}
extern "C" void rembrandt_kernel_estimate_invstd(
const float *var,
int dim,
float epsilon,
float *invstd,
hipStream_t stream)
{
hipLaunchKernelGGL(( estimate_invstd_kernel), dim3((dim+1024-1)/1024), dim3(1024), 0, stream,
var, dim, epsilon, invstd);
}
| 6571ffa3ab6c3a216b683309221304d55200b29e.cu | #include "common.h"
#include <cuda_runtime_api.h>
#include <stdint.h>
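// Naive baseline: one thread per input element, each issuing an atomicAdd
// into the per-channel accumulator mean[c]. Correct but heavily contended;
// the *_fast* variants below replace this with warp-level partial sums.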
__global__ void estimate_conv_mean_batch_kernel(
const float *src,
int spatial_dim,
int channels,
int batch_size,
float *mean)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
/*int u = idx % spatial_dim;
int c = (idx / spatial_dim) % channels;*/
int c = idx % channels;
int u = (idx / channels) % spatial_dim;
int batch_idx = idx / (channels * spatial_dim);
if (c < channels && u < spatial_dim && batch_idx < batch_size) {
//float dy = src[idx];
int i = u + c * spatial_dim + batch_idx * spatial_dim * channels;
float dy = src[i];
atomicAdd(&mean[c], dy);
}
}
__global__ void estimate_conv_mean_fast_batch_kernel(
const float *src,
int spatial_dim,
int channels,
int batch_size,
float *mean)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int unroll_spatial_dim = (spatial_dim+32-1)/32;
int c = idx % channels;
int u0 = ((idx / channels) % unroll_spatial_dim) * 32;
int batch_idx = idx / (channels * unroll_spatial_dim);
if (c < channels && u0 < spatial_dim && batch_idx < batch_size) {
float dy = 0.0f;
int i0 = c * spatial_dim + batch_idx * spatial_dim * channels;
int u_limit = min(u0+32, spatial_dim);
for (int u = u0; u < u_limit; u++) {
int i = i0 + u;
dy += src[i];
}
atomicAdd(&mean[c], dy);
}
}
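// fast2 variant: each channel gets a 32-thread warp. Every lane strides
// across a 16*32-element chunk of the spatial dimension, partial sums land
// in shared memory (OFFSET_BANK padding avoids bank conflicts), and a
// five-step tree reduction folds the 32 lanes together before lane 0 does
// a single atomicAdd per warp. Unlike the naive kernel, this one writes the
// normalized mean (divided by spatial_dim * batch_size) rather than a raw sum.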
__global__ void estimate_conv_mean_fast2_batch_kernel(
const float *src,
int spatial_dim,
int num_channels,
int batch_size,
float *mean)
{
__shared__ float mean_cache[1024+32];
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int bank_idx = OFFSET_BANK(threadIdx.x);
int block_spatial_dim = (spatial_dim+16*32-1)/(16*32);
int warp_idx = idx % 32;
int c = (idx / 32) % num_channels;
//int u0 = warp_idx + ((idx / (32 * num_channels)) % block_spatial_dim) * (16*32);
int u0 = ((idx / (32 * num_channels)) % block_spatial_dim) * (16*32);
int batch_idx = idx / (32 * num_channels * block_spatial_dim);
if (c < num_channels && u0 < spatial_dim && batch_idx < batch_size) {
float y = 0.0f;
/*int i0 = c * spatial_dim + batch_idx * spatial_dim * num_channels;
int u_limit = min(spatial_dim, u0 + 16*32);
for (int u = u0; u < u_limit; u += 32) {
int i = i0 + u;
y += src[i];
}*/
int i0 = warp_idx + u0 + c * spatial_dim + batch_idx * spatial_dim * num_channels;
int i_limit = i0 + min(spatial_dim - warp_idx - u0, 16*32);
for (int v = 0; v < 16*32; v += 32) {
int i = i0 + v;
if (i < i_limit) {
y += src[i];
}
}
mean_cache[bank_idx] = y;
} else {
mean_cache[bank_idx] = 0.0f;
}
__syncthreads();
if (c < num_channels && batch_idx < batch_size) {
if (threadIdx.x % 2 == 0) {
mean_cache[bank_idx] += mean_cache[bank_idx+1];
}
}
__syncthreads();
if (c < num_channels && batch_idx < batch_size) {
if (threadIdx.x % 4 == 0) {
mean_cache[bank_idx] += mean_cache[bank_idx+2];
}
}
__syncthreads();
if (c < num_channels && batch_idx < batch_size) {
if (threadIdx.x % 8 == 0) {
mean_cache[bank_idx] += mean_cache[bank_idx+4];
}
}
__syncthreads();
if (c < num_channels && batch_idx < batch_size) {
if (threadIdx.x % 16 == 0) {
mean_cache[bank_idx] += mean_cache[bank_idx+8];
}
}
__syncthreads();
if (c < num_channels && batch_idx < batch_size) {
if (threadIdx.x % 32 == 0 && u0 < spatial_dim) {
float y = (mean_cache[bank_idx] + mean_cache[bank_idx+16]) / ((float)(spatial_dim) * (float)(batch_size));
atomicAdd(&mean[c], y);
}
}
}
extern "C" void rembrandt_kernel_estimate_conv_mean_batch(
const float *src,
int spatial_dim,
int channels,
int batch_size,
float *mean,
cudaStream_t stream)
{
int n = spatial_dim * channels * batch_size;
estimate_conv_mean_batch_kernel<<<(n+1024-1)/1024, 1024, 0, stream>>>(
src, spatial_dim, channels, batch_size, mean);
}
extern "C" void rembrandt_kernel_estimate_conv_mean_fast_batch(
const float *src,
int spatial_dim,
int num_channels,
int batch_size,
float *mean,
cudaStream_t stream)
{
//int n = ((spatial_dim+32-1)/32) * channels * batch_size;
int block_spatial_dim = (spatial_dim+16*32-1)/(16*32);
int n = 32 * num_channels * block_spatial_dim * batch_size;
estimate_conv_mean_fast2_batch_kernel<<<(n+1024-1)/1024, 1024, 0, stream>>>(
src, spatial_dim, num_channels, batch_size, mean);
}
__global__ void estimate_conv_var_batch_kernel(
const float *src,
int spatial_dim,
int channels,
int batch_size,
const float *mean,
float *var)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
/*int u = idx % spatial_dim;
int c = (idx / spatial_dim) % channels;*/
int c = idx % channels;
int u = (idx / channels) % spatial_dim;
int batch_idx = idx / (channels * spatial_dim);
if (c < channels && u < spatial_dim && batch_idx < batch_size) {
int i = u + c * spatial_dim + batch_idx * spatial_dim * channels;
float mean_c = mean[c] / ((float)(batch_size));
//float delta = src[idx] - mean_c;
float delta = src[i] - mean_c;
float dy = delta * delta;
atomicAdd(&var[c], dy);
}
}
__global__ void estimate_conv_var_fast_batch_kernel(
const float *src,
int spatial_dim,
int channels,
int batch_size,
const float *mean,
float *var)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int unroll_spatial_dim = (spatial_dim+32-1)/32;
int c = idx % channels;
int u0 = ((idx / channels) % unroll_spatial_dim) * 32;
int batch_idx = idx / (channels * unroll_spatial_dim);
if (c < channels && u0 < spatial_dim && batch_idx < batch_size) {
float mean_c = mean[c] / ((float)(batch_size));
float dy = 0.0f;
int i0 = c * spatial_dim + batch_idx * spatial_dim * channels;
int u_limit = min(u0+32, spatial_dim);
for (int u = u0; u < u_limit; u++) {
int i = i0 + u;
float delta = src[i] - mean_c;
dy += delta * delta;
}
atomicAdd(&var[c], dy);
}
}
__global__ void estimate_conv_var_fast2_batch_kernel(
const float *src,
int spatial_dim,
int num_channels,
int batch_size,
const float *__restrict__ mean,
float *var)
{
__shared__ float var_cache[1024+32];
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int bank_idx = OFFSET_BANK(threadIdx.x);
int block_spatial_dim = (spatial_dim+16*32-1)/(16*32);
int warp_idx = idx % 32;
int c = (idx / 32) % num_channels;
//int u0 = warp_idx + ((idx / (32 * num_channels)) % block_spatial_dim) * (16*32);
int u0 = ((idx / (32 * num_channels)) % block_spatial_dim) * (16*32);
int batch_idx = idx / (32 * num_channels * block_spatial_dim);
if (c < num_channels && u0 < spatial_dim && batch_idx < batch_size) {
float mean_c = mean[c];
float y = 0.0f;
/*int i0 = c * spatial_dim + batch_idx * spatial_dim * num_channels;
int u_limit = min(spatial_dim, u0 + 16*32);
for (int u = u0; u < u_limit; u += 32) {
int i = i0 + u;
float delta = src[i] - mean_c;
y += delta * delta;
}*/
int i0 = warp_idx + u0 + c * spatial_dim + batch_idx * spatial_dim * num_channels;
int i_limit = i0 + min(spatial_dim - warp_idx - u0, 16*32);
for (int v = 0; v < 16*32; v += 32) {
int i = i0 + v;
if (i < i_limit) {
float delta = src[i] - mean_c;
y += delta * delta;
}
}
var_cache[bank_idx] = y;
} else {
var_cache[bank_idx] = 0.0f;
}
__syncthreads();
if (c < num_channels && batch_idx < batch_size) {
if (threadIdx.x % 2 == 0) {
var_cache[bank_idx] += var_cache[bank_idx+1];
}
}
__syncthreads();
if (c < num_channels && batch_idx < batch_size) {
if (threadIdx.x % 4 == 0) {
var_cache[bank_idx] += var_cache[bank_idx+2];
}
}
__syncthreads();
if (c < num_channels && batch_idx < batch_size) {
if (threadIdx.x % 8 == 0) {
var_cache[bank_idx] += var_cache[bank_idx+4];
}
}
__syncthreads();
if (c < num_channels && batch_idx < batch_size) {
if (threadIdx.x % 16 == 0) {
var_cache[bank_idx] += var_cache[bank_idx+8];
}
}
__syncthreads();
if (c < num_channels && batch_idx < batch_size) {
if (threadIdx.x % 32 == 0 && u0 < spatial_dim) {
float y = (var_cache[bank_idx] + var_cache[bank_idx+16]) / ((float)(spatial_dim-1) * (float)(batch_size-1));
atomicAdd(&var[c], y);
}
}
}
extern "C" void rembrandt_kernel_estimate_conv_var_batch(
const float *src,
int spatial_dim,
int channels,
int batch_size,
const float *mean,
float *var,
cudaStream_t stream)
{
int n = spatial_dim * channels * batch_size;
estimate_conv_var_batch_kernel<<<(n+1024-1)/1024, 1024, 0, stream>>>(
src, spatial_dim, channels, batch_size, mean, var);
}
extern "C" void rembrandt_kernel_estimate_conv_var_fast_batch(
const float *src,
int spatial_dim,
int num_channels,
int batch_size,
const float *mean,
float *var,
cudaStream_t stream)
{
//int n = ((spatial_dim+32-1)/32) * channels * batch_size;
int block_spatial_dim = (spatial_dim+16*32-1)/(16*32);
int n = 32 * num_channels * block_spatial_dim * batch_size;
estimate_conv_var_fast2_batch_kernel<<<(n+1024-1)/1024, 1024, 0, stream>>>(
src, spatial_dim, num_channels, batch_size, mean, var);
}
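// Folds a per-batch variance estimate into a running one using the pairwise
// update of Chan et al.: M2_ab = M2_a + M2_b + (n_a/(n_a+n_b)) * n_b * delta^2,
// where delta is the difference of the two means. mean_batch and mean_acc
// arrive as unnormalized sums, hence the divisions by batch_size and acc_size.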
__global__ void estimate_online_var_kernel(
const float *mean_batch,
int dim,
const float *var_batch,
const float *mean_acc,
int batch_size,
int acc_size,
float *var_acc)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < dim) {
float var_a = var_acc[idx];
float var_b = var_batch[idx];
float delta = mean_batch[idx] / ((float)(batch_size)) - mean_acc[idx] / ((float)(acc_size));
float y = var_a + var_b + ((float)(acc_size)) / ((float)(acc_size + batch_size)) * ((float)(batch_size)) * delta * delta;
var_acc[idx] = y;
}
}
extern "C" void rembrandt_kernel_estimate_online_var(
const float *mean_batch,
int dim,
const float *var_batch,
const float *mean_acc,
int batch_size,
int acc_size,
float *var_acc,
cudaStream_t stream)
{
estimate_online_var_kernel<<<(dim+1024-1)/1024, 1024, 0, stream>>>(
mean_batch, dim, var_batch, mean_acc, batch_size, acc_size, var_acc);
}
__global__ void estimate_invstd_kernel(
const float *var,
int dim,
float epsilon,
float *invstd)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < dim) {
float y = rsqrtf(var[idx] + epsilon);
invstd[idx] = y;
}
}
extern "C" void rembrandt_kernel_estimate_invstd(
const float *var,
int dim,
float epsilon,
float *invstd,
cudaStream_t stream)
{
estimate_invstd_kernel<<<(dim+1024-1)/1024, 1024, 0, stream>>>(
var, dim, epsilon, invstd);
}
|
abac1eb121de52fd19d924bcc9d2131e10742f76.hip | // !!! This is a file automatically generated by hipify!!!
/* class RSAGPUAttack implementation in CUDA
* 3/16/2017
*/
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
#include <stdint.h>
#include <hiprand/hiprand.h>
#include <fstream>
#include <stdexcept>
#include <iostream>
using namespace std;
#include "RSAGPUAttack.h"
#include "rsa_gpu.hh"
#include "tanc.hh"
#define checkCurandErrors(x) do { hiprandStatus_t status= (x);\
if (status != HIPRAND_STATUS_SUCCESS) { \
printf("Error %d at %s:%d\n", status, __FILE__, __LINE__); \
exit(EXIT_FAILURE);}} while(0)
RSAGPUAttack::RSAGPUAttack(
int traceNum_,
int traceSize_,
int seed_,
SW_Type swType,
const char *fileName,
int keyBits_)
:RSAGPU(swType, KEY_BITS / 8, fileName, keyBits_), traceNum(traceNum_), traceSize(traceSize_), seed(seed_) {
// Allocated device memory for traceNum * traceSize of msg
checkCudaErrors(hipMalloc(&deviceMsg, traceNum * traceSize * keyBytes));
// Device memory for time info and reset to 0
checkCudaErrors(hipMalloc(&deviceTime, traceNum * sizeof(uint64_t)));
checkCudaErrors(hipMemset(deviceTime, 0, traceNum * sizeof(int64_t)));
// Host memory for time info and reduction
hostTime = new uint64_t[traceNum];
}
RSAGPUAttack::~RSAGPUAttack() {
// Free device and host memory
checkCudaErrors(hipFree(deviceMsg));
checkCudaErrors(hipFree(deviceTime));
delete [] hostTime;
}
void RSAGPUAttack::genRandMsg() {
// Generate rand msg
hiprandGenerator_t gen;
checkCurandErrors(hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_DEFAULT));
checkCurandErrors(hiprandSetPseudoRandomGeneratorSeed(gen, seed));
checkCurandErrors(hiprandGenerate(
gen, (unsigned int *)deviceMsg, traceNum * traceSize * keyBytes / sizeof(unsigned int)));
checkCurandErrors(hiprandDestroyGenerator(gen));
checkCudaErrors(hipDeviceSynchronize());
}
// Record the running time of decryption of msges
void RSAGPUAttack::recordTime(const char* timeFileName) {
// Time the decryption
int gridSize = (traceSize * keySize + BLK_SIZE -1) / BLK_SIZE;
cout << "record time grid size " << gridSize << endl;
for (int i = 0; i < traceNum; i++) {
switch (swType) {
case (SW_Type::none):
hipLaunchKernelGGL(( gpu_modexp_timing), dim3(gridSize), dim3(BLK_SIZE), 0, 0,
traceSize, keySize, deviceMsg + i * keySize * traceSize, deviceTime + i);
break;
case (SW_Type::clnw):
case (SW_Type::vlnw):
hipLaunchKernelGGL(( gpu_modexp_timing_sw), dim3(gridSize), dim3(BLK_SIZE), 0, 0,
traceSize, keySize, deviceMsg + i * keySize * traceSize, deviceTime + i);
break;
}
// single time copy to avoid kernel overlapping
checkCudaErrors(hipMemcpy(hostTime + i, deviceTime + i, sizeof(uint64_t), hipMemcpyDeviceToHost));
}
// Save timing into file
ofstream timeFile(timeFileName, ios::binary | ios::out);
if (!timeFile) throw runtime_error("Could not open time file");
timeFile.write((const char*)hostTime, traceNum * sizeof(uint64_t));
timeFile.close();
}
// Record the reductions of decryption of msges
// The size of reduction is the same as msg size and key size for binary MP
void RSAGPUAttack::recordReduction(const char * reductionFileName) {
// Device memory for reduction, for each msg, there will be KEY_BITS reductions, needing KEY_BITS bits memory
// Double the size of reduction by including both Square and Multiplication
uint64_t* deviceReduction;
checkCudaErrors(hipMalloc(&deviceReduction, traceNum * traceSize * keyBytes * 2));
checkCudaErrors(hipMemset(deviceReduction, 0, traceNum * traceSize * keyBytes * 2));
uint64_t* hostReduction = new uint64_t[traceNum * traceSize * keySize * 2];
int gridSize = (BLK_SIZE - 1 + traceNum * traceSize * keySize) / BLK_SIZE;
hipLaunchKernelGGL(( gpu_modexp_reduction), dim3(gridSize), dim3(BLK_SIZE), 0, 0,
traceNum * traceSize, keySize, deviceMsg, deviceReduction);
checkCudaErrors(hipMemcpy
(hostReduction, deviceReduction, traceNum * traceSize * keyBytes * 2, hipMemcpyDeviceToHost));
// Save reductions into file
ofstream reductionFile(reductionFileName, ios::binary | ios::out);
if (!reductionFile) throw runtime_error("Could not open reduction file");
reductionFile.write((const char*)hostReduction, traceNum * traceSize * keyBytes * 2);
reductionFile.close();
checkCudaErrors(hipFree(deviceReduction));
delete [] hostReduction;
}
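// Kocher-style timing attack driver: recover the exponent piecewise by
// replaying the recorded messages up to each candidate key bit/window and
// correlating predicted Montgomery extra-reduction counts with the measured
// decryption times. The correlation itself is assumed to live in the
// dt_attack* routines declared in tanc.hh.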
void RSAGPUAttack::timingAttack(const char *fileName) {
// Load time info from fileName
uint64_t *timeInfo = new uint64_t[traceNum];
ifstream dataFile(fileName, ios::binary | ios::in);
if (!dataFile) throw runtime_error("Could not open file for timing attack.");
dataFile.read((char *)timeInfo, traceNum * sizeof(uint64_t));
WORD *msges0, *msges1, *msges_mont; // device memory for intermediate result
checkCudaErrors(hipMalloc(&msges0, traceNum * traceSize * keyBytes));
int gridSize = (traceNum * traceSize * keySize + BLK_SIZE - 1) / BLK_SIZE;
hipLaunchKernelGGL(( gpu_preprocessing_sw), dim3(gridSize), dim3(BLK_SIZE), 0, 0,
traceNum * traceSize, keySize, deviceMsg, msges0);
// Attack
switch (swType) {
case (SW_Type::clnw):
hipLaunchKernelGGL(( gpu_preprocessing_sw), dim3(gridSize), dim3(BLK_SIZE), 0, 0,
traceNum * traceSize, keySize, deviceMsg, msges0);
dt_attack_sw(keySize, traceSize, traceNum, timeInfo, key.clnw, msges0, 128);
break;
case (SW_Type::vlnw):
hipLaunchKernelGGL(( gpu_preprocessing_sw), dim3(gridSize), dim3(BLK_SIZE), 0, 0,
traceNum * traceSize, keySize, deviceMsg, msges0);
dt_attack_vlnw(keySize, traceSize, traceNum, timeInfo, key.vlnw, msges0, 158);
break;
case (SW_Type::none):
checkCudaErrors(hipMalloc(&msges1, traceNum * traceSize * keyBytes));
checkCudaErrors(hipMalloc(&msges_mont, traceNum * traceSize * keyBytes));
hipLaunchKernelGGL(( gpu_preprocessing), dim3(gridSize), dim3(BLK_SIZE), 0, 0,
traceSize * traceNum, keySize, deviceMsg, msges0, msges_mont);
int bit_size = 1;
int bit_num = 508;
dt_attack(keySize, traceSize, traceNum, bit_size, bit_num, timeInfo,
key.d, msges0, msges1, msges_mont);
checkCudaErrors(hipFree(msges1));
checkCudaErrors(hipFree(msges_mont));
break;
}
// Free memory
delete [] timeInfo;
checkCudaErrors(hipFree(msges0));
}
| abac1eb121de52fd19d924bcc9d2131e10742f76.cu | /* class RSAGPUAttack implementation in CUDA
* 3/16/2017
*/
#include <cuda_runtime.h>
#include <helper_cuda.h>
#include <stdint.h>
#include <curand.h>
#include <fstream>
#include <stdexcept>
#include <iostream>
using namespace std;
#include "RSAGPUAttack.h"
#include "rsa_gpu.hh"
#include "tanc.hh"
#define checkCurandErrors(x) do { curandStatus_t status= (x);\
if (status != CURAND_STATUS_SUCCESS) { \
printf("Error %d at %s:%d\n", status, __FILE__, __LINE__); \
exit(EXIT_FAILURE);}} while(0)
RSAGPUAttack::RSAGPUAttack(
int traceNum_,
int traceSize_,
int seed_,
SW_Type swType,
const char *fileName,
int keyBits_)
:RSAGPU(swType, KEY_BITS / 8, fileName, keyBits_), traceNum(traceNum_), traceSize(traceSize_), seed(seed_) {
// Allocated device memory for traceNum * traceSize of msg
checkCudaErrors(cudaMalloc(&deviceMsg, traceNum * traceSize * keyBytes));
// Device memory for time info and reset to 0
checkCudaErrors(cudaMalloc(&deviceTime, traceNum * sizeof(uint64_t)));
checkCudaErrors(cudaMemset(deviceTime, 0, traceNum * sizeof(int64_t)));
// Host memory for time info and reduction
hostTime = new uint64_t[traceNum];
}
RSAGPUAttack::~RSAGPUAttack() {
// Free device and host memory
checkCudaErrors(cudaFree(deviceMsg));
checkCudaErrors(cudaFree(deviceTime));
delete [] hostTime;
}
void RSAGPUAttack::genRandMsg() {
// Generate rand msg
curandGenerator_t gen;
checkCurandErrors(curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT));
checkCurandErrors(curandSetPseudoRandomGeneratorSeed(gen, seed));
checkCurandErrors(curandGenerate(
gen, (unsigned int *)deviceMsg, traceNum * traceSize * keyBytes / sizeof(unsigned int)));
checkCurandErrors(curandDestroyGenerator(gen));
checkCudaErrors(cudaDeviceSynchronize());
}
// Record the running time of decryption of msges
void RSAGPUAttack::recordTime(const char* timeFileName) {
// Time the decryption
int gridSize = (traceSize * keySize + BLK_SIZE -1) / BLK_SIZE;
cout << "record time grid size " << gridSize << endl;
for (int i = 0; i < traceNum; i++) {
switch (swType) {
case (SW_Type::none):
gpu_modexp_timing<<<gridSize, BLK_SIZE>>>
(traceSize, keySize, deviceMsg + i * keySize * traceSize, deviceTime + i);
break;
case (SW_Type::clnw):
case (SW_Type::vlnw):
gpu_modexp_timing_sw<<<gridSize, BLK_SIZE>>>
(traceSize, keySize, deviceMsg + i * keySize * traceSize, deviceTime + i);
break;
}
// single time copy to avoid kernel overlapping
checkCudaErrors(cudaMemcpy(hostTime + i, deviceTime + i, sizeof(uint64_t), cudaMemcpyDeviceToHost));
}
// Save timing into file
ofstream timeFile(timeFileName, ios::binary | ios::out);
if (!timeFile) throw runtime_error("Could not open time file");
timeFile.write((const char*)hostTime, traceNum * sizeof(uint64_t));
timeFile.close();
}
// Record the reductions of decryption of msges
// The size of reduction is the same as msg size and key size for binary MP
void RSAGPUAttack::recordReduction(const char * reductionFileName) {
// Device memory for reduction, for each msg, there will be KEY_BITS reductions, needing KEY_BITS bits memory
// Double the size of reduction by including both Square and Multiplication
uint64_t* deviceReduction;
checkCudaErrors(cudaMalloc(&deviceReduction, traceNum * traceSize * keyBytes * 2));
checkCudaErrors(cudaMemset(deviceReduction, 0, traceNum * traceSize * keyBytes * 2));
uint64_t* hostReduction = new uint64_t[traceNum * traceSize * keySize * 2];
int gridSize = (BLK_SIZE - 1 + traceNum * traceSize * keySize) / BLK_SIZE;
gpu_modexp_reduction<<<gridSize, BLK_SIZE>>>
(traceNum * traceSize, keySize, deviceMsg, deviceReduction);
checkCudaErrors(cudaMemcpy
(hostReduction, deviceReduction, traceNum * traceSize * keyBytes * 2, cudaMemcpyDeviceToHost));
// Save reductions into file
ofstream reductionFile(reductionFileName, ios::binary | ios::out);
if (!reductionFile) throw runtime_error("Could not open reduction file");
reductionFile.write((const char*)hostReduction, traceNum * traceSize * keyBytes * 2);
reductionFile.close();
checkCudaErrors(cudaFree(deviceReduction));
delete [] hostReduction;
}
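// Kocher-style timing attack driver: recover the exponent piecewise by
// replaying the recorded messages up to each candidate key bit/window and
// correlating predicted Montgomery extra-reduction counts with the measured
// decryption times. The correlation itself is assumed to live in the
// dt_attack* routines declared in tanc.hh.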
void RSAGPUAttack::timingAttack(const char *fileName) {
// Load time info from fileName
uint64_t *timeInfo = new uint64_t[traceNum];
ifstream dataFile(fileName, ios::binary | ios::in);
if (!dataFile) throw runtime_error("Could not open file for timing attack.");
dataFile.read((char *)timeInfo, traceNum * sizeof(uint64_t));
WORD *msges0, *msges1, *msges_mont; // device memory for intermediate result
checkCudaErrors(cudaMalloc(&msges0, traceNum * traceSize * keyBytes));
int gridSize = (traceNum * traceSize * keySize + BLK_SIZE - 1) / BLK_SIZE;
gpu_preprocessing_sw<<<gridSize, BLK_SIZE>>>
(traceNum * traceSize, keySize, deviceMsg, msges0);
// Attack
switch (swType) {
case (SW_Type::clnw):
gpu_preprocessing_sw<<<gridSize, BLK_SIZE>>>
(traceNum * traceSize, keySize, deviceMsg, msges0);
dt_attack_sw(keySize, traceSize, traceNum, timeInfo, key.clnw, msges0, 128);
break;
case (SW_Type::vlnw):
gpu_preprocessing_sw<<<gridSize, BLK_SIZE>>>
(traceNum * traceSize, keySize, deviceMsg, msges0);
dt_attack_vlnw(keySize, traceSize, traceNum, timeInfo, key.vlnw, msges0, 158);
break;
case (SW_Type::none):
checkCudaErrors(cudaMalloc(&msges1, traceNum * traceSize * keyBytes));
checkCudaErrors(cudaMalloc(&msges_mont, traceNum * traceSize * keyBytes));
gpu_preprocessing<<<gridSize, BLK_SIZE>>>
(traceSize * traceNum, keySize, deviceMsg, msges0, msges_mont);
int bit_size = 1;
int bit_num = 508;
dt_attack(keySize, traceSize, traceNum, bit_size, bit_num, timeInfo,
key.d, msges0, msges1, msges_mont);
checkCudaErrors(cudaFree(msges1));
checkCudaErrors(cudaFree(msges_mont));
break;
}
// Free memory
delete [] timeInfo;
checkCudaErrors(cudaFree(msges0));
}
|
3abb9522e57ab3e66968ee6fdc3d9091bb75e5f3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel4_plus_4_a;
int xdim0_update_halo_kernel4_plus_4_a_h = -1;
__constant__ int ydim0_update_halo_kernel4_plus_4_a;
int ydim0_update_halo_kernel4_plus_4_a_h = -1;
__constant__ int xdim1_update_halo_kernel4_plus_4_a;
int xdim1_update_halo_kernel4_plus_4_a_h = -1;
__constant__ int ydim1_update_halo_kernel4_plus_4_a;
int ydim1_update_halo_kernel4_plus_4_a_h = -1;
#define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel4_plus_4_a*(y)+xdim0_update_halo_kernel4_plus_4_a*ydim0_update_halo_kernel4_plus_4_a*(z))
#define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel4_plus_4_a*(y)+xdim1_update_halo_kernel4_plus_4_a*ydim1_update_halo_kernel4_plus_4_a*(z))
//user function
__device__
inline void update_halo_kernel4_plus_4_a(double *vol_flux_y, double *mass_flux_y, const int* fields) {
if(fields[FIELD_VOL_FLUX_Y] == 1) vol_flux_y[OPS_ACC0(0,0,0)] = vol_flux_y[OPS_ACC0(4,0,0)];
if(fields[FIELD_MASS_FLUX_Y] == 1) mass_flux_y[OPS_ACC1(0,0,0)] = mass_flux_y[OPS_ACC1(4,0,0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel4_plus_4_a(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 + idx_y * 1 * xdim0_update_halo_kernel4_plus_4_a + idx_z * 1 * xdim0_update_halo_kernel4_plus_4_a * ydim0_update_halo_kernel4_plus_4_a;
arg1 += idx_x * 1 + idx_y * 1 * xdim1_update_halo_kernel4_plus_4_a + idx_z * 1 * xdim1_update_halo_kernel4_plus_4_a * ydim1_update_halo_kernel4_plus_4_a;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel4_plus_4_a(arg0, arg1, arg2);
}
}
// host stub function
void ops_par_loop_update_halo_kernel4_plus_4_a(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_arg args[3] = { arg0, arg1, arg2};
ops_timing_realloc(105,"update_halo_kernel4_plus_4_a");
OPS_kernels[105].count++;
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<3; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif //OPS_MPI
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
int xdim0 = args[0].dat->size[0]*args[0].dat->dim;
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0]*args[1].dat->dim;
int ydim1 = args[1].dat->size[1];
//Timing
double t1,t2,c1,c2;
ops_timers_core(&c2,&t2);
if (xdim0 != xdim0_update_halo_kernel4_plus_4_a_h || ydim0 != ydim0_update_halo_kernel4_plus_4_a_h || xdim1 != xdim1_update_halo_kernel4_plus_4_a_h || ydim1 != ydim1_update_halo_kernel4_plus_4_a_h) {
hipMemcpyToSymbol( xdim0_update_halo_kernel4_plus_4_a, &xdim0, sizeof(int) );
xdim0_update_halo_kernel4_plus_4_a_h = xdim0;
hipMemcpyToSymbol( ydim0_update_halo_kernel4_plus_4_a, &ydim0, sizeof(int) );
ydim0_update_halo_kernel4_plus_4_a_h = ydim0;
hipMemcpyToSymbol( xdim1_update_halo_kernel4_plus_4_a, &xdim1, sizeof(int) );
xdim1_update_halo_kernel4_plus_4_a_h = xdim1;
hipMemcpyToSymbol( ydim1_update_halo_kernel4_plus_4_a, &ydim1, sizeof(int) );
ydim1_update_halo_kernel4_plus_4_a_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
char *p_a[3];
//set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d];
#endif //OPS_MPI
int base0 = dat0 * 1 *
(start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d];
#endif //OPS_MPI
int base1 = dat1 * 1 *
(start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
ops_timers_core(&c1,&t1);
OPS_kernels[105].mpi_time += t1-t2;
//call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_update_halo_kernel4_plus_4_a), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
(int *)arg2.data_d,x_size, y_size, z_size);
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
}
ops_timers_core(&c2,&t2);
OPS_kernels[105].time += t2-t1;
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
//Update kernel record
OPS_kernels[105].transfer += ops_compute_transfer(dim, range, &arg0);
OPS_kernels[105].transfer += ops_compute_transfer(dim, range, &arg1);
}
| 3abb9522e57ab3e66968ee6fdc3d9091bb75e5f3.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel4_plus_4_a;
int xdim0_update_halo_kernel4_plus_4_a_h = -1;
__constant__ int ydim0_update_halo_kernel4_plus_4_a;
int ydim0_update_halo_kernel4_plus_4_a_h = -1;
__constant__ int xdim1_update_halo_kernel4_plus_4_a;
int xdim1_update_halo_kernel4_plus_4_a_h = -1;
__constant__ int ydim1_update_halo_kernel4_plus_4_a;
int ydim1_update_halo_kernel4_plus_4_a_h = -1;
#define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel4_plus_4_a*(y)+xdim0_update_halo_kernel4_plus_4_a*ydim0_update_halo_kernel4_plus_4_a*(z))
#define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel4_plus_4_a*(y)+xdim1_update_halo_kernel4_plus_4_a*ydim1_update_halo_kernel4_plus_4_a*(z))
//user function
__device__
inline void update_halo_kernel4_plus_4_a(double *vol_flux_y, double *mass_flux_y, const int* fields) {
if(fields[FIELD_VOL_FLUX_Y] == 1) vol_flux_y[OPS_ACC0(0,0,0)] = vol_flux_y[OPS_ACC0(4,0,0)];
if(fields[FIELD_MASS_FLUX_Y] == 1) mass_flux_y[OPS_ACC1(0,0,0)] = mass_flux_y[OPS_ACC1(4,0,0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel4_plus_4_a(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 + idx_y * 1 * xdim0_update_halo_kernel4_plus_4_a + idx_z * 1 * xdim0_update_halo_kernel4_plus_4_a * ydim0_update_halo_kernel4_plus_4_a;
arg1 += idx_x * 1 + idx_y * 1 * xdim1_update_halo_kernel4_plus_4_a + idx_z * 1 * xdim1_update_halo_kernel4_plus_4_a * ydim1_update_halo_kernel4_plus_4_a;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel4_plus_4_a(arg0, arg1, arg2);
}
}
// host stub function
void ops_par_loop_update_halo_kernel4_plus_4_a(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_arg args[3] = { arg0, arg1, arg2};
ops_timing_realloc(105,"update_halo_kernel4_plus_4_a");
OPS_kernels[105].count++;
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<3; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif //OPS_MPI
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
int xdim0 = args[0].dat->size[0]*args[0].dat->dim;
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0]*args[1].dat->dim;
int ydim1 = args[1].dat->size[1];
//Timing
double t1,t2,c1,c2;
ops_timers_core(&c2,&t2);
if (xdim0 != xdim0_update_halo_kernel4_plus_4_a_h || ydim0 != ydim0_update_halo_kernel4_plus_4_a_h || xdim1 != xdim1_update_halo_kernel4_plus_4_a_h || ydim1 != ydim1_update_halo_kernel4_plus_4_a_h) {
cudaMemcpyToSymbol( xdim0_update_halo_kernel4_plus_4_a, &xdim0, sizeof(int) );
xdim0_update_halo_kernel4_plus_4_a_h = xdim0;
cudaMemcpyToSymbol( ydim0_update_halo_kernel4_plus_4_a, &ydim0, sizeof(int) );
ydim0_update_halo_kernel4_plus_4_a_h = ydim0;
cudaMemcpyToSymbol( xdim1_update_halo_kernel4_plus_4_a, &xdim1, sizeof(int) );
xdim1_update_halo_kernel4_plus_4_a_h = xdim1;
cudaMemcpyToSymbol( ydim1_update_halo_kernel4_plus_4_a, &ydim1, sizeof(int) );
ydim1_update_halo_kernel4_plus_4_a_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
char *p_a[3];
//set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d];
#endif //OPS_MPI
int base0 = dat0 * 1 *
(start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d];
#endif //OPS_MPI
int base1 = dat1 * 1 *
(start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
ops_timers_core(&c1,&t1);
OPS_kernels[105].mpi_time += t1-t2;
//call kernel wrapper function, passing in pointers to data
ops_update_halo_kernel4_plus_4_a<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
(int *)arg2.data_d,x_size, y_size, z_size);
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
}
ops_timers_core(&c2,&t2);
OPS_kernels[105].time += t2-t1;
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
//Update kernel record
OPS_kernels[105].transfer += ops_compute_transfer(dim, range, &arg0);
OPS_kernels[105].transfer += ops_compute_transfer(dim, range, &arg1);
}
|
f81b7fd95485f0a5c93d0695222939ebcc01fe4f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* -----------------------------------------------------------------
* Programmer(s): David J. Gardner @ LLNL
* -----------------------------------------------------------------
* SUNDIALS Copyright Start
* Copyright (c) 2002-2021, Lawrence Livermore National Security
* and Southern Methodist University.
* All rights reserved.
*
* See the top-level LICENSE and NOTICE files for details.
*
* SPDX-License-Identifier: BSD-3-Clause
* SUNDIALS Copyright End
* -----------------------------------------------------------------
* This is the testing routine to check the performance of the
* NVECTOR CUDA module implementation.
* -----------------------------------------------------------------*/
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <sundials/sundials_types.h>
#include <nvector/nvector_cuda.h>
#include <sundials/sundials_math.h>
#include "test_nvector_performance.h"
/* private functions */
static int InitializeClearCache(int cachesize);
static int FinalizeClearCache();
/* private data for clearing cache */
static sunindextype N; /* data length */
static realtype* h_data; /* host data */
static realtype* h_sum; /* host sum */
static realtype* d_data; /* device data */
static realtype* d_sum; /* device sum */
static int blocksPerGrid;
/* cuda reduction kernel to clear the cache between tests */
__global__
void ClearCacheKernel(sunindextype N, realtype* data, realtype* out)
{
__shared__ realtype shared[256];
  int sharedidx = threadIdx.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
realtype tmp = 0;
while (tid < N) {
tmp += data[tid];
tid += blockDim.x * gridDim.x;
}
shared[sharedidx] = tmp;
__syncthreads();
  /* assumes blockDim is a power of 2 */
int i = blockDim.x/2;
while (i != 0) {
if (sharedidx < i)
shared[sharedidx] += shared[sharedidx + i];
__syncthreads();
i /= 2;
}
  if (sharedidx == 0)
    out[blockIdx.x] = shared[0];
}
/* ----------------------------------------------------------------------
* Main NVector Testing Routine
* --------------------------------------------------------------------*/
int main(int argc, char *argv[])
{
SUNContext ctx = NULL; /* SUNDIALS context */
N_Vector X = NULL; /* test vector */
sunindextype veclen; /* vector length */
int print_timing; /* output timings */
int ntests; /* number of tests */
  int nvecs;           /* number of vectors */
int nsums; /* number of sums */
int cachesize; /* size of cache (MB) */
int flag; /* return flag */
printf("\nStart Tests\n");
printf("Vector Name: Cuda\n");
/* check input and set vector length */
if (argc < 7){
printf("ERROR: SIX (6) arguments required: ");
printf("<vector length> <number of vectors> <number of sums> <number of tests> ");
printf("<cache size (MB)> <print timing>\n");
return(-1);
}
veclen = atol(argv[1]);
if (veclen <= 0) {
printf("ERROR: length of vector must be a positive integer \n");
return(-1);
}
nvecs = atol(argv[2]);
if (nvecs <= 0) {
printf("ERROR: number of vectors must be a positive integer \n");
return(-1);
}
nsums = atol(argv[3]);
if (nsums <= 0) {
printf("ERROR: number of sums must be a positive integer \n");
return(-1);
}
ntests = atol(argv[4]);
if (ntests <= 0) {
printf("ERROR: number of tests must be a positive integer \n");
return(-1);
}
cachesize = atol(argv[5]);
if (cachesize < 0) {
printf("ERROR: cache size (MB) must be a non-negative integer \n");
return(-1);
}
InitializeClearCache(cachesize);
print_timing = atoi(argv[6]);
SetTiming(print_timing, 0);
printf("\nRunning with: \n");
printf(" vector length %ld \n", (long int) veclen);
printf(" max number of vectors %d \n", nvecs);
printf(" max number of sums %d \n", nsums);
printf(" number of tests %d \n", ntests);
printf(" timing on/off %d \n", print_timing);
flag = SUNContext_Create(NULL, &ctx);
if (flag) return flag;
/* Create vectors */
X = N_VNew_Cuda(veclen, ctx);
/* run tests */
if (print_timing) printf("\n\n standard operations:\n");
if (print_timing) PrintTableHeader(1);
flag = Test_N_VLinearSum(X, veclen, ntests);
flag = Test_N_VConst(X, veclen, ntests);
flag = Test_N_VProd(X, veclen, ntests);
flag = Test_N_VDiv(X, veclen, ntests);
flag = Test_N_VScale(X, veclen, ntests);
flag = Test_N_VAbs(X, veclen, ntests);
flag = Test_N_VInv(X, veclen, ntests);
flag = Test_N_VAddConst(X, veclen, ntests);
flag = Test_N_VDotProd(X, veclen, ntests);
flag = Test_N_VMaxNorm(X, veclen, ntests);
flag = Test_N_VWrmsNorm(X, veclen, ntests);
flag = Test_N_VWrmsNormMask(X, veclen, ntests);
flag = Test_N_VMin(X, veclen, ntests);
flag = Test_N_VWL2Norm(X, veclen, ntests);
flag = Test_N_VL1Norm(X, veclen, ntests);
flag = Test_N_VCompare(X, veclen, ntests);
flag = Test_N_VInvTest(X, veclen, ntests);
flag = Test_N_VConstrMask(X, veclen, ntests);
flag = Test_N_VMinQuotient(X, veclen, ntests);
if (print_timing) printf("\n\n fused operations 1: nvecs= %d\n", nvecs);
if (print_timing) PrintTableHeader(2);
flag = Test_N_VLinearCombination(X, veclen, nvecs, ntests);
flag = Test_N_VScaleAddMulti(X, veclen, nvecs, ntests);
flag = Test_N_VDotProdMulti(X, veclen, nvecs, ntests);
flag = Test_N_VLinearSumVectorArray(X, veclen, nvecs, ntests);
flag = Test_N_VScaleVectorArray(X, veclen, nvecs, ntests);
flag = Test_N_VConstVectorArray(X, veclen, nvecs, ntests);
flag = Test_N_VWrmsNormVectorArray(X, veclen, nvecs, ntests);
flag = Test_N_VWrmsNormMaskVectorArray(X, veclen, nvecs, ntests);
if (print_timing) printf("\n\n fused operations 2: nvecs= %d nsums= %d\n", nvecs, nsums);
if (print_timing) PrintTableHeader(2);
flag = Test_N_VScaleAddMultiVectorArray(X, veclen, nvecs, nsums, ntests);
flag = Test_N_VLinearCombinationVectorArray(X, veclen, nvecs, nsums, ntests);
/* Free vectors */
N_VDestroy(X);
FinalizeClearCache();
flag = SUNContext_Free(&ctx);
if (flag) return flag;
printf("\nFinished Tests\n");
return(flag);
}
/* ----------------------------------------------------------------------
* Functions required by testing routines to fill vector data
* --------------------------------------------------------------------*/
/* random data between lower and upper */
void N_VRand(N_Vector Xvec, sunindextype Xlen, realtype lower, realtype upper)
{
rand_realtype(N_VGetHostArrayPointer_Cuda(Xvec), Xlen, lower, upper);
N_VCopyToDevice_Cuda(Xvec);
}
/* series of 0 and 1 */
void N_VRandZeroOne(N_Vector Xvec, sunindextype Xlen)
{
rand_realtype_zero_one(N_VGetHostArrayPointer_Cuda(Xvec), Xlen);
N_VCopyToDevice_Cuda(Xvec);
}
/* random values for constraint array */
void N_VRandConstraints(N_Vector Xvec, sunindextype Xlen)
{
rand_realtype_constraints(N_VGetHostArrayPointer_Cuda(Xvec), Xlen);
N_VCopyToDevice_Cuda(Xvec);
}
/* ----------------------------------------------------------------------
* Functions required for MPI or GPU testing
* --------------------------------------------------------------------*/
void collect_times(N_Vector X, double *times, int ntimes)
{
/* not running with MPI, just return */
return;
}
void sync_device(N_Vector x)
{
hipDeviceSynchronize();
return;
}
/* ----------------------------------------------------------------------
* Functions required for clearing cache
* --------------------------------------------------------------------*/
static int InitializeClearCache(int cachesize)
{
hipError_t err; /* cuda error flag */
size_t nbytes; /* cache size in bytes */
  /* determine size of vector to clear cache, N = ceil(nbytes/sizeof(realtype)) with nbytes = 2 * cachesize MB */
nbytes = (size_t) (2 * cachesize * 1024 * 1024);
N = (sunindextype) ((nbytes + sizeof(realtype) - 1)/sizeof(realtype));
/* allocate host data */
blocksPerGrid = SUNMIN(32,(N+255)/256);
h_data = (realtype*) malloc(N*sizeof(realtype));
h_sum = (realtype*) malloc(blocksPerGrid*sizeof(realtype));
/* allocate device data */
err = hipMalloc((void**) &d_data, N*sizeof(realtype));
if (err != hipSuccess) {
fprintf(stderr,"Failed to allocate device vector (error code %d )!\n",err);
return(-1);
}
err = hipMalloc((void**) &d_sum, blocksPerGrid*sizeof(realtype));
if (err != hipSuccess) {
fprintf(stderr,"Failed to allocate device vector (error code %d )!\n",err);
return(-1);
}
/* fill host vector with random data and copy to device */
rand_realtype(h_data, N, RCONST(-1.0), RCONST(1.0));
err = hipMemcpy(d_data, h_data, N*sizeof(realtype), hipMemcpyHostToDevice);
if (err != hipSuccess) {
fprintf(stderr,"Failed to copy data from host to device (error code %d )!\n",err);
return(-1);
}
return(0);
}
static int FinalizeClearCache()
{
hipError_t err; /* cuda error flag */
free(h_data);
free(h_sum);
err = hipFree(d_data);
if (err != hipSuccess) {
fprintf(stderr,"Failed to free device data (error code %d )!\n",err);
return(-1);
}
err = hipFree(d_sum);
if (err != hipSuccess) {
fprintf(stderr,"Failed to free device data (error code %d )!\n",err);
return(-1);
}
return(0);
}
void ClearCache()
{
/* call cuda kernel to clear the cache */
hipLaunchKernelGGL(( ClearCacheKernel), dim3(SUNMIN(32,(N+255)/256)), dim3(256), 0, 0, N, d_data, d_sum);
hipMemcpy(h_sum, d_sum, blocksPerGrid*sizeof(realtype), hipMemcpyDeviceToHost);
hipDeviceSynchronize();
return;
}
| f81b7fd95485f0a5c93d0695222939ebcc01fe4f.cu | /* -----------------------------------------------------------------
* Programmer(s): David J. Gardner @ LLNL
* -----------------------------------------------------------------
* SUNDIALS Copyright Start
* Copyright (c) 2002-2021, Lawrence Livermore National Security
* and Southern Methodist University.
* All rights reserved.
*
* See the top-level LICENSE and NOTICE files for details.
*
* SPDX-License-Identifier: BSD-3-Clause
* SUNDIALS Copyright End
* -----------------------------------------------------------------
* This is the testing routine to check the performance of the
* NVECTOR CUDA module implementation.
* -----------------------------------------------------------------*/
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <sundials/sundials_types.h>
#include <nvector/nvector_cuda.h>
#include <sundials/sundials_math.h>
#include "test_nvector_performance.h"
/* private functions */
static int InitializeClearCache(int cachesize);
static int FinalizeClearCache();
/* private data for clearing cache */
static sunindextype N; /* data length */
static realtype* h_data; /* host data */
static realtype* h_sum; /* host sum */
static realtype* d_data; /* device data */
static realtype* d_sum; /* device sum */
static int blocksPerGrid;
/* cuda reduction kernel to clear the cache between tests */
__global__
void ClearCacheKernel(sunindextype N, realtype* data, realtype* out)
{
__shared__ realtype shared[256];
  int sharedidx = threadIdx.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
realtype tmp = 0;
while (tid < N) {
tmp += data[tid];
tid += blockDim.x * gridDim.x;
}
shared[sharedidx] = tmp;
__syncthreads();
  /* assumes blockDim is a power of 2 */
int i = blockDim.x/2;
while (i != 0) {
if (sharedidx < i)
shared[sharedidx] += shared[sharedidx + i];
__syncthreads();
i /= 2;
}
  if (sharedidx == 0)
    out[blockIdx.x] = shared[0];
}
/* ----------------------------------------------------------------------
* Main NVector Testing Routine
* --------------------------------------------------------------------*/
int main(int argc, char *argv[])
{
SUNContext ctx = NULL; /* SUNDIALS context */
N_Vector X = NULL; /* test vector */
sunindextype veclen; /* vector length */
int print_timing; /* output timings */
int ntests; /* number of tests */
  int nvecs;           /* number of vectors */
int nsums; /* number of sums */
int cachesize; /* size of cache (MB) */
int flag; /* return flag */
printf("\nStart Tests\n");
printf("Vector Name: Cuda\n");
/* check input and set vector length */
if (argc < 7){
printf("ERROR: SIX (6) arguments required: ");
printf("<vector length> <number of vectors> <number of sums> <number of tests> ");
printf("<cache size (MB)> <print timing>\n");
return(-1);
}
veclen = atol(argv[1]);
if (veclen <= 0) {
printf("ERROR: length of vector must be a positive integer \n");
return(-1);
}
nvecs = atol(argv[2]);
if (nvecs <= 0) {
printf("ERROR: number of vectors must be a positive integer \n");
return(-1);
}
nsums = atol(argv[3]);
if (nsums <= 0) {
printf("ERROR: number of sums must be a positive integer \n");
return(-1);
}
ntests = atol(argv[4]);
if (ntests <= 0) {
printf("ERROR: number of tests must be a positive integer \n");
return(-1);
}
cachesize = atol(argv[5]);
if (cachesize < 0) {
printf("ERROR: cache size (MB) must be a non-negative integer \n");
return(-1);
}
InitializeClearCache(cachesize);
print_timing = atoi(argv[6]);
SetTiming(print_timing, 0);
printf("\nRunning with: \n");
printf(" vector length %ld \n", (long int) veclen);
printf(" max number of vectors %d \n", nvecs);
printf(" max number of sums %d \n", nsums);
printf(" number of tests %d \n", ntests);
printf(" timing on/off %d \n", print_timing);
flag = SUNContext_Create(NULL, &ctx);
if (flag) return flag;
/* Create vectors */
X = N_VNew_Cuda(veclen, ctx);
/* run tests */
if (print_timing) printf("\n\n standard operations:\n");
if (print_timing) PrintTableHeader(1);
flag = Test_N_VLinearSum(X, veclen, ntests);
flag = Test_N_VConst(X, veclen, ntests);
flag = Test_N_VProd(X, veclen, ntests);
flag = Test_N_VDiv(X, veclen, ntests);
flag = Test_N_VScale(X, veclen, ntests);
flag = Test_N_VAbs(X, veclen, ntests);
flag = Test_N_VInv(X, veclen, ntests);
flag = Test_N_VAddConst(X, veclen, ntests);
flag = Test_N_VDotProd(X, veclen, ntests);
flag = Test_N_VMaxNorm(X, veclen, ntests);
flag = Test_N_VWrmsNorm(X, veclen, ntests);
flag = Test_N_VWrmsNormMask(X, veclen, ntests);
flag = Test_N_VMin(X, veclen, ntests);
flag = Test_N_VWL2Norm(X, veclen, ntests);
flag = Test_N_VL1Norm(X, veclen, ntests);
flag = Test_N_VCompare(X, veclen, ntests);
flag = Test_N_VInvTest(X, veclen, ntests);
flag = Test_N_VConstrMask(X, veclen, ntests);
flag = Test_N_VMinQuotient(X, veclen, ntests);
if (print_timing) printf("\n\n fused operations 1: nvecs= %d\n", nvecs);
if (print_timing) PrintTableHeader(2);
flag = Test_N_VLinearCombination(X, veclen, nvecs, ntests);
flag = Test_N_VScaleAddMulti(X, veclen, nvecs, ntests);
flag = Test_N_VDotProdMulti(X, veclen, nvecs, ntests);
flag = Test_N_VLinearSumVectorArray(X, veclen, nvecs, ntests);
flag = Test_N_VScaleVectorArray(X, veclen, nvecs, ntests);
flag = Test_N_VConstVectorArray(X, veclen, nvecs, ntests);
flag = Test_N_VWrmsNormVectorArray(X, veclen, nvecs, ntests);
flag = Test_N_VWrmsNormMaskVectorArray(X, veclen, nvecs, ntests);
if (print_timing) printf("\n\n fused operations 2: nvecs= %d nsums= %d\n", nvecs, nsums);
if (print_timing) PrintTableHeader(2);
flag = Test_N_VScaleAddMultiVectorArray(X, veclen, nvecs, nsums, ntests);
flag = Test_N_VLinearCombinationVectorArray(X, veclen, nvecs, nsums, ntests);
/* Free vectors */
N_VDestroy(X);
FinalizeClearCache();
flag = SUNContext_Free(&ctx);
if (flag) return flag;
printf("\nFinished Tests\n");
return(flag);
}
/* ----------------------------------------------------------------------
* Functions required by testing routines to fill vector data
* --------------------------------------------------------------------*/
/* random data between lower and upper */
void N_VRand(N_Vector Xvec, sunindextype Xlen, realtype lower, realtype upper)
{
rand_realtype(N_VGetHostArrayPointer_Cuda(Xvec), Xlen, lower, upper);
N_VCopyToDevice_Cuda(Xvec);
}
/* series of 0 and 1 */
void N_VRandZeroOne(N_Vector Xvec, sunindextype Xlen)
{
rand_realtype_zero_one(N_VGetHostArrayPointer_Cuda(Xvec), Xlen);
N_VCopyToDevice_Cuda(Xvec);
}
/* random values for constraint array */
void N_VRandConstraints(N_Vector Xvec, sunindextype Xlen)
{
rand_realtype_constraints(N_VGetHostArrayPointer_Cuda(Xvec), Xlen);
N_VCopyToDevice_Cuda(Xvec);
}
/* ----------------------------------------------------------------------
* Functions required for MPI or GPU testing
* --------------------------------------------------------------------*/
void collect_times(N_Vector X, double *times, int ntimes)
{
/* not running with MPI, just return */
return;
}
void sync_device(N_Vector x)
{
cudaDeviceSynchronize();
return;
}
/* ----------------------------------------------------------------------
* Functions required for clearing cache
* --------------------------------------------------------------------*/
static int InitializeClearCache(int cachesize)
{
cudaError_t err; /* cuda error flag */
size_t nbytes; /* cache size in bytes */
  /* determine size of vector to clear cache, N = ceil(nbytes/sizeof(realtype)) with nbytes = 2 * cachesize MB */
nbytes = (size_t) (2 * cachesize * 1024 * 1024);
N = (sunindextype) ((nbytes + sizeof(realtype) - 1)/sizeof(realtype));
/* allocate host data */
blocksPerGrid = SUNMIN(32,(N+255)/256);
h_data = (realtype*) malloc(N*sizeof(realtype));
h_sum = (realtype*) malloc(blocksPerGrid*sizeof(realtype));
/* allocate device data */
err = cudaMalloc((void**) &d_data, N*sizeof(realtype));
if (err != cudaSuccess) {
fprintf(stderr,"Failed to allocate device vector (error code %d )!\n",err);
return(-1);
}
err = cudaMalloc((void**) &d_sum, blocksPerGrid*sizeof(realtype));
if (err != cudaSuccess) {
fprintf(stderr,"Failed to allocate device vector (error code %d )!\n",err);
return(-1);
}
/* fill host vector with random data and copy to device */
rand_realtype(h_data, N, RCONST(-1.0), RCONST(1.0));
err = cudaMemcpy(d_data, h_data, N*sizeof(realtype), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
fprintf(stderr,"Failed to copy data from host to device (error code %d )!\n",err);
return(-1);
}
return(0);
}
static int FinalizeClearCache()
{
cudaError_t err; /* cuda error flag */
free(h_data);
free(h_sum);
err = cudaFree(d_data);
if (err != cudaSuccess) {
fprintf(stderr,"Failed to free device data (error code %d )!\n",err);
return(-1);
}
err = cudaFree(d_sum);
if (err != cudaSuccess) {
fprintf(stderr,"Failed to free device data (error code %d )!\n",err);
return(-1);
}
return(0);
}
void ClearCache()
{
/* call cuda kernel to clear the cache */
ClearCacheKernel<<<SUNMIN(32,(N+255)/256), 256>>>(N, d_data, d_sum);
cudaMemcpy(h_sum, d_sum, blocksPerGrid*sizeof(realtype), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
return;
}
|
f419f1ae34ff3db0d0bc299e77970213ebff6ea1.hip | // !!! This is a file automatically generated by hipify!!!
#include <hipblas.h>
#include "common.h"
#include "restrict.h"
#include "type.h"
hipblasOperation_t trans;
int k;
int inc;
hipblasHandle_t handle;
REAL *alpha;
REAL *beta;
int mvm_init()
{
trans = HIPBLAS_OP_N;
k = 1;
inc = 1;
const REAL alpha_cpu = (REAL) 1.0;
const REAL beta_cpu = (REAL) 0.0;
hipMalloc(&alpha, sizeof(*alpha));
hipMalloc(&beta, sizeof(*beta));
if (alpha == NULL || beta == NULL)
return ERR_CUMALLOC;
  hipMemcpy(alpha, &alpha_cpu, sizeof(alpha_cpu), hipMemcpyHostToDevice);
  hipMemcpy(beta, &beta_cpu, sizeof(beta_cpu), hipMemcpyHostToDevice);
hipblasStatus_t st = hipblasCreate(&handle);
hipblasSetPointerMode(handle, HIPBLAS_POINTER_MODE_DEVICE);
if (st != HIPBLAS_STATUS_SUCCESS)
return ERR_CUBLAS;
else
return ERR_OK;
}
void mvm_cleanup()
{
hipFree(alpha);
hipFree(beta);
hipblasDestroy(handle);
}
// ----------------------------------------------------------------------------
// c = A * b
// mx1 mxn nx1
// ----------------------------------------------------------------------------
void mvm_gemm(const int m, const int n, const REAL *const restrict A,
const REAL *const restrict b, REAL *const restrict c)
{
#if TYPE == FLOAT
hipblasSgemm(handle, trans, trans, m, k, n, alpha, A, m, b, n, beta, c, m);
#elif TYPE == DOUBLE
hipblasDgemm(handle, trans, trans, m, k, n, alpha, A, m, b, n, beta, c, m);
#endif
}
void mvm_gemv(const int m, const int n, const REAL *const restrict A,
const REAL *const restrict b, REAL *const restrict c)
{
#if TYPE == FLOAT
hipblasSgemv(handle, trans, m, n, alpha, A, m, b, inc, beta, c, inc);
#elif TYPE == DOUBLE
hipblasDgemv(handle, trans, m, n, alpha, A, m, b, inc, beta, c, inc);
#endif
}
| f419f1ae34ff3db0d0bc299e77970213ebff6ea1.cu | #include <cublas_v2.h>
#include "common.h"
#include "restrict.h"
#include "type.h"
cublasOperation_t trans;
int k;
int inc;
cublasHandle_t handle;
REAL *alpha;
REAL *beta;
int mvm_init()
{
trans = CUBLAS_OP_N;
k = 1;
inc = 1;
const REAL alpha_cpu = (REAL) 1.0;
const REAL beta_cpu = (REAL) 0.0;
cudaMalloc(&alpha, sizeof(*alpha));
cudaMalloc(&beta, sizeof(*beta));
if (alpha == NULL || beta == NULL)
return ERR_CUMALLOC;
  cudaMemcpy(alpha, &alpha_cpu, sizeof(alpha_cpu), cudaMemcpyHostToDevice);
  cudaMemcpy(beta, &beta_cpu, sizeof(beta_cpu), cudaMemcpyHostToDevice);
cublasStatus_t st = cublasCreate_v2(&handle);
cublasSetPointerMode(handle, CUBLAS_POINTER_MODE_DEVICE);
if (st != CUBLAS_STATUS_SUCCESS)
return ERR_CUBLAS;
else
return ERR_OK;
}
void mvm_cleanup()
{
cudaFree(alpha);
cudaFree(beta);
cublasDestroy_v2(handle);
}
// ----------------------------------------------------------------------------
// c = A * b
// mx1 mxn nx1
// ----------------------------------------------------------------------------
void mvm_gemm(const int m, const int n, const REAL *const restrict A,
const REAL *const restrict b, REAL *const restrict c)
{
#if TYPE == FLOAT
cublasSgemm(handle, trans, trans, m, k, n, alpha, A, m, b, n, beta, c, m);
#elif TYPE == DOUBLE
cublasDgemm(handle, trans, trans, m, k, n, alpha, A, m, b, n, beta, c, m);
#endif
}
void mvm_gemv(const int m, const int n, const REAL *const restrict A,
const REAL *const restrict b, REAL *const restrict c)
{
#if TYPE == FLOAT
cublasSgemv(handle, trans, m, n, alpha, A, m, b, inc, beta, c, inc);
#elif TYPE == DOUBLE
cublasDgemv(handle, trans, m, n, alpha, A, m, b, inc, beta, c, inc);
#endif
}
|
2c18075371d4b908b454a2597cc73dd43bc9e1c0.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cugraph/experimental/detail/graph_utils.cuh>
#include <cugraph/experimental/graph_view.hpp>
#include <cugraph/partition_manager.hpp>
#include <cugraph/prims/copy_v_transform_reduce_in_out_nbr.cuh>
#include <cugraph/utilities/error.hpp>
#include <cugraph/utilities/host_scalar_comm.cuh>
#include <raft/cudart_utils.h>
#include <raft/handle.hpp>
#include <rmm/device_scalar.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/count.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/sort.h>
#include <algorithm>
#include <cstdint>
#include <type_traits>
#include <vector>
namespace cugraph {
namespace experimental {
namespace {
// can't use lambda due to nvcc limitations (The enclosing parent function ("graph_view_t") for an
// extended __device__ lambda must allow its address to be taken)
template <typename vertex_t>
struct out_of_range_t {
vertex_t min{};
vertex_t max{};
__device__ bool operator()(vertex_t v) { return (v < min) || (v >= max); }
};
template <typename vertex_t, typename edge_t>
std::vector<edge_t> update_adj_matrix_partition_edge_counts(
std::vector<edge_t const*> const& adj_matrix_partition_offsets,
std::optional<std::vector<vertex_t>> const& adj_matrix_partition_dcs_nzd_vertex_counts,
partition_t<vertex_t> const& partition,
std::optional<std::vector<vertex_t>> const& adj_matrix_partition_segment_offsets,
hipStream_t stream)
{
std::vector<edge_t> adj_matrix_partition_edge_counts(partition.get_number_of_matrix_partitions(),
0);
auto use_dcs = adj_matrix_partition_dcs_nzd_vertex_counts.has_value();
for (size_t i = 0; i < adj_matrix_partition_offsets.size(); ++i) {
auto [major_first, major_last] = partition.get_matrix_partition_major_range(i);
raft::update_host(&(adj_matrix_partition_edge_counts[i]),
adj_matrix_partition_offsets[i] +
(use_dcs ? ((*adj_matrix_partition_segment_offsets)
[(detail::num_sparse_segments_per_vertex_partition + 2) * i +
detail::num_sparse_segments_per_vertex_partition] -
(*adj_matrix_partition_dcs_nzd_vertex_counts)[i])
: (major_last - major_first)),
1,
stream);
}
CUDA_TRY(hipStreamSynchronize(stream));
return adj_matrix_partition_edge_counts;
}
template <typename vertex_t,
typename edge_t,
typename weight_t,
bool store_transposed,
bool multi_gpu>
rmm::device_uvector<edge_t> compute_minor_degrees(
raft::handle_t const& handle,
graph_view_t<vertex_t, edge_t, weight_t, store_transposed, multi_gpu> const& graph_view)
{
rmm::device_uvector<edge_t> minor_degrees(graph_view.get_number_of_local_vertices(),
handle.get_stream());
if (store_transposed) {
copy_v_transform_reduce_out_nbr(
handle,
graph_view,
thrust::make_constant_iterator(0) /* dummy */,
thrust::make_constant_iterator(0) /* dummy */,
[] __device__(vertex_t src, vertex_t dst, weight_t w, auto src_val, auto dst_val) {
return edge_t{1};
},
edge_t{0},
minor_degrees.data());
} else {
copy_v_transform_reduce_in_nbr(
handle,
graph_view,
thrust::make_constant_iterator(0) /* dummy */,
thrust::make_constant_iterator(0) /* dummy */,
[] __device__(vertex_t src, vertex_t dst, weight_t w, auto src_val, auto dst_val) {
return edge_t{1};
},
edge_t{0},
minor_degrees.data());
}
return minor_degrees;
}
template <bool major,
typename vertex_t,
typename edge_t,
typename weight_t,
bool store_transposed,
bool multi_gpu>
rmm::device_uvector<weight_t> compute_weight_sums(
raft::handle_t const& handle,
graph_view_t<vertex_t, edge_t, weight_t, store_transposed, multi_gpu> const& graph_view)
{
rmm::device_uvector<weight_t> weight_sums(graph_view.get_number_of_local_vertices(),
handle.get_stream());
if (major == store_transposed) {
copy_v_transform_reduce_in_nbr(
handle,
graph_view,
thrust::make_constant_iterator(0) /* dummy */,
thrust::make_constant_iterator(0) /* dummy */,
[] __device__(vertex_t src, vertex_t dst, weight_t w, auto src_val, auto dst_val) {
return w;
},
weight_t{0.0},
weight_sums.data());
} else {
copy_v_transform_reduce_out_nbr(
handle,
graph_view,
thrust::make_constant_iterator(0) /* dummy */,
thrust::make_constant_iterator(0) /* dummy */,
[] __device__(vertex_t src, vertex_t dst, weight_t w, auto src_val, auto dst_val) {
return w;
},
weight_t{0.0},
weight_sums.data());
}
return weight_sums;
}
} // namespace
template <typename vertex_t,
typename edge_t,
typename weight_t,
bool store_transposed,
bool multi_gpu>
graph_view_t<vertex_t, edge_t, weight_t, store_transposed, multi_gpu, std::enable_if_t<multi_gpu>>::
graph_view_t(
raft::handle_t const& handle,
std::vector<edge_t const*> const& adj_matrix_partition_offsets,
std::vector<vertex_t const*> const& adj_matrix_partition_indices,
std::optional<std::vector<weight_t const*>> const& adj_matrix_partition_weights,
std::optional<std::vector<vertex_t const*>> const& adj_matrix_partition_dcs_nzd_vertices,
std::optional<std::vector<vertex_t>> const& adj_matrix_partition_dcs_nzd_vertex_counts,
partition_t<vertex_t> const& partition,
vertex_t number_of_vertices,
edge_t number_of_edges,
graph_properties_t properties,
std::optional<std::vector<vertex_t>> const& adj_matrix_partition_segment_offsets,
bool do_expensive_check)
: detail::graph_base_t<vertex_t, edge_t, weight_t>(
handle, number_of_vertices, number_of_edges, properties),
adj_matrix_partition_offsets_(adj_matrix_partition_offsets),
adj_matrix_partition_indices_(adj_matrix_partition_indices),
adj_matrix_partition_weights_(adj_matrix_partition_weights),
adj_matrix_partition_dcs_nzd_vertices_(adj_matrix_partition_dcs_nzd_vertices),
adj_matrix_partition_dcs_nzd_vertex_counts_(adj_matrix_partition_dcs_nzd_vertex_counts),
adj_matrix_partition_number_of_edges_(
update_adj_matrix_partition_edge_counts(adj_matrix_partition_offsets,
adj_matrix_partition_dcs_nzd_vertex_counts,
partition,
adj_matrix_partition_segment_offsets,
handle.get_stream())),
partition_(partition),
adj_matrix_partition_segment_offsets_(adj_matrix_partition_segment_offsets)
{
// cheap error checks
auto const comm_size = this->get_handle_ptr()->get_comms().get_size();
auto const row_comm_size = this->get_handle_ptr()
->get_subcomm(cugraph::partition_2d::key_naming_t().row_name())
.get_size();
auto const col_comm_size = this->get_handle_ptr()
->get_subcomm(cugraph::partition_2d::key_naming_t().col_name())
.get_size();
auto is_weighted = adj_matrix_partition_weights.has_value();
auto use_dcs = adj_matrix_partition_dcs_nzd_vertices.has_value();
CUGRAPH_EXPECTS(adj_matrix_partition_offsets.size() == adj_matrix_partition_indices.size(),
"Internal Error: adj_matrix_partition_offsets.size() and "
"adj_matrix_partition_indices.size() should coincide.");
CUGRAPH_EXPECTS(
!is_weighted || ((*adj_matrix_partition_weights).size() == adj_matrix_partition_offsets.size()),
"Internal Error: adj_matrix_partition_weights.size() should coincide with "
"adj_matrix_partition_offsets.size() (if weighted).");
CUGRAPH_EXPECTS(adj_matrix_partition_dcs_nzd_vertex_counts.has_value() == use_dcs,
"adj_matrix_partition_dcs_nzd_vertices.has_value() and "
"adj_matrix_partition_dcs_nzd_vertex_counts.has_value() should coincide");
CUGRAPH_EXPECTS(!use_dcs || ((*adj_matrix_partition_dcs_nzd_vertices).size() ==
(*adj_matrix_partition_dcs_nzd_vertex_counts).size()),
"Internal Error: adj_matrix_partition_dcs_nzd_vertices.size() and "
"adj_matrix_partition_dcs_nzd_vertex_counts.size() should coincide (if used).");
CUGRAPH_EXPECTS(!use_dcs || ((*adj_matrix_partition_dcs_nzd_vertices).size() ==
adj_matrix_partition_offsets.size()),
"Internal Error: adj_matrix_partition_dcs_nzd_vertices.size() should coincide "
"with adj_matrix_partition_offsets.size() (if used).");
CUGRAPH_EXPECTS(adj_matrix_partition_offsets.size() == static_cast<size_t>(col_comm_size),
"Internal Error: erroneous adj_matrix_partition_offsets.size().");
CUGRAPH_EXPECTS(
!adj_matrix_partition_segment_offsets.has_value() ||
((*adj_matrix_partition_segment_offsets).size() ==
col_comm_size * (detail::num_sparse_segments_per_vertex_partition + (use_dcs ? 2 : 1))),
"Internal Error: invalid adj_matrix_partition_segment_offsets.size().");
// optional expensive checks
if (do_expensive_check) {
auto default_stream_view = this->get_handle_ptr()->get_stream_view();
auto const row_comm_rank = this->get_handle_ptr()
->get_subcomm(cugraph::partition_2d::key_naming_t().row_name())
.get_rank();
auto const col_comm_rank = this->get_handle_ptr()
->get_subcomm(cugraph::partition_2d::key_naming_t().col_name())
.get_rank();
edge_t number_of_local_edges_sum{};
for (size_t i = 0; i < adj_matrix_partition_offsets.size(); ++i) {
auto [major_first, major_last] = partition.get_matrix_partition_major_range(i);
auto [minor_first, minor_last] = partition.get_matrix_partition_minor_range();
auto offset_array_size = major_last - major_first + 1;
if (use_dcs) {
auto major_hypersparse_first =
major_first + (*adj_matrix_partition_segment_offsets)
[(detail::num_sparse_segments_per_vertex_partition + 2) * i +
detail::num_sparse_segments_per_vertex_partition];
offset_array_size = major_hypersparse_first - major_first +
(*adj_matrix_partition_dcs_nzd_vertex_counts)[i] + 1;
}
CUGRAPH_EXPECTS(thrust::is_sorted(rmm::exec_policy(default_stream_view),
adj_matrix_partition_offsets[i],
adj_matrix_partition_offsets[i] + offset_array_size),
"Internal Error: adj_matrix_partition_offsets[] is not sorted.");
edge_t number_of_local_edges{};
raft::update_host(&number_of_local_edges,
adj_matrix_partition_offsets[i] + offset_array_size - 1,
1,
default_stream_view.value());
default_stream_view.synchronize();
number_of_local_edges_sum += number_of_local_edges;
// better use thrust::any_of once https://github.com/thrust/thrust/issues/1016 is resolved
CUGRAPH_EXPECTS(
thrust::count_if(rmm::exec_policy(default_stream_view),
adj_matrix_partition_indices[i],
adj_matrix_partition_indices[i] + number_of_local_edges,
out_of_range_t<vertex_t>{minor_first, minor_last}) == 0,
"Internal Error: adj_matrix_partition_indices[] have out-of-range vertex IDs.");
}
number_of_local_edges_sum = host_scalar_allreduce(
this->get_handle_ptr()->get_comms(), number_of_local_edges_sum, default_stream_view.value());
CUGRAPH_EXPECTS(number_of_local_edges_sum == this->get_number_of_edges(),
"Internal Error: the sum of local edges counts does not match with "
"number_of_local_edges.");
if (adj_matrix_partition_segment_offsets) {
auto degrees = detail::compute_major_degrees(handle,
adj_matrix_partition_offsets,
adj_matrix_partition_dcs_nzd_vertices,
adj_matrix_partition_dcs_nzd_vertex_counts,
partition,
adj_matrix_partition_segment_offsets);
CUGRAPH_EXPECTS(thrust::is_sorted(rmm::exec_policy(default_stream_view),
degrees.begin(),
degrees.end(),
thrust::greater<edge_t>{}),
"Invalid Invalid input argument: adj_matrix_partition_segment_offsets are "
"provided, but degrees are not in descending order.");
auto num_segments_per_vertex_partition =
detail::num_sparse_segments_per_vertex_partition + (use_dcs ? 1 : 0);
for (int i = 0; i < col_comm_size; ++i) {
CUGRAPH_EXPECTS(std::is_sorted((*adj_matrix_partition_segment_offsets).begin() +
(num_segments_per_vertex_partition + 1) * i,
(*adj_matrix_partition_segment_offsets).begin() +
(num_segments_per_vertex_partition + 1) * (i + 1)),
"Internal Error: erroneous adj_matrix_partition_segment_offsets.");
CUGRAPH_EXPECTS(
(*adj_matrix_partition_segment_offsets)[(num_segments_per_vertex_partition + 1) * i] == 0,
"Internal Error: erroneous adj_matrix_partition_segment_offsets.");
auto vertex_partition_idx = row_comm_size * i + row_comm_rank;
CUGRAPH_EXPECTS(
(*adj_matrix_partition_segment_offsets)[(num_segments_per_vertex_partition + 1) * i +
num_segments_per_vertex_partition] ==
partition.get_vertex_partition_size(vertex_partition_idx),
"Internal Error: erroneous adj_matrix_partition_segment_offsets.");
}
}
CUGRAPH_EXPECTS(partition.get_vertex_partition_last(comm_size - 1) == number_of_vertices,
"Internal Error: vertex partition should cover [0, number_of_vertices).");
    // FIXME: the check for symmetry may be better implemented with transpose().
if (this->is_symmetric()) {}
    // FIXME: the check for duplicate edges may be better implemented after deciding whether to sort
    // the neighbor list or not.
if (!this->is_multigraph()) {}
}
}
template <typename vertex_t,
typename edge_t,
typename weight_t,
bool store_transposed,
bool multi_gpu>
graph_view_t<vertex_t,
edge_t,
weight_t,
store_transposed,
multi_gpu,
std::enable_if_t<!multi_gpu>>::graph_view_t(raft::handle_t const& handle,
edge_t const* offsets,
vertex_t const* indices,
std::optional<weight_t const*> weights,
vertex_t number_of_vertices,
edge_t number_of_edges,
graph_properties_t properties,
std::optional<std::vector<vertex_t>> const&
segment_offsets,
bool do_expensive_check)
: detail::graph_base_t<vertex_t, edge_t, weight_t>(
handle, number_of_vertices, number_of_edges, properties),
offsets_(offsets),
indices_(indices),
weights_(weights),
segment_offsets_(segment_offsets)
{
// cheap error checks
CUGRAPH_EXPECTS(
!segment_offsets.has_value() ||
((*segment_offsets).size() == (detail::num_sparse_segments_per_vertex_partition + 1)),
"Internal Error: segment_offsets.size() returns an invalid value.");
// optional expensive checks
if (do_expensive_check) {
auto default_stream_view = this->get_handle_ptr()->get_stream_view();
CUGRAPH_EXPECTS(thrust::is_sorted(rmm::exec_policy(default_stream_view),
offsets,
offsets + (this->get_number_of_vertices() + 1)),
"Internal Error: offsets is not sorted.");
// better use thrust::any_of once https://github.com/thrust/thrust/issues/1016 is resolved
CUGRAPH_EXPECTS(
thrust::count_if(rmm::exec_policy(default_stream_view),
indices,
indices + this->get_number_of_edges(),
out_of_range_t<vertex_t>{0, this->get_number_of_vertices()}) == 0,
"Internal Error: adj_matrix_partition_indices[] have out-of-range vertex IDs.");
if (segment_offsets) {
auto degrees = detail::compute_major_degrees(handle, offsets, number_of_vertices);
CUGRAPH_EXPECTS(thrust::is_sorted(rmm::exec_policy(default_stream_view),
degrees.begin(),
degrees.end(),
thrust::greater<edge_t>{}),
"Invalid Invalid input argument: segment_offsets are provided, but degrees "
"are not in descending order.");
CUGRAPH_EXPECTS(std::is_sorted((*segment_offsets).begin(), (*segment_offsets).end()),
"Internal Error: erroneous segment_offsets.");
      CUGRAPH_EXPECTS((*segment_offsets)[0] == 0, "Invalid input argument: segment_offsets.");
CUGRAPH_EXPECTS((*segment_offsets).back() == this->get_number_of_vertices(),
"Invalid input argument: segment_offsets.");
}
    // FIXME: the check for symmetry may be better implemented with transpose().
if (this->is_symmetric()) {}
    // FIXME: the check for duplicate edges may be better implemented after deciding whether to sort
    // the neighbor list or not.
if (!this->is_multigraph()) {}
}
}
template <typename vertex_t,
typename edge_t,
typename weight_t,
bool store_transposed,
bool multi_gpu>
rmm::device_uvector<edge_t>
graph_view_t<vertex_t, edge_t, weight_t, store_transposed, multi_gpu, std::enable_if_t<multi_gpu>>::
compute_in_degrees(raft::handle_t const& handle) const
{
if (store_transposed) {
return detail::compute_major_degrees(handle,
this->adj_matrix_partition_offsets_,
this->adj_matrix_partition_dcs_nzd_vertices_,
this->adj_matrix_partition_dcs_nzd_vertex_counts_,
this->partition_,
this->adj_matrix_partition_segment_offsets_);
} else {
return compute_minor_degrees(handle, *this);
}
}
template <typename vertex_t,
typename edge_t,
typename weight_t,
bool store_transposed,
bool multi_gpu>
rmm::device_uvector<edge_t>
graph_view_t<vertex_t,
edge_t,
weight_t,
store_transposed,
multi_gpu,
std::enable_if_t<!multi_gpu>>::compute_in_degrees(raft::handle_t const& handle) const
{
if (store_transposed) {
return detail::compute_major_degrees(
handle, this->offsets_, this->get_number_of_local_vertices());
} else {
return compute_minor_degrees(handle, *this);
}
}
template <typename vertex_t,
typename edge_t,
typename weight_t,
bool store_transposed,
bool multi_gpu>
rmm::device_uvector<edge_t>
graph_view_t<vertex_t, edge_t, weight_t, store_transposed, multi_gpu, std::enable_if_t<multi_gpu>>::
compute_out_degrees(raft::handle_t const& handle) const
{
if (store_transposed) {
return compute_minor_degrees(handle, *this);
} else {
return detail::compute_major_degrees(handle,
this->adj_matrix_partition_offsets_,
this->adj_matrix_partition_dcs_nzd_vertices_,
this->adj_matrix_partition_dcs_nzd_vertex_counts_,
this->partition_,
this->adj_matrix_partition_segment_offsets_);
}
}
template <typename vertex_t,
typename edge_t,
typename weight_t,
bool store_transposed,
bool multi_gpu>
rmm::device_uvector<edge_t>
graph_view_t<vertex_t,
edge_t,
weight_t,
store_transposed,
multi_gpu,
std::enable_if_t<!multi_gpu>>::compute_out_degrees(raft::handle_t const& handle) const
{
if (store_transposed) {
return compute_minor_degrees(handle, *this);
} else {
return detail::compute_major_degrees(
handle, this->offsets_, this->get_number_of_local_vertices());
}
}
template <typename vertex_t,
typename edge_t,
typename weight_t,
bool store_transposed,
bool multi_gpu>
rmm::device_uvector<weight_t>
graph_view_t<vertex_t, edge_t, weight_t, store_transposed, multi_gpu, std::enable_if_t<multi_gpu>>::
compute_in_weight_sums(raft::handle_t const& handle) const
{
if (store_transposed) {
return compute_weight_sums<true>(handle, *this);
} else {
return compute_weight_sums<false>(handle, *this);
}
}
template <typename vertex_t,
typename edge_t,
typename weight_t,
bool store_transposed,
bool multi_gpu>
rmm::device_uvector<weight_t> graph_view_t<
vertex_t,
edge_t,
weight_t,
store_transposed,
multi_gpu,
std::enable_if_t<!multi_gpu>>::compute_in_weight_sums(raft::handle_t const& handle) const
{
if (store_transposed) {
return compute_weight_sums<true>(handle, *this);
} else {
return compute_weight_sums<false>(handle, *this);
}
}
template <typename vertex_t,
typename edge_t,
typename weight_t,
bool store_transposed,
bool multi_gpu>
rmm::device_uvector<weight_t>
graph_view_t<vertex_t, edge_t, weight_t, store_transposed, multi_gpu, std::enable_if_t<multi_gpu>>::
compute_out_weight_sums(raft::handle_t const& handle) const
{
if (store_transposed) {
return compute_weight_sums<false>(handle, *this);
} else {
return compute_weight_sums<true>(handle, *this);
}
}
template <typename vertex_t,
typename edge_t,
typename weight_t,
bool store_transposed,
bool multi_gpu>
rmm::device_uvector<weight_t> graph_view_t<
vertex_t,
edge_t,
weight_t,
store_transposed,
multi_gpu,
std::enable_if_t<!multi_gpu>>::compute_out_weight_sums(raft::handle_t const& handle) const
{
if (store_transposed) {
return compute_weight_sums<false>(handle, *this);
} else {
return compute_weight_sums<true>(handle, *this);
}
}
template <typename vertex_t,
typename edge_t,
typename weight_t,
bool store_transposed,
bool multi_gpu>
edge_t
graph_view_t<vertex_t, edge_t, weight_t, store_transposed, multi_gpu, std::enable_if_t<multi_gpu>>::
compute_max_in_degree(raft::handle_t const& handle) const
{
auto in_degrees = compute_in_degrees(handle);
auto it = thrust::max_element(
rmm::exec_policy(handle.get_stream_view()), in_degrees.begin(), in_degrees.end());
rmm::device_scalar<edge_t> ret(edge_t{0}, handle.get_stream());
device_allreduce(handle.get_comms(),
it != in_degrees.end() ? it : ret.data(),
ret.data(),
1,
raft::comms::op_t::MAX,
handle.get_stream());
return ret.value(handle.get_stream());
}
template <typename vertex_t,
typename edge_t,
typename weight_t,
bool store_transposed,
bool multi_gpu>
edge_t graph_view_t<vertex_t,
edge_t,
weight_t,
store_transposed,
multi_gpu,
std::enable_if_t<!multi_gpu>>::compute_max_in_degree(raft::handle_t const&
handle) const
{
auto in_degrees = compute_in_degrees(handle);
auto it = thrust::max_element(
rmm::exec_policy(handle.get_stream_view()), in_degrees.begin(), in_degrees.end());
edge_t ret{0};
if (it != in_degrees.end()) { raft::update_host(&ret, it, 1, handle.get_stream()); }
handle.get_stream_view().synchronize();
return ret;
}
template <typename vertex_t,
typename edge_t,
typename weight_t,
bool store_transposed,
bool multi_gpu>
edge_t
graph_view_t<vertex_t, edge_t, weight_t, store_transposed, multi_gpu, std::enable_if_t<multi_gpu>>::
compute_max_out_degree(raft::handle_t const& handle) const
{
auto out_degrees = compute_out_degrees(handle);
auto it = thrust::max_element(
rmm::exec_policy(handle.get_stream_view()), out_degrees.begin(), out_degrees.end());
rmm::device_scalar<edge_t> ret(edge_t{0}, handle.get_stream());
device_allreduce(handle.get_comms(),
it != out_degrees.end() ? it : ret.data(),
ret.data(),
1,
raft::comms::op_t::MAX,
handle.get_stream());
return ret.value(handle.get_stream());
}
template <typename vertex_t,
typename edge_t,
typename weight_t,
bool store_transposed,
bool multi_gpu>
edge_t graph_view_t<vertex_t,
edge_t,
weight_t,
store_transposed,
multi_gpu,
std::enable_if_t<!multi_gpu>>::compute_max_out_degree(raft::handle_t const&
handle) const
{
auto out_degrees = compute_out_degrees(handle);
auto it = thrust::max_element(
rmm::exec_policy(handle.get_stream_view()), out_degrees.begin(), out_degrees.end());
edge_t ret{0};
if (it != out_degrees.end()) { raft::update_host(&ret, it, 1, handle.get_stream()); }
handle.get_stream_view().synchronize();
return ret;
}
template <typename vertex_t,
typename edge_t,
typename weight_t,
bool store_transposed,
bool multi_gpu>
weight_t
graph_view_t<vertex_t, edge_t, weight_t, store_transposed, multi_gpu, std::enable_if_t<multi_gpu>>::
compute_max_in_weight_sum(raft::handle_t const& handle) const
{
auto in_weight_sums = compute_in_weight_sums(handle);
auto it = thrust::max_element(
rmm::exec_policy(handle.get_stream_view()), in_weight_sums.begin(), in_weight_sums.end());
rmm::device_scalar<weight_t> ret(weight_t{0.0}, handle.get_stream());
device_allreduce(handle.get_comms(),
it != in_weight_sums.end() ? it : ret.data(),
ret.data(),
1,
raft::comms::op_t::MAX,
handle.get_stream());
return ret.value(handle.get_stream());
}
template <typename vertex_t,
typename edge_t,
typename weight_t,
bool store_transposed,
bool multi_gpu>
weight_t graph_view_t<vertex_t,
edge_t,
weight_t,
store_transposed,
multi_gpu,
std::enable_if_t<!multi_gpu>>::compute_max_in_weight_sum(raft::handle_t const&
handle) const
{
auto in_weight_sums = compute_in_weight_sums(handle);
auto it = thrust::max_element(
rmm::exec_policy(handle.get_stream_view()), in_weight_sums.begin(), in_weight_sums.end());
weight_t ret{0.0};
if (it != in_weight_sums.end()) { raft::update_host(&ret, it, 1, handle.get_stream()); }
handle.get_stream_view().synchronize();
return ret;
}
template <typename vertex_t,
typename edge_t,
typename weight_t,
bool store_transposed,
bool multi_gpu>
weight_t
graph_view_t<vertex_t, edge_t, weight_t, store_transposed, multi_gpu, std::enable_if_t<multi_gpu>>::
compute_max_out_weight_sum(raft::handle_t const& handle) const
{
auto out_weight_sums = compute_out_weight_sums(handle);
auto it = thrust::max_element(
rmm::exec_policy(handle.get_stream_view()), out_weight_sums.begin(), out_weight_sums.end());
rmm::device_scalar<weight_t> ret(weight_t{0.0}, handle.get_stream());
device_allreduce(handle.get_comms(),
it != out_weight_sums.end() ? it : ret.data(),
ret.data(),
1,
raft::comms::op_t::MAX,
handle.get_stream());
return ret.value(handle.get_stream());
}
template <typename vertex_t,
typename edge_t,
typename weight_t,
bool store_transposed,
bool multi_gpu>
weight_t graph_view_t<
vertex_t,
edge_t,
weight_t,
store_transposed,
multi_gpu,
std::enable_if_t<!multi_gpu>>::compute_max_out_weight_sum(raft::handle_t const& handle) const
{
auto out_weight_sums = compute_out_weight_sums(handle);
auto it = thrust::max_element(
rmm::exec_policy(handle.get_stream_view()), out_weight_sums.begin(), out_weight_sums.end());
weight_t ret{0.0};
if (it != out_weight_sums.end()) { raft::update_host(&ret, it, 1, handle.get_stream()); }
handle.get_stream_view().synchronize();
return ret;
}
// explicit instantiation
template class graph_view_t<int32_t, int32_t, float, true, true>;
template class graph_view_t<int32_t, int32_t, float, false, true>;
template class graph_view_t<int32_t, int32_t, double, true, true>;
template class graph_view_t<int32_t, int32_t, double, false, true>;
template class graph_view_t<int32_t, int64_t, float, true, true>;
template class graph_view_t<int32_t, int64_t, float, false, true>;
template class graph_view_t<int32_t, int64_t, double, true, true>;
template class graph_view_t<int32_t, int64_t, double, false, true>;
template class graph_view_t<int64_t, int64_t, float, true, true>;
template class graph_view_t<int64_t, int64_t, float, false, true>;
template class graph_view_t<int64_t, int64_t, double, true, true>;
template class graph_view_t<int64_t, int64_t, double, false, true>;
template class graph_view_t<int64_t, int32_t, float, true, true>;
template class graph_view_t<int64_t, int32_t, float, false, true>;
template class graph_view_t<int64_t, int32_t, double, true, true>;
template class graph_view_t<int64_t, int32_t, double, false, true>;
template class graph_view_t<int32_t, int32_t, float, true, false>;
template class graph_view_t<int32_t, int32_t, float, false, false>;
template class graph_view_t<int32_t, int32_t, double, true, false>;
template class graph_view_t<int32_t, int32_t, double, false, false>;
template class graph_view_t<int32_t, int64_t, float, true, false>;
template class graph_view_t<int32_t, int64_t, float, false, false>;
template class graph_view_t<int32_t, int64_t, double, true, false>;
template class graph_view_t<int32_t, int64_t, double, false, false>;
template class graph_view_t<int64_t, int64_t, float, true, false>;
template class graph_view_t<int64_t, int64_t, float, false, false>;
template class graph_view_t<int64_t, int64_t, double, true, false>;
template class graph_view_t<int64_t, int64_t, double, false, false>;
template class graph_view_t<int64_t, int32_t, float, true, false>;
template class graph_view_t<int64_t, int32_t, float, false, false>;
template class graph_view_t<int64_t, int32_t, double, true, false>;
template class graph_view_t<int64_t, int32_t, double, false, false>;
} // namespace experimental
} // namespace cugraph
| 2c18075371d4b908b454a2597cc73dd43bc9e1c0.cu | /*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cugraph/experimental/detail/graph_utils.cuh>
#include <cugraph/experimental/graph_view.hpp>
#include <cugraph/partition_manager.hpp>
#include <cugraph/prims/copy_v_transform_reduce_in_out_nbr.cuh>
#include <cugraph/utilities/error.hpp>
#include <cugraph/utilities/host_scalar_comm.cuh>
#include <raft/cudart_utils.h>
#include <raft/handle.hpp>
#include <rmm/device_scalar.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/count.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/sort.h>
#include <algorithm>
#include <cstdint>
#include <type_traits>
#include <vector>
namespace cugraph {
namespace experimental {
namespace {
// can't use lambda due to nvcc limitations (The enclosing parent function ("graph_view_t") for an
// extended __device__ lambda must allow its address to be taken)
template <typename vertex_t>
struct out_of_range_t {
vertex_t min{};
vertex_t max{};
__device__ bool operator()(vertex_t v) { return (v < min) || (v >= max); }
};
template <typename vertex_t, typename edge_t>
std::vector<edge_t> update_adj_matrix_partition_edge_counts(
std::vector<edge_t const*> const& adj_matrix_partition_offsets,
std::optional<std::vector<vertex_t>> const& adj_matrix_partition_dcs_nzd_vertex_counts,
partition_t<vertex_t> const& partition,
std::optional<std::vector<vertex_t>> const& adj_matrix_partition_segment_offsets,
cudaStream_t stream)
{
std::vector<edge_t> adj_matrix_partition_edge_counts(partition.get_number_of_matrix_partitions(),
0);
auto use_dcs = adj_matrix_partition_dcs_nzd_vertex_counts.has_value();
for (size_t i = 0; i < adj_matrix_partition_offsets.size(); ++i) {
auto [major_first, major_last] = partition.get_matrix_partition_major_range(i);
raft::update_host(&(adj_matrix_partition_edge_counts[i]),
adj_matrix_partition_offsets[i] +
(use_dcs ? ((*adj_matrix_partition_segment_offsets)
[(detail::num_sparse_segments_per_vertex_partition + 2) * i +
detail::num_sparse_segments_per_vertex_partition] +
(*adj_matrix_partition_dcs_nzd_vertex_counts)[i])
: (major_last - major_first)),
1,
stream);
}
CUDA_TRY(cudaStreamSynchronize(stream));
return adj_matrix_partition_edge_counts;
}
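// The local edge count of a partition is the last entry of its offsets array:
// offsets has one entry per stored major row plus one, so the index read above
// is (rows covered by the sparse segments) + (hypersparse DCS rows) when DCS
// is in use, and (major_last - major_first) otherwise; this matches the
// offset_array_size arithmetic in the expensive checks below. One host copy is
// issued per partition and all of them are synchronized once at the end.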
template <typename vertex_t,
typename edge_t,
typename weight_t,
bool store_transposed,
bool multi_gpu>
rmm::device_uvector<edge_t> compute_minor_degrees(
raft::handle_t const& handle,
graph_view_t<vertex_t, edge_t, weight_t, store_transposed, multi_gpu> const& graph_view)
{
rmm::device_uvector<edge_t> minor_degrees(graph_view.get_number_of_local_vertices(),
handle.get_stream());
if (store_transposed) {
copy_v_transform_reduce_out_nbr(
handle,
graph_view,
thrust::make_constant_iterator(0) /* dummy */,
thrust::make_constant_iterator(0) /* dummy */,
[] __device__(vertex_t src, vertex_t dst, weight_t w, auto src_val, auto dst_val) {
return edge_t{1};
},
edge_t{0},
minor_degrees.data());
} else {
copy_v_transform_reduce_in_nbr(
handle,
graph_view,
thrust::make_constant_iterator(0) /* dummy */,
thrust::make_constant_iterator(0) /* dummy */,
[] __device__(vertex_t src, vertex_t dst, weight_t w, auto src_val, auto dst_val) {
return edge_t{1};
},
edge_t{0},
minor_degrees.data());
}
return minor_degrees;
}
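// Minor-side degrees have no precomputed offsets, so they are derived by
// reducing a constant edge_t{1} over every edge incident to each minor
// vertex: copy_v_transform_reduce_{in,out}_nbr with a lambda that returns 1
// effectively counts edges per vertex. The constant iterators stand in for
// unused per-vertex properties.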
template <bool major,
typename vertex_t,
typename edge_t,
typename weight_t,
bool store_transposed,
bool multi_gpu>
rmm::device_uvector<weight_t> compute_weight_sums(
raft::handle_t const& handle,
graph_view_t<vertex_t, edge_t, weight_t, store_transposed, multi_gpu> const& graph_view)
{
rmm::device_uvector<weight_t> weight_sums(graph_view.get_number_of_local_vertices(),
handle.get_stream());
if (major == store_transposed) {
copy_v_transform_reduce_in_nbr(
handle,
graph_view,
thrust::make_constant_iterator(0) /* dummy */,
thrust::make_constant_iterator(0) /* dummy */,
[] __device__(vertex_t src, vertex_t dst, weight_t w, auto src_val, auto dst_val) {
return w;
},
weight_t{0.0},
weight_sums.data());
} else {
copy_v_transform_reduce_out_nbr(
handle,
graph_view,
thrust::make_constant_iterator(0) /* dummy */,
thrust::make_constant_iterator(0) /* dummy */,
[] __device__(vertex_t src, vertex_t dst, weight_t w, auto src_val, auto dst_val) {
return w;
},
weight_t{0.0},
weight_sums.data());
}
return weight_sums;
}
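// compute_weight_sums<major> reduces edge weights per vertex; the traversal
// direction over the stored matrix is picked so that the requested side is
// the one being summed: in-neighbors when major == store_transposed,
// out-neighbors otherwise. Keeping `major` a template parameter resolves the
// choice at compile time.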
} // namespace
template <typename vertex_t,
typename edge_t,
typename weight_t,
bool store_transposed,
bool multi_gpu>
graph_view_t<vertex_t, edge_t, weight_t, store_transposed, multi_gpu, std::enable_if_t<multi_gpu>>::
graph_view_t(
raft::handle_t const& handle,
std::vector<edge_t const*> const& adj_matrix_partition_offsets,
std::vector<vertex_t const*> const& adj_matrix_partition_indices,
std::optional<std::vector<weight_t const*>> const& adj_matrix_partition_weights,
std::optional<std::vector<vertex_t const*>> const& adj_matrix_partition_dcs_nzd_vertices,
std::optional<std::vector<vertex_t>> const& adj_matrix_partition_dcs_nzd_vertex_counts,
partition_t<vertex_t> const& partition,
vertex_t number_of_vertices,
edge_t number_of_edges,
graph_properties_t properties,
std::optional<std::vector<vertex_t>> const& adj_matrix_partition_segment_offsets,
bool do_expensive_check)
: detail::graph_base_t<vertex_t, edge_t, weight_t>(
handle, number_of_vertices, number_of_edges, properties),
adj_matrix_partition_offsets_(adj_matrix_partition_offsets),
adj_matrix_partition_indices_(adj_matrix_partition_indices),
adj_matrix_partition_weights_(adj_matrix_partition_weights),
adj_matrix_partition_dcs_nzd_vertices_(adj_matrix_partition_dcs_nzd_vertices),
adj_matrix_partition_dcs_nzd_vertex_counts_(adj_matrix_partition_dcs_nzd_vertex_counts),
adj_matrix_partition_number_of_edges_(
update_adj_matrix_partition_edge_counts(adj_matrix_partition_offsets,
adj_matrix_partition_dcs_nzd_vertex_counts,
partition,
adj_matrix_partition_segment_offsets,
handle.get_stream())),
partition_(partition),
adj_matrix_partition_segment_offsets_(adj_matrix_partition_segment_offsets)
{
// cheap error checks
auto const comm_size = this->get_handle_ptr()->get_comms().get_size();
auto const row_comm_size = this->get_handle_ptr()
->get_subcomm(cugraph::partition_2d::key_naming_t().row_name())
.get_size();
auto const col_comm_size = this->get_handle_ptr()
->get_subcomm(cugraph::partition_2d::key_naming_t().col_name())
.get_size();
auto is_weighted = adj_matrix_partition_weights.has_value();
auto use_dcs = adj_matrix_partition_dcs_nzd_vertices.has_value();
CUGRAPH_EXPECTS(adj_matrix_partition_offsets.size() == adj_matrix_partition_indices.size(),
"Internal Error: adj_matrix_partition_offsets.size() and "
"adj_matrix_partition_indices.size() should coincide.");
CUGRAPH_EXPECTS(
!is_weighted || ((*adj_matrix_partition_weights).size() == adj_matrix_partition_offsets.size()),
"Internal Error: adj_matrix_partition_weights.size() should coincide with "
"adj_matrix_partition_offsets.size() (if weighted).");
CUGRAPH_EXPECTS(adj_matrix_partition_dcs_nzd_vertex_counts.has_value() == use_dcs,
"adj_matrix_partition_dcs_nzd_vertices.has_value() and "
"adj_matrix_partition_dcs_nzd_vertex_counts.has_value() should coincide");
CUGRAPH_EXPECTS(!use_dcs || ((*adj_matrix_partition_dcs_nzd_vertices).size() ==
(*adj_matrix_partition_dcs_nzd_vertex_counts).size()),
"Internal Error: adj_matrix_partition_dcs_nzd_vertices.size() and "
"adj_matrix_partition_dcs_nzd_vertex_counts.size() should coincide (if used).");
CUGRAPH_EXPECTS(!use_dcs || ((*adj_matrix_partition_dcs_nzd_vertices).size() ==
adj_matrix_partition_offsets.size()),
"Internal Error: adj_matrix_partition_dcs_nzd_vertices.size() should coincide "
"with adj_matrix_partition_offsets.size() (if used).");
CUGRAPH_EXPECTS(adj_matrix_partition_offsets.size() == static_cast<size_t>(col_comm_size),
"Internal Error: erroneous adj_matrix_partition_offsets.size().");
CUGRAPH_EXPECTS(
!adj_matrix_partition_segment_offsets.has_value() ||
((*adj_matrix_partition_segment_offsets).size() ==
col_comm_size * (detail::num_sparse_segments_per_vertex_partition + (use_dcs ? 2 : 1))),
"Internal Error: invalid adj_matrix_partition_segment_offsets.size().");
// optional expensive checks
if (do_expensive_check) {
auto default_stream_view = this->get_handle_ptr()->get_stream_view();
auto const row_comm_rank = this->get_handle_ptr()
->get_subcomm(cugraph::partition_2d::key_naming_t().row_name())
.get_rank();
auto const col_comm_rank = this->get_handle_ptr()
->get_subcomm(cugraph::partition_2d::key_naming_t().col_name())
.get_rank();
edge_t number_of_local_edges_sum{};
for (size_t i = 0; i < adj_matrix_partition_offsets.size(); ++i) {
auto [major_first, major_last] = partition.get_matrix_partition_major_range(i);
auto [minor_first, minor_last] = partition.get_matrix_partition_minor_range();
auto offset_array_size = major_last - major_first + 1;
if (use_dcs) {
auto major_hypersparse_first =
major_first + (*adj_matrix_partition_segment_offsets)
[(detail::num_sparse_segments_per_vertex_partition + 2) * i +
detail::num_sparse_segments_per_vertex_partition];
offset_array_size = major_hypersparse_first - major_first +
(*adj_matrix_partition_dcs_nzd_vertex_counts)[i] + 1;
}
CUGRAPH_EXPECTS(thrust::is_sorted(rmm::exec_policy(default_stream_view),
adj_matrix_partition_offsets[i],
adj_matrix_partition_offsets[i] + offset_array_size),
"Internal Error: adj_matrix_partition_offsets[] is not sorted.");
edge_t number_of_local_edges{};
raft::update_host(&number_of_local_edges,
adj_matrix_partition_offsets[i] + offset_array_size - 1,
1,
default_stream_view.value());
default_stream_view.synchronize();
number_of_local_edges_sum += number_of_local_edges;
// better use thrust::any_of once https://github.com/thrust/thrust/issues/1016 is resolved
CUGRAPH_EXPECTS(
thrust::count_if(rmm::exec_policy(default_stream_view),
adj_matrix_partition_indices[i],
adj_matrix_partition_indices[i] + number_of_local_edges,
out_of_range_t<vertex_t>{minor_first, minor_last}) == 0,
"Internal Error: adj_matrix_partition_indices[] have out-of-range vertex IDs.");
}
number_of_local_edges_sum = host_scalar_allreduce(
this->get_handle_ptr()->get_comms(), number_of_local_edges_sum, default_stream_view.value());
CUGRAPH_EXPECTS(number_of_local_edges_sum == this->get_number_of_edges(),
"Internal Error: the sum of local edges counts does not match with "
"number_of_local_edges.");
if (adj_matrix_partition_segment_offsets) {
auto degrees = detail::compute_major_degrees(handle,
adj_matrix_partition_offsets,
adj_matrix_partition_dcs_nzd_vertices,
adj_matrix_partition_dcs_nzd_vertex_counts,
partition,
adj_matrix_partition_segment_offsets);
CUGRAPH_EXPECTS(thrust::is_sorted(rmm::exec_policy(default_stream_view),
degrees.begin(),
degrees.end(),
thrust::greater<edge_t>{}),
"Invalid Invalid input argument: adj_matrix_partition_segment_offsets are "
"provided, but degrees are not in descending order.");
auto num_segments_per_vertex_partition =
detail::num_sparse_segments_per_vertex_partition + (use_dcs ? 1 : 0);
for (int i = 0; i < col_comm_size; ++i) {
CUGRAPH_EXPECTS(std::is_sorted((*adj_matrix_partition_segment_offsets).begin() +
(num_segments_per_vertex_partition + 1) * i,
(*adj_matrix_partition_segment_offsets).begin() +
(num_segments_per_vertex_partition + 1) * (i + 1)),
"Internal Error: erroneous adj_matrix_partition_segment_offsets.");
CUGRAPH_EXPECTS(
(*adj_matrix_partition_segment_offsets)[(num_segments_per_vertex_partition + 1) * i] == 0,
"Internal Error: erroneous adj_matrix_partition_segment_offsets.");
auto vertex_partition_idx = row_comm_size * i + row_comm_rank;
CUGRAPH_EXPECTS(
(*adj_matrix_partition_segment_offsets)[(num_segments_per_vertex_partition + 1) * i +
num_segments_per_vertex_partition] ==
partition.get_vertex_partition_size(vertex_partition_idx),
"Internal Error: erroneous adj_matrix_partition_segment_offsets.");
}
}
CUGRAPH_EXPECTS(partition.get_vertex_partition_last(comm_size - 1) == number_of_vertices,
"Internal Error: vertex partition should cover [0, number_of_vertices).");
// FIXME: check for symmetry may better be implemented with transpose().
if (this->is_symmetric()) {}
// FIXME: check for duplicate edges may better be implemented after deciding whether to sort
// neighbor list or not.
if (!this->is_multigraph()) {}
}
}
template <typename vertex_t,
typename edge_t,
typename weight_t,
bool store_transposed,
bool multi_gpu>
graph_view_t<vertex_t,
edge_t,
weight_t,
store_transposed,
multi_gpu,
std::enable_if_t<!multi_gpu>>::graph_view_t(raft::handle_t const& handle,
edge_t const* offsets,
vertex_t const* indices,
std::optional<weight_t const*> weights,
vertex_t number_of_vertices,
edge_t number_of_edges,
graph_properties_t properties,
std::optional<std::vector<vertex_t>> const&
segment_offsets,
bool do_expensive_check)
: detail::graph_base_t<vertex_t, edge_t, weight_t>(
handle, number_of_vertices, number_of_edges, properties),
offsets_(offsets),
indices_(indices),
weights_(weights),
segment_offsets_(segment_offsets)
{
// cheap error checks
CUGRAPH_EXPECTS(
!segment_offsets.has_value() ||
((*segment_offsets).size() == (detail::num_sparse_segments_per_vertex_partition + 1)),
"Internal Error: segment_offsets.size() returns an invalid value.");
// optional expensive checks
if (do_expensive_check) {
auto default_stream_view = this->get_handle_ptr()->get_stream_view();
CUGRAPH_EXPECTS(thrust::is_sorted(rmm::exec_policy(default_stream_view),
offsets,
offsets + (this->get_number_of_vertices() + 1)),
"Internal Error: offsets is not sorted.");
// better use thrust::any_of once https://github.com/thrust/thrust/issues/1016 is resolved
CUGRAPH_EXPECTS(
thrust::count_if(rmm::exec_policy(default_stream_view),
indices,
indices + this->get_number_of_edges(),
out_of_range_t<vertex_t>{0, this->get_number_of_vertices()}) == 0,
"Internal Error: adj_matrix_partition_indices[] have out-of-range vertex IDs.");
if (segment_offsets) {
auto degrees = detail::compute_major_degrees(handle, offsets, number_of_vertices);
CUGRAPH_EXPECTS(thrust::is_sorted(rmm::exec_policy(default_stream_view),
degrees.begin(),
degrees.end(),
thrust::greater<edge_t>{}),
"Invalid Invalid input argument: segment_offsets are provided, but degrees "
"are not in descending order.");
CUGRAPH_EXPECTS(std::is_sorted((*segment_offsets).begin(), (*segment_offsets).end()),
"Internal Error: erroneous segment_offsets.");
CUGRAPH_EXPECTS((*segment_offsets)[0] == 0, "Invalid input argument: segment_offsets.");
CUGRAPH_EXPECTS((*segment_offsets).back() == this->get_number_of_vertices(),
"Invalid input argument: segment_offsets.");
}
// FIXME: check for symmetry may better be implemented with transpose().
if (this->is_symmetric()) {}
// FIXME: check for duplicate edges may better be implemented after deciding whether to sort
// neighbor list or not.
if (!this->is_multigraph()) {}
}
}
template <typename vertex_t,
typename edge_t,
typename weight_t,
bool store_transposed,
bool multi_gpu>
rmm::device_uvector<edge_t>
graph_view_t<vertex_t, edge_t, weight_t, store_transposed, multi_gpu, std::enable_if_t<multi_gpu>>::
compute_in_degrees(raft::handle_t const& handle) const
{
if (store_transposed) {
return detail::compute_major_degrees(handle,
this->adj_matrix_partition_offsets_,
this->adj_matrix_partition_dcs_nzd_vertices_,
this->adj_matrix_partition_dcs_nzd_vertex_counts_,
this->partition_,
this->adj_matrix_partition_segment_offsets_);
} else {
return compute_minor_degrees(handle, *this);
}
}
template <typename vertex_t,
typename edge_t,
typename weight_t,
bool store_transposed,
bool multi_gpu>
rmm::device_uvector<edge_t>
graph_view_t<vertex_t,
edge_t,
weight_t,
store_transposed,
multi_gpu,
std::enable_if_t<!multi_gpu>>::compute_in_degrees(raft::handle_t const& handle) const
{
if (store_transposed) {
return detail::compute_major_degrees(
handle, this->offsets_, this->get_number_of_local_vertices());
} else {
return compute_minor_degrees(handle, *this);
}
}
template <typename vertex_t,
typename edge_t,
typename weight_t,
bool store_transposed,
bool multi_gpu>
rmm::device_uvector<edge_t>
graph_view_t<vertex_t, edge_t, weight_t, store_transposed, multi_gpu, std::enable_if_t<multi_gpu>>::
compute_out_degrees(raft::handle_t const& handle) const
{
if (store_transposed) {
return compute_minor_degrees(handle, *this);
} else {
return detail::compute_major_degrees(handle,
this->adj_matrix_partition_offsets_,
this->adj_matrix_partition_dcs_nzd_vertices_,
this->adj_matrix_partition_dcs_nzd_vertex_counts_,
this->partition_,
this->adj_matrix_partition_segment_offsets_);
}
}
template <typename vertex_t,
typename edge_t,
typename weight_t,
bool store_transposed,
bool multi_gpu>
rmm::device_uvector<edge_t>
graph_view_t<vertex_t,
edge_t,
weight_t,
store_transposed,
multi_gpu,
std::enable_if_t<!multi_gpu>>::compute_out_degrees(raft::handle_t const& handle) const
{
if (store_transposed) {
return compute_minor_degrees(handle, *this);
} else {
return detail::compute_major_degrees(
handle, this->offsets_, this->get_number_of_local_vertices());
}
}
template <typename vertex_t,
typename edge_t,
typename weight_t,
bool store_transposed,
bool multi_gpu>
rmm::device_uvector<weight_t>
graph_view_t<vertex_t, edge_t, weight_t, store_transposed, multi_gpu, std::enable_if_t<multi_gpu>>::
compute_in_weight_sums(raft::handle_t const& handle) const
{
if (store_transposed) {
return compute_weight_sums<true>(handle, *this);
} else {
return compute_weight_sums<false>(handle, *this);
}
}
template <typename vertex_t,
typename edge_t,
typename weight_t,
bool store_transposed,
bool multi_gpu>
rmm::device_uvector<weight_t> graph_view_t<
vertex_t,
edge_t,
weight_t,
store_transposed,
multi_gpu,
std::enable_if_t<!multi_gpu>>::compute_in_weight_sums(raft::handle_t const& handle) const
{
if (store_transposed) {
return compute_weight_sums<true>(handle, *this);
} else {
return compute_weight_sums<false>(handle, *this);
}
}
template <typename vertex_t,
typename edge_t,
typename weight_t,
bool store_transposed,
bool multi_gpu>
rmm::device_uvector<weight_t>
graph_view_t<vertex_t, edge_t, weight_t, store_transposed, multi_gpu, std::enable_if_t<multi_gpu>>::
compute_out_weight_sums(raft::handle_t const& handle) const
{
if (store_transposed) {
return compute_weight_sums<false>(handle, *this);
} else {
return compute_weight_sums<true>(handle, *this);
}
}
template <typename vertex_t,
typename edge_t,
typename weight_t,
bool store_transposed,
bool multi_gpu>
rmm::device_uvector<weight_t> graph_view_t<
vertex_t,
edge_t,
weight_t,
store_transposed,
multi_gpu,
std::enable_if_t<!multi_gpu>>::compute_out_weight_sums(raft::handle_t const& handle) const
{
if (store_transposed) {
return compute_weight_sums<false>(handle, *this);
} else {
return compute_weight_sums<true>(handle, *this);
}
}
template <typename vertex_t,
typename edge_t,
typename weight_t,
bool store_transposed,
bool multi_gpu>
edge_t
graph_view_t<vertex_t, edge_t, weight_t, store_transposed, multi_gpu, std::enable_if_t<multi_gpu>>::
compute_max_in_degree(raft::handle_t const& handle) const
{
auto in_degrees = compute_in_degrees(handle);
auto it = thrust::max_element(
rmm::exec_policy(handle.get_stream_view()), in_degrees.begin(), in_degrees.end());
rmm::device_scalar<edge_t> ret(edge_t{0}, handle.get_stream());
device_allreduce(handle.get_comms(),
it != in_degrees.end() ? it : ret.data(),
ret.data(),
1,
raft::comms::op_t::MAX,
handle.get_stream());
return ret.value(handle.get_stream());
}
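// Multi-GPU reduction pattern: each rank finds its local maximum with
// thrust::max_element, then device_allreduce with op_t::MAX combines the
// per-rank results. If the local degree vector is empty, max_element returns
// end() and ret.data() (initialized to 0) is substituted as the send buffer,
// so the rank still participates in the collective with a neutral element.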
template <typename vertex_t,
typename edge_t,
typename weight_t,
bool store_transposed,
bool multi_gpu>
edge_t graph_view_t<vertex_t,
edge_t,
weight_t,
store_transposed,
multi_gpu,
std::enable_if_t<!multi_gpu>>::compute_max_in_degree(raft::handle_t const&
handle) const
{
auto in_degrees = compute_in_degrees(handle);
auto it = thrust::max_element(
rmm::exec_policy(handle.get_stream_view()), in_degrees.begin(), in_degrees.end());
edge_t ret{0};
if (it != in_degrees.end()) { raft::update_host(&ret, it, 1, handle.get_stream()); }
handle.get_stream_view().synchronize();
return ret;
}
template <typename vertex_t,
typename edge_t,
typename weight_t,
bool store_transposed,
bool multi_gpu>
edge_t
graph_view_t<vertex_t, edge_t, weight_t, store_transposed, multi_gpu, std::enable_if_t<multi_gpu>>::
compute_max_out_degree(raft::handle_t const& handle) const
{
auto out_degrees = compute_out_degrees(handle);
auto it = thrust::max_element(
rmm::exec_policy(handle.get_stream_view()), out_degrees.begin(), out_degrees.end());
rmm::device_scalar<edge_t> ret(edge_t{0}, handle.get_stream());
device_allreduce(handle.get_comms(),
it != out_degrees.end() ? it : ret.data(),
ret.data(),
1,
raft::comms::op_t::MAX,
handle.get_stream());
return ret.value(handle.get_stream());
}
template <typename vertex_t,
typename edge_t,
typename weight_t,
bool store_transposed,
bool multi_gpu>
edge_t graph_view_t<vertex_t,
edge_t,
weight_t,
store_transposed,
multi_gpu,
std::enable_if_t<!multi_gpu>>::compute_max_out_degree(raft::handle_t const&
handle) const
{
auto out_degrees = compute_out_degrees(handle);
auto it = thrust::max_element(
rmm::exec_policy(handle.get_stream_view()), out_degrees.begin(), out_degrees.end());
edge_t ret{0};
if (it != out_degrees.end()) { raft::update_host(&ret, it, 1, handle.get_stream()); }
handle.get_stream_view().synchronize();
return ret;
}
template <typename vertex_t,
typename edge_t,
typename weight_t,
bool store_transposed,
bool multi_gpu>
weight_t
graph_view_t<vertex_t, edge_t, weight_t, store_transposed, multi_gpu, std::enable_if_t<multi_gpu>>::
compute_max_in_weight_sum(raft::handle_t const& handle) const
{
auto in_weight_sums = compute_in_weight_sums(handle);
auto it = thrust::max_element(
rmm::exec_policy(handle.get_stream_view()), in_weight_sums.begin(), in_weight_sums.end());
rmm::device_scalar<weight_t> ret(weight_t{0.0}, handle.get_stream());
device_allreduce(handle.get_comms(),
it != in_weight_sums.end() ? it : ret.data(),
ret.data(),
1,
raft::comms::op_t::MAX,
handle.get_stream());
return ret.value(handle.get_stream());
}
template <typename vertex_t,
typename edge_t,
typename weight_t,
bool store_transposed,
bool multi_gpu>
weight_t graph_view_t<vertex_t,
edge_t,
weight_t,
store_transposed,
multi_gpu,
std::enable_if_t<!multi_gpu>>::compute_max_in_weight_sum(raft::handle_t const&
handle) const
{
auto in_weight_sums = compute_in_weight_sums(handle);
auto it = thrust::max_element(
rmm::exec_policy(handle.get_stream_view()), in_weight_sums.begin(), in_weight_sums.end());
weight_t ret{0.0};
if (it != in_weight_sums.end()) { raft::update_host(&ret, it, 1, handle.get_stream()); }
handle.get_stream_view().synchronize();
return ret;
}
template <typename vertex_t,
typename edge_t,
typename weight_t,
bool store_transposed,
bool multi_gpu>
weight_t
graph_view_t<vertex_t, edge_t, weight_t, store_transposed, multi_gpu, std::enable_if_t<multi_gpu>>::
compute_max_out_weight_sum(raft::handle_t const& handle) const
{
auto out_weight_sums = compute_out_weight_sums(handle);
auto it = thrust::max_element(
rmm::exec_policy(handle.get_stream_view()), out_weight_sums.begin(), out_weight_sums.end());
rmm::device_scalar<weight_t> ret(weight_t{0.0}, handle.get_stream());
device_allreduce(handle.get_comms(),
it != out_weight_sums.end() ? it : ret.data(),
ret.data(),
1,
raft::comms::op_t::MAX,
handle.get_stream());
return ret.value(handle.get_stream());
}
template <typename vertex_t,
typename edge_t,
typename weight_t,
bool store_transposed,
bool multi_gpu>
weight_t graph_view_t<
vertex_t,
edge_t,
weight_t,
store_transposed,
multi_gpu,
std::enable_if_t<!multi_gpu>>::compute_max_out_weight_sum(raft::handle_t const& handle) const
{
auto out_weight_sums = compute_out_weight_sums(handle);
auto it = thrust::max_element(
rmm::exec_policy(handle.get_stream_view()), out_weight_sums.begin(), out_weight_sums.end());
weight_t ret{0.0};
if (it != out_weight_sums.end()) { raft::update_host(&ret, it, 1, handle.get_stream()); }
handle.get_stream_view().synchronize();
return ret;
}
// explicit instantiation
template class graph_view_t<int32_t, int32_t, float, true, true>;
template class graph_view_t<int32_t, int32_t, float, false, true>;
template class graph_view_t<int32_t, int32_t, double, true, true>;
template class graph_view_t<int32_t, int32_t, double, false, true>;
template class graph_view_t<int32_t, int64_t, float, true, true>;
template class graph_view_t<int32_t, int64_t, float, false, true>;
template class graph_view_t<int32_t, int64_t, double, true, true>;
template class graph_view_t<int32_t, int64_t, double, false, true>;
template class graph_view_t<int64_t, int64_t, float, true, true>;
template class graph_view_t<int64_t, int64_t, float, false, true>;
template class graph_view_t<int64_t, int64_t, double, true, true>;
template class graph_view_t<int64_t, int64_t, double, false, true>;
template class graph_view_t<int64_t, int32_t, float, true, true>;
template class graph_view_t<int64_t, int32_t, float, false, true>;
template class graph_view_t<int64_t, int32_t, double, true, true>;
template class graph_view_t<int64_t, int32_t, double, false, true>;
template class graph_view_t<int32_t, int32_t, float, true, false>;
template class graph_view_t<int32_t, int32_t, float, false, false>;
template class graph_view_t<int32_t, int32_t, double, true, false>;
template class graph_view_t<int32_t, int32_t, double, false, false>;
template class graph_view_t<int32_t, int64_t, float, true, false>;
template class graph_view_t<int32_t, int64_t, float, false, false>;
template class graph_view_t<int32_t, int64_t, double, true, false>;
template class graph_view_t<int32_t, int64_t, double, false, false>;
template class graph_view_t<int64_t, int64_t, float, true, false>;
template class graph_view_t<int64_t, int64_t, float, false, false>;
template class graph_view_t<int64_t, int64_t, double, true, false>;
template class graph_view_t<int64_t, int64_t, double, false, false>;
template class graph_view_t<int64_t, int32_t, float, true, false>;
template class graph_view_t<int64_t, int32_t, float, false, false>;
template class graph_view_t<int64_t, int32_t, double, true, false>;
template class graph_view_t<int64_t, int32_t, double, false, false>;
} // namespace experimental
} // namespace cugraph
|
8bd2a62cb7811d9a553e777559a3521e01f2f5ad.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Author: Mikers
Date: March 4, 2018, for 0xbitcoin dev
based off of https://github.com/Dunhili/SHA3-gpu-brute-force-cracker/blob/master/sha3.cu
* Author: Brian Bowden
* Date: 5/12/14
*
* This is the parallel version of SHA-3.
*/
#include <iostream>
#include <sstream>
#include <iomanip>
#include <cstring>
#include "cudasolver.h"
#ifdef __INTELLISENSE__
/* reduce vstudio warnings (__byteperm, blockIdx...) */
# include <hip/device_functions.h>
# include <device_launch_parameters.h>
# include <hip/hip_runtime.h>
# include <hip/hip_runtime.h>
#endif //__INTELLISENSE__
#define cudaSafeCall(err) __cudaSafeCall(err, __FILE__, __LINE__, m_device)
__host__ inline
auto __cudaSafeCall( hipError_t err, char const* file, int32_t const line, int32_t device_id ) -> void
{
#ifndef CUDA_NDEBUG
if (hipSuccess != err) {
std::cerr << "CUDA device " << device_id
<< " encountered an error in file '" << file
<< "' in line " << line
<< " : " << hipGetErrorString( err ) << ".\n";
exit(EXIT_FAILURE);
}
#endif
}
__constant__ uint64_t d_mid[25];
__constant__ uint64_t d_target;
__device__ __constant__ uint64_t const RC[24] = {
0x0000000000000001, 0x0000000000008082, 0x800000000000808a,
0x8000000080008000, 0x000000000000808b, 0x0000000080000001,
0x8000000080008081, 0x8000000000008009, 0x000000000000008a,
0x0000000000000088, 0x0000000080008009, 0x000000008000000a,
0x000000008000808b, 0x800000000000008b, 0x8000000000008089,
0x8000000000008003, 0x8000000000008002, 0x8000000000000080,
0x000000000000800a, 0x800000008000000a, 0x8000000080008081,
0x8000000000008080, 0x0000000080000001, 0x8000000080008008
};
__device__ __forceinline__
auto ROTL64( uint64_t x, uint32_t y ) -> uint64_t const
{
return (x << y) ^ (x >> (64 - y));
}
__device__ __forceinline__
auto ROTR64( uint64_t x, uint32_t y ) -> uint64_t const
{
return (x >> y) ^ (x << (64 - y));
}
__device__ __forceinline__
auto bswap_64( uint64_t const input ) -> uint64_t const
{
uint64_t output;
asm( "{"
" prmt.b32 %0, %3, 0, 0x0123;"
" prmt.b32 %1, %2, 0, 0x0123;"
"}" : "=r"(reinterpret_cast<uint2&>(output).x), "=r"(reinterpret_cast<uint2&>(output).y)
: "r"(reinterpret_cast<uint2 const&>(input).x), "r"(reinterpret_cast<uint2 const&>(input).y) );
return output;
}
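// The PTX prmt.b32 selector 0x0123 picks the source bytes in reverse order
// (byte 3, 2, 1, 0), and the two 32-bit halves are cross-assigned, so the
// pair of permutes performs a full 64-bit byte swap in two instructions.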
__device__ __forceinline__
auto bswap_32( uint32_t const input ) -> uint32_t const
{
uint32_t output;
asm( "prmt.b32 %0, %1, 0, 0x0123;" : "=r"(output) : "r"(input) );
return output;
}
__device__ __forceinline__
auto xor5( uint64_t const a, uint64_t const b, uint64_t const c, uint64_t const d, uint64_t const e ) -> uint64_t const
{
uint64_t output;
asm( "{"
" xor.b64 %0, %1, %2;"
" xor.b64 %0, %0, %3;"
" xor.b64 %0, %0, %4;"
" xor.b64 %0, %0, %5;"
"}" : "=l"(output) : "l"(a), "l"(b), "l"(c), "l"(d), "l"(e) );
return output;
}
__device__ __forceinline__
auto xor3( uint64_t const a, uint64_t const b, uint64_t const c ) -> uint64_t const
{
uint64_t output;
#if __CUDA_ARCH__ >= 500
asm( "{"
" lop3.b32 %0, %2, %4, %6, 0x96;"
" lop3.b32 %1, %3, %5, %7, 0x96;"
"}" : "=r"(reinterpret_cast<uint2&>(output).x), "=r"(reinterpret_cast<uint2&>(output).y)
: "r"(reinterpret_cast<uint2 const&>(a).x), "r"(reinterpret_cast<uint2 const&>(a).y),
"r"(reinterpret_cast<uint2 const&>(b).x), "r"(reinterpret_cast<uint2 const&>(b).y),
"r"(reinterpret_cast<uint2 const&>(c).x), "r"(reinterpret_cast<uint2 const&>(c).y) );
#else
asm( "{"
" xor.b64 %0, %1, %2;"
" xor.b64 %0, %0, %3;"
"}" : "=l"(output) : "l"(a), "l"(b), "l"(c) );
#endif
return output;
}
__device__ __forceinline__
auto chi( uint64_t const a, uint64_t const b, uint64_t const c ) -> uint64_t const
{
#if __CUDA_ARCH__ >= 500
uint64_t output;
asm( "{"
" lop3.b32 %0, %2, %4, %6, 0xD2;"
" lop3.b32 %1, %3, %5, %7, 0xD2;"
"}" : "=r"(reinterpret_cast<uint2&>(output).x), "=r"(reinterpret_cast<uint2&>(output).y)
: "r"(reinterpret_cast<uint2 const&>(a).x), "r"(reinterpret_cast<uint2 const&>(a).y),
"r"(reinterpret_cast<uint2 const&>(b).x), "r"(reinterpret_cast<uint2 const&>(b).y),
"r"(reinterpret_cast<uint2 const&>(c).x), "r"(reinterpret_cast<uint2 const&>(c).y) );
return output;
#else
return a ^ ((~b) & c);
#endif
}
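// Note on the lop3 immediates used above (sm_50+): lop3.b32 evaluates an
// arbitrary 3-input boolean function whose truth table is given by the
// immediate, with canonical inputs a = 0xF0, b = 0xCC, c = 0xAA:
//   0xF0 ^ 0xCC ^ 0xAA        = 0x96  -> three-way XOR (xor3)
//   0xF0 ^ ((~0xCC) & 0xAA)   = 0xD2  -> Keccak chi, a ^ (~b & c)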
__global__
void cuda_mine( uint64_t* __restrict__ solution, uint32_t* __restrict__ solution_count, uint64_t const threads )
{
uint64_t const nounce{ threads + (blockDim.x * blockIdx.x + threadIdx.x) };
uint64_t state[25], C[5], D[5];
uint64_t n[11] { ROTL64(nounce, 7) };
n[ 1] = ROTL64(n[ 0], 1);
n[ 2] = ROTL64(n[ 1], 6);
n[ 3] = ROTL64(n[ 2], 2);
n[ 4] = ROTL64(n[ 3], 4);
n[ 5] = ROTL64(n[ 4], 7);
n[ 6] = ROTL64(n[ 5], 12);
n[ 7] = ROTL64(n[ 6], 5);
n[ 8] = ROTL64(n[ 7], 11);
n[ 9] = ROTL64(n[ 8], 7);
n[10] = ROTL64(n[ 9], 1);
C[0] = d_mid[ 0];
C[1] = d_mid[ 1];
C[2] = d_mid[ 2] ^ n[ 7];
C[3] = d_mid[ 3];
C[4] = d_mid[ 4] ^ n[ 2];
state[ 0] = chi( C[0], C[1], C[2] ) ^ RC[0];
state[ 1] = chi( C[1], C[2], C[3] );
state[ 2] = chi( C[2], C[3], C[4] );
state[ 3] = chi( C[3], C[4], C[0] );
state[ 4] = chi( C[4], C[0], C[1] );
C[0] = d_mid[ 5];
C[1] = d_mid[ 6] ^ n[ 4];
C[2] = d_mid[ 7];
C[3] = d_mid[ 8];
C[4] = d_mid[ 9] ^ n[ 9];
state[ 5] = chi( C[0], C[1], C[2] );
state[ 6] = chi( C[1], C[2], C[3] );
state[ 7] = chi( C[2], C[3], C[4] );
state[ 8] = chi( C[3], C[4], C[0] );
state[ 9] = chi( C[4], C[0], C[1] );
C[0] = d_mid[10];
C[1] = d_mid[11] ^ n[ 0];
C[2] = d_mid[12];
C[3] = d_mid[13] ^ n[ 1];
C[4] = d_mid[14];
state[10] = chi( C[0], C[1], C[2] );
state[11] = chi( C[1], C[2], C[3] );
state[12] = chi( C[2], C[3], C[4] );
state[13] = chi( C[3], C[4], C[0] );
state[14] = chi( C[4], C[0], C[1] );
C[0] = d_mid[15] ^ n[ 5];
C[1] = d_mid[16];
C[2] = d_mid[17];
C[3] = d_mid[18] ^ n[ 3];
C[4] = d_mid[19];
state[15] = chi( C[0], C[1], C[2] );
state[16] = chi( C[1], C[2], C[3] );
state[17] = chi( C[2], C[3], C[4] );
state[18] = chi( C[3], C[4], C[0] );
state[19] = chi( C[4], C[0], C[1] );
C[0] = d_mid[20] ^ n[10];
C[1] = d_mid[21] ^ n[ 8];
C[2] = d_mid[22] ^ n[ 6];
C[3] = d_mid[23];
C[4] = d_mid[24];
state[20] = chi( C[0], C[1], C[2] );
state[21] = chi( C[1], C[2], C[3] );
state[22] = chi( C[2], C[3], C[4] );
state[23] = chi( C[3], C[4], C[0] );
state[24] = chi( C[4], C[0], C[1] );
#if __CUDA_ARCH__ >= 350
# pragma unroll
#endif
for( uint_fast8_t i{ 1 }; i < 23; ++i )
{
for( uint_fast8_t x{ 0 }; x < 5; ++x )
{
C[(x + 6) % 5] = xor5( state[x], state[x + 5], state[x + 10], state[x + 15], state[x + 20] );
}
#if __CUDA_ARCH__ >= 350
for( uint_fast8_t x{ 0 }; x < 5; ++x )
{
D[x] = ROTL64(C[(x + 2) % 5], 1);
state[x] = xor3( state[x] , D[x], C[x] );
state[x + 5] = xor3( state[x + 5], D[x], C[x] );
state[x + 10] = xor3( state[x + 10], D[x], C[x] );
state[x + 15] = xor3( state[x + 15], D[x], C[x] );
state[x + 20] = xor3( state[x + 20], D[x], C[x] );
}
#else
for( uint_fast8_t x{ 0 }; x < 5; ++x )
{
D[x] = ROTL64(C[(x + 2) % 5], 1) ^ C[x];
state[x] = state[x] ^ D[x];
state[x + 5] = state[x + 5] ^ D[x];
state[x + 10] = state[x + 10] ^ D[x];
state[x + 15] = state[x + 15] ^ D[x];
state[x + 20] = state[x + 20] ^ D[x];
}
#endif
C[0] = state[1];
state[ 1] = ROTR64( state[ 6], 20 );
state[ 6] = ROTL64( state[ 9], 20 );
state[ 9] = ROTR64( state[22], 3 );
state[22] = ROTR64( state[14], 25 );
state[14] = ROTL64( state[20], 18 );
state[20] = ROTR64( state[ 2], 2 );
state[ 2] = ROTR64( state[12], 21 );
state[12] = ROTL64( state[13], 25 );
state[13] = ROTL64( state[19], 8 );
state[19] = ROTR64( state[23], 8 );
state[23] = ROTR64( state[15], 23 );
state[15] = ROTL64( state[ 4], 27 );
state[ 4] = ROTL64( state[24], 14 );
state[24] = ROTL64( state[21], 2 );
state[21] = ROTR64( state[ 8], 9 );
state[ 8] = ROTR64( state[16], 19 );
state[16] = ROTR64( state[ 5], 28 );
state[ 5] = ROTL64( state[ 3], 28 );
state[ 3] = ROTL64( state[18], 21 );
state[18] = ROTL64( state[17], 15 );
state[17] = ROTL64( state[11], 10 );
state[11] = ROTL64( state[ 7], 6 );
state[ 7] = ROTL64( state[10], 3 );
state[10] = ROTL64( C[0], 1 );
for( uint_fast8_t x{ 0 }; x < 25; x += 5 )
{
C[0] = state[x];
C[1] = state[x + 1];
C[2] = state[x + 2];
C[3] = state[x + 3];
C[4] = state[x + 4];
state[x] = chi( C[0], C[1], C[2] );
state[x + 1] = chi( C[1], C[2], C[3] );
state[x + 2] = chi( C[2], C[3], C[4] );
state[x + 3] = chi( C[3], C[4], C[0] );
state[x + 4] = chi( C[4], C[0], C[1] );
}
state[0] = state[0] ^ RC[i];
}
for( uint_fast8_t x{ 0 }; x < 5; ++x )
{
C[(x + 6) % 5] = xor5( state[x], state[x + 5], state[x + 10], state[x + 15], state[x + 20] );
}
D[0] = ROTL64(C[2], 1);
D[1] = ROTL64(C[3], 1);
D[2] = ROTL64(C[4], 1);
state[ 0] = xor3( state[ 0], D[0], C[0] );
state[ 6] = xor3( state[ 6], D[1], C[1] );
state[12] = xor3( state[12], D[2], C[2] );
state[ 6] = ROTR64(state[ 6], 20);
state[12] = ROTR64(state[12], 21);
state[ 0] = chi( state[ 0], state[ 6], state[12] ) ^ RC[23];
if( bswap_64( state[0] ) <= d_target )
{
uint64_t cIdx{ atomicAdd( solution_count, 1 ) };
if( cIdx >= 256 ) return;
solution[cIdx] = nounce;
}
}
// --------------------------------------------------------------------
auto CUDASolver::cudaInit() -> void
{
hipSetDevice( m_device );
hipDeviceProp_t device_prop;
cudaSafeCall( hipGetDeviceProperties( &device_prop, m_device ) );
int32_t compute_version{ device_prop.major * 100 + device_prop.minor * 10 };
if( compute_version <= 500 )
{
m_intensity = m_intensity <= 40.55 ? m_intensity : 40.55;
m_threads = static_cast<uint64_t>( ::pow( 2, m_intensity <= 40.55 ? m_intensity : 40.55 ) );
}
m_block.x = compute_version > 500 ? TPB50 : TPB35;
m_grid.x = uint32_t((m_threads + m_block.x - 1) / m_block.x);
if( !m_gpu_initialized )
{
// CPU usage goes _insane_ without this.
cudaSafeCall( hipDeviceReset() );
// so we don't actually _use_ L1 or local memory . . .
cudaSafeCall( hipSetDeviceFlags( hipDeviceScheduleBlockingSync ) );
cudaSafeCall( hipMalloc( reinterpret_cast<void**>(&d_solution_count), 4 ) );
cudaSafeCall( hipHostMalloc( reinterpret_cast<void**>(&h_solution_count), 4 ) );
cudaSafeCall( hipMalloc( reinterpret_cast<void**>(&d_solutions), 256*8 ) );
cudaSafeCall( hipHostMalloc( reinterpret_cast<void**>(&h_solutions), 256*8 ) );
cudaResetSolution();
m_gpu_initialized = true;
}
}
auto CUDASolver::cudaCleanup() -> void
{
cudaSafeCall( hipSetDevice( m_device ) );
cudaSafeCall( hipDeviceSynchronize() );
cudaSafeCall( hipFree( d_solution_count ) );
cudaSafeCall( hipHostFree( h_solution_count ) );
cudaSafeCall( hipFree( d_solutions ) );
cudaSafeCall( hipHostFree( h_solutions ) );
cudaSafeCall( hipDeviceReset() );
m_gpu_initialized = false;
}
auto CUDASolver::cudaResetSolution() -> void
{
hipSetDevice( m_device );
std::memset( h_solution_count, 0u, 4 );
cudaSafeCall( hipMemset( d_solution_count, 0u, 4 ) );
}
auto CUDASolver::pushTarget() -> void
{
hipSetDevice( m_device );
uint64_t target{ getTarget() };
cudaSafeCall( hipMemcpyToSymbol( d_target, &target, 8, 0, hipMemcpyHostToDevice) );
m_new_target = false;
}
auto CUDASolver::pushMessage() -> void
{
hipSetDevice( m_device );
cudaSafeCall( hipMemcpyToSymbol( d_mid, getMidstate().data(), 200, 0, hipMemcpyHostToDevice) );
m_new_message = false;
}
auto CUDASolver::findSolution() -> void
{
cudaInit();
hipSetDevice( m_device );
do
{
if( m_new_target ) { pushTarget(); }
if( m_new_message ) { pushMessage(); }
hipLaunchKernelGGL(( cuda_mine) , dim3(m_grid), dim3(m_block) , 0, 0, d_solutions, d_solution_count, getNextSearchSpace() );
hipError_t cudaerr = hipDeviceSynchronize();
if( cudaerr != hipSuccess )
{
std::cerr << "Kernel launch failed with error "
<< cudaerr
<< ": \x1b[38;5;196m"
<< hipGetErrorString( cudaerr )
<< ".\x1b[0m\n"
<< "Check your hardware configuration.\n";
exit( EXIT_FAILURE );
}
cudaSafeCall( hipMemcpy( h_solution_count, d_solution_count, 4, hipMemcpyDeviceToHost ) );
if( *h_solution_count )
{
cudaSafeCall( hipMemcpy( h_solutions, d_solutions, (*h_solution_count)*8, hipMemcpyDeviceToHost ) );
pushSolutions();
cudaResetSolution();
}
} while( !m_stop );
m_stopped = true;
}
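// The mining loop above polls the device once per kernel launch: it
// synchronizes, copies the 4-byte solution counter back, and only transfers
// the solution buffer when the counter is non-zero. Together with the
// blocking-sync device flag set in cudaInit(), this keeps the host thread
// from spin-waiting between launches.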
| 8bd2a62cb7811d9a553e777559a3521e01f2f5ad.cu | /*
Author: Mikers
Date: March 4, 2018, for 0xbitcoin dev
based off of https://github.com/Dunhili/SHA3-gpu-brute-force-cracker/blob/master/sha3.cu
* Author: Brian Bowden
* Date: 5/12/14
*
* This is the parallel version of SHA-3.
*/
#include <iostream>
#include <sstream>
#include <iomanip>
#include <cstring>
#include "cudasolver.h"
#ifdef __INTELLISENSE__
/* reduce vstudio warnings (__byteperm, blockIdx...) */
# include <device_functions.h>
# include <device_launch_parameters.h>
# include <cuda_runtime.h>
# include <cuda.h>
#endif //__INTELLISENSE__
#define cudaSafeCall(err) __cudaSafeCall(err, __FILE__, __LINE__, m_device)
__host__ inline
auto __cudaSafeCall( cudaError_t err, char const* file, int32_t const line, int32_t device_id ) -> void
{
#ifndef CUDA_NDEBUG
if (cudaSuccess != err) {
std::cerr << "CUDA device " << device_id
<< " encountered an error in file '" << file
<< "' in line " << line
<< " : " << cudaGetErrorString( err ) << ".\n";
exit(EXIT_FAILURE);
}
#endif
}
__constant__ uint64_t d_mid[25];
__constant__ uint64_t d_target;
__device__ __constant__ uint64_t const RC[24] = {
0x0000000000000001, 0x0000000000008082, 0x800000000000808a,
0x8000000080008000, 0x000000000000808b, 0x0000000080000001,
0x8000000080008081, 0x8000000000008009, 0x000000000000008a,
0x0000000000000088, 0x0000000080008009, 0x000000008000000a,
0x000000008000808b, 0x800000000000008b, 0x8000000000008089,
0x8000000000008003, 0x8000000000008002, 0x8000000000000080,
0x000000000000800a, 0x800000008000000a, 0x8000000080008081,
0x8000000000008080, 0x0000000080000001, 0x8000000080008008
};
__device__ __forceinline__
auto ROTL64( uint64_t x, uint32_t y ) -> uint64_t const
{
return (x << y) ^ (x >> (64 - y));
}
__device__ __forceinline__
auto ROTR64( uint64_t x, uint32_t y ) -> uint64_t const
{
return (x >> y) ^ (x << (64 - y));
}
__device__ __forceinline__
auto bswap_64( uint64_t const input ) -> uint64_t const
{
uint64_t output;
asm( "{"
" prmt.b32 %0, %3, 0, 0x0123;"
" prmt.b32 %1, %2, 0, 0x0123;"
"}" : "=r"(reinterpret_cast<uint2&>(output).x), "=r"(reinterpret_cast<uint2&>(output).y)
: "r"(reinterpret_cast<uint2 const&>(input).x), "r"(reinterpret_cast<uint2 const&>(input).y) );
return output;
}
__device__ __forceinline__
auto bswap_32( uint32_t const input ) -> uint32_t const
{
uint32_t output;
asm( "prmt.b32 %0, %1, 0, 0x0123;" : "=r"(output) : "r"(input) );
return output;
}
__device__ __forceinline__
auto xor5( uint64_t const a, uint64_t const b, uint64_t const c, uint64_t const d, uint64_t const e ) -> uint64_t const
{
uint64_t output;
asm( "{"
" xor.b64 %0, %1, %2;"
" xor.b64 %0, %0, %3;"
" xor.b64 %0, %0, %4;"
" xor.b64 %0, %0, %5;"
"}" : "=l"(output) : "l"(a), "l"(b), "l"(c), "l"(d), "l"(e) );
return output;
}
__device__ __forceinline__
auto xor3( uint64_t const a, uint64_t const b, uint64_t const c ) -> uint64_t const
{
uint64_t output;
#if __CUDA_ARCH__ >= 500
asm( "{"
" lop3.b32 %0, %2, %4, %6, 0x96;"
" lop3.b32 %1, %3, %5, %7, 0x96;"
"}" : "=r"(reinterpret_cast<uint2&>(output).x), "=r"(reinterpret_cast<uint2&>(output).y)
: "r"(reinterpret_cast<uint2 const&>(a).x), "r"(reinterpret_cast<uint2 const&>(a).y),
"r"(reinterpret_cast<uint2 const&>(b).x), "r"(reinterpret_cast<uint2 const&>(b).y),
"r"(reinterpret_cast<uint2 const&>(c).x), "r"(reinterpret_cast<uint2 const&>(c).y) );
#else
asm( "{"
" xor.b64 %0, %1, %2;"
" xor.b64 %0, %0, %3;"
"}" : "=l"(output) : "l"(a), "l"(b), "l"(c) );
#endif
return output;
}
__device__ __forceinline__
auto chi( uint64_t const a, uint64_t const b, uint64_t const c ) -> uint64_t const
{
#if __CUDA_ARCH__ >= 500
uint64_t output;
asm( "{"
" lop3.b32 %0, %2, %4, %6, 0xD2;"
" lop3.b32 %1, %3, %5, %7, 0xD2;"
"}" : "=r"(reinterpret_cast<uint2&>(output).x), "=r"(reinterpret_cast<uint2&>(output).y)
: "r"(reinterpret_cast<uint2 const&>(a).x), "r"(reinterpret_cast<uint2 const&>(a).y),
"r"(reinterpret_cast<uint2 const&>(b).x), "r"(reinterpret_cast<uint2 const&>(b).y),
"r"(reinterpret_cast<uint2 const&>(c).x), "r"(reinterpret_cast<uint2 const&>(c).y) );
return output;
#else
return a ^ ((~b) & c);
#endif
}
__global__
void cuda_mine( uint64_t* __restrict__ solution, uint32_t* __restrict__ solution_count, uint64_t const threads )
{
uint64_t const nounce{ threads + (blockDim.x * blockIdx.x + threadIdx.x) };
uint64_t state[25], C[5], D[5];
uint64_t n[11] { ROTL64(nounce, 7) };
n[ 1] = ROTL64(n[ 0], 1);
n[ 2] = ROTL64(n[ 1], 6);
n[ 3] = ROTL64(n[ 2], 2);
n[ 4] = ROTL64(n[ 3], 4);
n[ 5] = ROTL64(n[ 4], 7);
n[ 6] = ROTL64(n[ 5], 12);
n[ 7] = ROTL64(n[ 6], 5);
n[ 8] = ROTL64(n[ 7], 11);
n[ 9] = ROTL64(n[ 8], 7);
n[10] = ROTL64(n[ 9], 1);
C[0] = d_mid[ 0];
C[1] = d_mid[ 1];
C[2] = d_mid[ 2] ^ n[ 7];
C[3] = d_mid[ 3];
C[4] = d_mid[ 4] ^ n[ 2];
state[ 0] = chi( C[0], C[1], C[2] ) ^ RC[0];
state[ 1] = chi( C[1], C[2], C[3] );
state[ 2] = chi( C[2], C[3], C[4] );
state[ 3] = chi( C[3], C[4], C[0] );
state[ 4] = chi( C[4], C[0], C[1] );
C[0] = d_mid[ 5];
C[1] = d_mid[ 6] ^ n[ 4];
C[2] = d_mid[ 7];
C[3] = d_mid[ 8];
C[4] = d_mid[ 9] ^ n[ 9];
state[ 5] = chi( C[0], C[1], C[2] );
state[ 6] = chi( C[1], C[2], C[3] );
state[ 7] = chi( C[2], C[3], C[4] );
state[ 8] = chi( C[3], C[4], C[0] );
state[ 9] = chi( C[4], C[0], C[1] );
C[0] = d_mid[10];
C[1] = d_mid[11] ^ n[ 0];
C[2] = d_mid[12];
C[3] = d_mid[13] ^ n[ 1];
C[4] = d_mid[14];
state[10] = chi( C[0], C[1], C[2] );
state[11] = chi( C[1], C[2], C[3] );
state[12] = chi( C[2], C[3], C[4] );
state[13] = chi( C[3], C[4], C[0] );
state[14] = chi( C[4], C[0], C[1] );
C[0] = d_mid[15] ^ n[ 5];
C[1] = d_mid[16];
C[2] = d_mid[17];
C[3] = d_mid[18] ^ n[ 3];
C[4] = d_mid[19];
state[15] = chi( C[0], C[1], C[2] );
state[16] = chi( C[1], C[2], C[3] );
state[17] = chi( C[2], C[3], C[4] );
state[18] = chi( C[3], C[4], C[0] );
state[19] = chi( C[4], C[0], C[1] );
C[0] = d_mid[20] ^ n[10];
C[1] = d_mid[21] ^ n[ 8];
C[2] = d_mid[22] ^ n[ 6];
C[3] = d_mid[23];
C[4] = d_mid[24];
state[20] = chi( C[0], C[1], C[2] );
state[21] = chi( C[1], C[2], C[3] );
state[22] = chi( C[2], C[3], C[4] );
state[23] = chi( C[3], C[4], C[0] );
state[24] = chi( C[4], C[0], C[1] );
#if __CUDA_ARCH__ >= 350
# pragma unroll
#endif
for( uint_fast8_t i{ 1 }; i < 23; ++i )
{
for( uint_fast8_t x{ 0 }; x < 5; ++x )
{
C[(x + 6) % 5] = xor5( state[x], state[x + 5], state[x + 10], state[x + 15], state[x + 20] );
}
#if __CUDA_ARCH__ >= 350
for( uint_fast8_t x{ 0 }; x < 5; ++x )
{
D[x] = ROTL64(C[(x + 2) % 5], 1);
state[x] = xor3( state[x] , D[x], C[x] );
state[x + 5] = xor3( state[x + 5], D[x], C[x] );
state[x + 10] = xor3( state[x + 10], D[x], C[x] );
state[x + 15] = xor3( state[x + 15], D[x], C[x] );
state[x + 20] = xor3( state[x + 20], D[x], C[x] );
}
#else
for( uint_fast8_t x{ 0 }; x < 5; ++x )
{
D[x] = ROTL64(C[(x + 2) % 5], 1) ^ C[x];
state[x] = state[x] ^ D[x];
state[x + 5] = state[x + 5] ^ D[x];
state[x + 10] = state[x + 10] ^ D[x];
state[x + 15] = state[x + 15] ^ D[x];
state[x + 20] = state[x + 20] ^ D[x];
}
#endif
C[0] = state[1];
state[ 1] = ROTR64( state[ 6], 20 );
state[ 6] = ROTL64( state[ 9], 20 );
state[ 9] = ROTR64( state[22], 3 );
state[22] = ROTR64( state[14], 25 );
state[14] = ROTL64( state[20], 18 );
state[20] = ROTR64( state[ 2], 2 );
state[ 2] = ROTR64( state[12], 21 );
state[12] = ROTL64( state[13], 25 );
state[13] = ROTL64( state[19], 8 );
state[19] = ROTR64( state[23], 8 );
state[23] = ROTR64( state[15], 23 );
state[15] = ROTL64( state[ 4], 27 );
state[ 4] = ROTL64( state[24], 14 );
state[24] = ROTL64( state[21], 2 );
state[21] = ROTR64( state[ 8], 9 );
state[ 8] = ROTR64( state[16], 19 );
state[16] = ROTR64( state[ 5], 28 );
state[ 5] = ROTL64( state[ 3], 28 );
state[ 3] = ROTL64( state[18], 21 );
state[18] = ROTL64( state[17], 15 );
state[17] = ROTL64( state[11], 10 );
state[11] = ROTL64( state[ 7], 6 );
state[ 7] = ROTL64( state[10], 3 );
state[10] = ROTL64( C[0], 1 );
for( uint_fast8_t x{ 0 }; x < 25; x += 5 )
{
C[0] = state[x];
C[1] = state[x + 1];
C[2] = state[x + 2];
C[3] = state[x + 3];
C[4] = state[x + 4];
state[x] = chi( C[0], C[1], C[2] );
state[x + 1] = chi( C[1], C[2], C[3] );
state[x + 2] = chi( C[2], C[3], C[4] );
state[x + 3] = chi( C[3], C[4], C[0] );
state[x + 4] = chi( C[4], C[0], C[1] );
}
state[0] = state[0] ^ RC[i];
}
for( uint_fast8_t x{ 0 }; x < 5; ++x )
{
C[(x + 6) % 5] = xor5( state[x], state[x + 5], state[x + 10], state[x + 15], state[x + 20] );
}
D[0] = ROTL64(C[2], 1);
D[1] = ROTL64(C[3], 1);
D[2] = ROTL64(C[4], 1);
state[ 0] = xor3( state[ 0], D[0], C[0] );
state[ 6] = xor3( state[ 6], D[1], C[1] );
state[12] = xor3( state[12], D[2], C[2] );
state[ 6] = ROTR64(state[ 6], 20);
state[12] = ROTR64(state[12], 21);
state[ 0] = chi( state[ 0], state[ 6], state[12] ) ^ RC[23];
if( bswap_64( state[0] ) <= d_target )
{
uint64_t cIdx{ atomicAdd( solution_count, 1 ) };
if( cIdx >= 256 ) return;
solution[cIdx] = nounce;
}
}
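// Structure of the search kernel: all 25 state words are seeded from a
// host-precomputed midstate (d_mid) XORed with eleven rotations of the
// per-thread nounce; round 0 appears to be carried through theta/rho/pi on
// the host, leaving only chi and RC[0] inline. Rounds 1..22 run in the
// unrolled loop, and the final round is trimmed to just the lanes that feed
// state[0], which is all the comparison against d_target needs.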
// --------------------------------------------------------------------
auto CUDASolver::cudaInit() -> void
{
cudaSetDevice( m_device );
cudaDeviceProp device_prop;
cudaSafeCall( cudaGetDeviceProperties( &device_prop, m_device ) );
int32_t compute_version{ device_prop.major * 100 + device_prop.minor * 10 };
if( compute_version <= 500 )
{
m_intensity = m_intensity <= 40.55 ? m_intensity : 40.55;
m_threads = static_cast<uint64_t>( std::pow( 2, m_intensity <= 40.55 ? m_intensity : 40.55 ) );
}
m_block.x = compute_version > 500 ? TPB50 : TPB35;
m_grid.x = uint32_t((m_threads + m_block.x - 1) / m_block.x);
if( !m_gpu_initialized )
{
// CPU usage goes _insane_ without this.
cudaSafeCall( cudaDeviceReset() );
// so we don't actually _use_ L1 or local memory . . .
cudaSafeCall( cudaSetDeviceFlags( cudaDeviceScheduleBlockingSync ) );
cudaSafeCall( cudaMalloc( reinterpret_cast<void**>(&d_solution_count), 4 ) );
cudaSafeCall( cudaMallocHost( reinterpret_cast<void**>(&h_solution_count), 4 ) );
cudaSafeCall( cudaMalloc( reinterpret_cast<void**>(&d_solutions), 256*8 ) );
cudaSafeCall( cudaMallocHost( reinterpret_cast<void**>(&h_solutions), 256*8 ) );
cudaResetSolution();
m_gpu_initialized = true;
}
}
auto CUDASolver::cudaCleanup() -> void
{
cudaSafeCall( cudaSetDevice( m_device ) );
cudaSafeCall( cudaDeviceSynchronize() );
cudaSafeCall( cudaFree( d_solution_count ) );
cudaSafeCall( cudaFreeHost( h_solution_count ) );
cudaSafeCall( cudaFree( d_solutions ) );
cudaSafeCall( cudaFreeHost( h_solutions ) );
cudaSafeCall( cudaDeviceReset() );
m_gpu_initialized = false;
}
auto CUDASolver::cudaResetSolution() -> void
{
cudaSetDevice( m_device );
std::memset( h_solution_count, 0u, 4 );
cudaSafeCall( cudaMemset( d_solution_count, 0u, 4 ) );
}
auto CUDASolver::pushTarget() -> void
{
cudaSetDevice( m_device );
uint64_t target{ getTarget() };
cudaSafeCall( cudaMemcpyToSymbol( d_target, &target, 8, 0, cudaMemcpyHostToDevice) );
m_new_target = false;
}
auto CUDASolver::pushMessage() -> void
{
cudaSetDevice( m_device );
cudaSafeCall( cudaMemcpyToSymbol( d_mid, getMidstate().data(), 200, 0, cudaMemcpyHostToDevice) );
m_new_message = false;
}
auto CUDASolver::findSolution() -> void
{
cudaInit();
cudaSetDevice( m_device );
do
{
if( m_new_target ) { pushTarget(); }
if( m_new_message ) { pushMessage(); }
cuda_mine <<< m_grid, m_block >>> ( d_solutions, d_solution_count, getNextSearchSpace() );
cudaError_t cudaerr = cudaDeviceSynchronize();
if( cudaerr != cudaSuccess )
{
std::cerr << "Kernel launch failed with error "
<< cudaerr
<< ": \x1b[38;5;196m"
<< cudaGetErrorString( cudaerr )
<< ".\x1b[0m\n"
<< "Check your hardware configuration.\n";
exit( EXIT_FAILURE );
}
cudaSafeCall( cudaMemcpy( h_solution_count, d_solution_count, 4, cudaMemcpyDeviceToHost ) );
if( *h_solution_count )
{
cudaSafeCall( cudaMemcpy( h_solutions, d_solutions, (*h_solution_count)*8, cudaMemcpyDeviceToHost ) );
pushSolutions();
cudaResetSolution();
}
} while( !m_stop );
m_stopped = true;
}
|
c37676d007792230f1a3cd31f8c53c6ca6b4e811.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
* Copyright (c) 2021 Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See LICENSE file in the project root for
* license information.
*/
#ifdef USE_CUDA_EXP
#include "cuda_data_partition.hpp"
#include <LightGBM/cuda/cuda_algorithms.hpp>
#include <LightGBM/tree.h>
#include <algorithm>
#include <vector>
namespace LightGBM {
__global__ void FillDataIndicesBeforeTrainKernel(const data_size_t num_data,
data_size_t* data_indices, int* cuda_data_index_to_leaf_index) {
const unsigned int data_index = threadIdx.x + blockIdx.x * blockDim.x;
if (data_index < num_data) {
data_indices[data_index] = data_index;
cuda_data_index_to_leaf_index[data_index] = 0;
}
}
__global__ void FillDataIndexToLeafIndexKernel(
const data_size_t num_data,
const data_size_t* data_indices,
int* data_index_to_leaf_index) {
const data_size_t data_index = static_cast<data_size_t>(threadIdx.x + blockIdx.x * blockDim.x);
if (data_index < num_data) {
data_index_to_leaf_index[data_indices[data_index]] = 0;
}
}
void CUDADataPartition::LaunchFillDataIndicesBeforeTrain() {
const data_size_t num_data_in_root = root_num_data();
const int num_blocks = (num_data_in_root + FILL_INDICES_BLOCK_SIZE_DATA_PARTITION - 1) / FILL_INDICES_BLOCK_SIZE_DATA_PARTITION;
hipLaunchKernelGGL(( FillDataIndicesBeforeTrainKernel), dim3(num_blocks), dim3(FILL_INDICES_BLOCK_SIZE_DATA_PARTITION), 0, 0, num_data_in_root, cuda_data_indices_, cuda_data_index_to_leaf_index_);
}
void CUDADataPartition::LaunchFillDataIndexToLeafIndex() {
const data_size_t num_data_in_root = root_num_data();
const int num_blocks = (num_data_in_root + FILL_INDICES_BLOCK_SIZE_DATA_PARTITION - 1) / FILL_INDICES_BLOCK_SIZE_DATA_PARTITION;
hipLaunchKernelGGL(( FillDataIndexToLeafIndexKernel), dim3(num_blocks), dim3(FILL_INDICES_BLOCK_SIZE_DATA_PARTITION), 0, 0, num_data_in_root, cuda_data_indices_, cuda_data_index_to_leaf_index_);
}
__device__ __forceinline__ void PrepareOffset(const data_size_t num_data_in_leaf, uint16_t* block_to_left_offset,
data_size_t* block_to_left_offset_buffer, data_size_t* block_to_right_offset_buffer,
const uint16_t thread_to_left_offset_cnt, uint16_t* shared_mem_buffer) {
const unsigned int threadIdx_x = threadIdx.x;
const unsigned int blockDim_x = blockDim.x;
const uint16_t thread_to_left_offset = ShufflePrefixSum<uint16_t>(thread_to_left_offset_cnt, shared_mem_buffer);
const data_size_t num_data_in_block = (blockIdx.x + 1) * blockDim_x <= num_data_in_leaf ? static_cast<data_size_t>(blockDim_x) :
num_data_in_leaf - static_cast<data_size_t>(blockIdx.x * blockDim_x);
if (static_cast<data_size_t>(threadIdx_x) < num_data_in_block) {
block_to_left_offset[threadIdx_x] = thread_to_left_offset;
}
if (threadIdx_x == blockDim_x - 1) {
if (num_data_in_block > 0) {
const data_size_t data_to_left = static_cast<data_size_t>(thread_to_left_offset);
block_to_left_offset_buffer[blockIdx.x + 1] = data_to_left;
block_to_right_offset_buffer[blockIdx.x + 1] = num_data_in_block - data_to_left;
} else {
block_to_left_offset_buffer[blockIdx.x + 1] = 0;
block_to_right_offset_buffer[blockIdx.x + 1] = 0;
}
}
}
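// PrepareOffset turns each thread's 0/1 "goes left" flag into a per-block
// partition plan: ShufflePrefixSum gives every thread the inclusive count of
// left-bound rows up to and including it (stored in block_to_left_offset),
// and the last thread of the block publishes the block's left/right totals at
// index blockIdx.x + 1, so a subsequent pass over the block buffers can turn
// them into global write positions.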
template <typename T>
__device__ bool CUDAFindInBitset(const uint32_t* bits, int n, T pos) {
int i1 = pos / 32;
if (i1 >= n) {
return false;
}
int i2 = pos % 32;
return (bits[i1] >> i2) & 1;
}
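// Example: with pos = 70 the bit lives in word 70 / 32 = 2 at bit position
// 70 % 32 = 6, so the test reads (bits[2] >> 6) & 1; positions beyond the
// n-word bitset report false.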
#define UpdateDataIndexToLeafIndexKernel_PARAMS \
const BIN_TYPE* column_data, \
const data_size_t num_data_in_leaf, \
const data_size_t* data_indices_in_leaf, \
const uint32_t th, \
const uint32_t t_zero_bin, \
const uint32_t max_bin, \
const uint32_t min_bin, \
const int left_leaf_index, \
const int right_leaf_index, \
const int default_leaf_index, \
const int missing_default_leaf_index
#define UpdateDataIndexToLeafIndex_ARGS \
column_data, \
num_data_in_leaf, \
data_indices_in_leaf, \
th, \
t_zero_bin, \
max_bin, \
min_bin, \
left_leaf_index, \
right_leaf_index, \
default_leaf_index, \
missing_default_leaf_index
template <bool MIN_IS_MAX, bool MISSING_IS_ZERO, bool MISSING_IS_NA, bool MFB_IS_ZERO, bool MFB_IS_NA, bool MAX_TO_LEFT, bool USE_MIN_BIN, typename BIN_TYPE>
__global__ void UpdateDataIndexToLeafIndexKernel(
UpdateDataIndexToLeafIndexKernel_PARAMS,
int* cuda_data_index_to_leaf_index) {
const unsigned int local_data_index = blockIdx.x * blockDim.x + threadIdx.x;
if (local_data_index < num_data_in_leaf) {
const unsigned int global_data_index = data_indices_in_leaf[local_data_index];
const uint32_t bin = static_cast<uint32_t>(column_data[global_data_index]);
if (!MIN_IS_MAX) {
if ((MISSING_IS_ZERO && !MFB_IS_ZERO && bin == t_zero_bin) ||
(MISSING_IS_NA && !MFB_IS_NA && bin == max_bin)) {
cuda_data_index_to_leaf_index[global_data_index] = missing_default_leaf_index;
} else if ((USE_MIN_BIN && (bin < min_bin || bin > max_bin)) ||
(!USE_MIN_BIN && bin == 0)) {
if ((MISSING_IS_NA && MFB_IS_NA) || (MISSING_IS_ZERO && MFB_IS_ZERO)) {
cuda_data_index_to_leaf_index[global_data_index] = missing_default_leaf_index;
} else {
cuda_data_index_to_leaf_index[global_data_index] = default_leaf_index;
}
} else if (bin > th) {
cuda_data_index_to_leaf_index[global_data_index] = right_leaf_index;
} else {
cuda_data_index_to_leaf_index[global_data_index] = left_leaf_index;
}
} else {
if (MISSING_IS_ZERO && !MFB_IS_ZERO && bin == t_zero_bin) {
cuda_data_index_to_leaf_index[global_data_index] = missing_default_leaf_index;
} else if (bin != max_bin) {
if ((MISSING_IS_NA && MFB_IS_NA) || (MISSING_IS_ZERO && MFB_IS_ZERO)) {
cuda_data_index_to_leaf_index[global_data_index] = missing_default_leaf_index;
} else {
cuda_data_index_to_leaf_index[global_data_index] = default_leaf_index;
}
} else {
if (MISSING_IS_NA && !MFB_IS_NA) {
cuda_data_index_to_leaf_index[global_data_index] = missing_default_leaf_index;
} else {
if (!MAX_TO_LEFT) {
cuda_data_index_to_leaf_index[global_data_index] = right_leaf_index;
} else {
cuda_data_index_to_leaf_index[global_data_index] = left_leaf_index;
}
}
}
}
}
}
template <typename BIN_TYPE>
void CUDADataPartition::LaunchUpdateDataIndexToLeafIndexKernel(
UpdateDataIndexToLeafIndexKernel_PARAMS,
const bool missing_is_zero,
const bool missing_is_na,
const bool mfb_is_zero,
const bool mfb_is_na,
const bool max_to_left,
const bool is_single_feature_in_column) {
if (min_bin < max_bin) {
if (!missing_is_zero) {
LaunchUpdateDataIndexToLeafIndexKernel_Inner0<false, false, BIN_TYPE>
(UpdateDataIndexToLeafIndex_ARGS, missing_is_na, mfb_is_zero, mfb_is_na, max_to_left, is_single_feature_in_column);
} else {
LaunchUpdateDataIndexToLeafIndexKernel_Inner0<false, true, BIN_TYPE>
(UpdateDataIndexToLeafIndex_ARGS, missing_is_na, mfb_is_zero, mfb_is_na, max_to_left, is_single_feature_in_column);
}
} else {
if (!missing_is_zero) {
LaunchUpdateDataIndexToLeafIndexKernel_Inner0<true, false, BIN_TYPE>
(UpdateDataIndexToLeafIndex_ARGS, missing_is_na, mfb_is_zero, mfb_is_na, max_to_left, is_single_feature_in_column);
} else {
LaunchUpdateDataIndexToLeafIndexKernel_Inner0<true, true, BIN_TYPE>
(UpdateDataIndexToLeafIndex_ARGS, missing_is_na, mfb_is_zero, mfb_is_na, max_to_left, is_single_feature_in_column);
}
}
}
template <bool MIN_IS_MAX, bool MISSING_IS_ZERO, typename BIN_TYPE>
void CUDADataPartition::LaunchUpdateDataIndexToLeafIndexKernel_Inner0(
UpdateDataIndexToLeafIndexKernel_PARAMS,
const bool missing_is_na,
const bool mfb_is_zero,
const bool mfb_is_na,
const bool max_to_left,
const bool is_single_feature_in_column) {
if (!missing_is_na) {
LaunchUpdateDataIndexToLeafIndexKernel_Inner1<MIN_IS_MAX, MISSING_IS_ZERO, false, BIN_TYPE>
(UpdateDataIndexToLeafIndex_ARGS, mfb_is_zero, mfb_is_na, max_to_left, is_single_feature_in_column);
} else {
LaunchUpdateDataIndexToLeafIndexKernel_Inner1<MIN_IS_MAX, MISSING_IS_ZERO, true, BIN_TYPE>
(UpdateDataIndexToLeafIndex_ARGS, mfb_is_zero, mfb_is_na, max_to_left, is_single_feature_in_column);
}
}
template <bool MIN_IS_MAX, bool MISSING_IS_ZERO, bool MISSING_IS_NA, typename BIN_TYPE>
void CUDADataPartition::LaunchUpdateDataIndexToLeafIndexKernel_Inner1(
UpdateDataIndexToLeafIndexKernel_PARAMS,
const bool mfb_is_zero,
const bool mfb_is_na,
const bool max_to_left,
const bool is_single_feature_in_column) {
if (!mfb_is_zero) {
LaunchUpdateDataIndexToLeafIndexKernel_Inner2<MIN_IS_MAX, MISSING_IS_ZERO, MISSING_IS_NA, false, BIN_TYPE>
(UpdateDataIndexToLeafIndex_ARGS, mfb_is_na, max_to_left, is_single_feature_in_column);
} else {
LaunchUpdateDataIndexToLeafIndexKernel_Inner2<MIN_IS_MAX, MISSING_IS_ZERO, MISSING_IS_NA, true, BIN_TYPE>
(UpdateDataIndexToLeafIndex_ARGS, mfb_is_na, max_to_left, is_single_feature_in_column);
}
}
template <bool MIN_IS_MAX, bool MISSING_IS_ZERO, bool MISSING_IS_NA, bool MFB_IS_ZERO, typename BIN_TYPE>
void CUDADataPartition::LaunchUpdateDataIndexToLeafIndexKernel_Inner2(
UpdateDataIndexToLeafIndexKernel_PARAMS,
const bool mfb_is_na,
const bool max_to_left,
const bool is_single_feature_in_column) {
if (!mfb_is_na) {
LaunchUpdateDataIndexToLeafIndexKernel_Inner3<MIN_IS_MAX, MISSING_IS_ZERO, MISSING_IS_NA, MFB_IS_ZERO, false, BIN_TYPE>
(UpdateDataIndexToLeafIndex_ARGS, max_to_left, is_single_feature_in_column);
} else {
LaunchUpdateDataIndexToLeafIndexKernel_Inner3<MIN_IS_MAX, MISSING_IS_ZERO, MISSING_IS_NA, MFB_IS_ZERO, true, BIN_TYPE>
(UpdateDataIndexToLeafIndex_ARGS, max_to_left, is_single_feature_in_column);
}
}
template <bool MIN_IS_MAX, bool MISSING_IS_ZERO, bool MISSING_IS_NA, bool MFB_IS_ZERO, bool MFB_IS_NA, typename BIN_TYPE>
void CUDADataPartition::LaunchUpdateDataIndexToLeafIndexKernel_Inner3(
UpdateDataIndexToLeafIndexKernel_PARAMS,
const bool max_to_left,
const bool is_single_feature_in_column) {
if (!max_to_left) {
LaunchUpdateDataIndexToLeafIndexKernel_Inner4<MIN_IS_MAX, MISSING_IS_ZERO, MISSING_IS_NA, MFB_IS_ZERO, MFB_IS_NA, false, BIN_TYPE>
(UpdateDataIndexToLeafIndex_ARGS, is_single_feature_in_column);
} else {
LaunchUpdateDataIndexToLeafIndexKernel_Inner4<MIN_IS_MAX, MISSING_IS_ZERO, MISSING_IS_NA, MFB_IS_ZERO, MFB_IS_NA, true, BIN_TYPE>
(UpdateDataIndexToLeafIndex_ARGS, is_single_feature_in_column);
}
}
template <bool MIN_IS_MAX, bool MISSING_IS_ZERO, bool MISSING_IS_NA, bool MFB_IS_ZERO, bool MFB_IS_NA, bool MAX_TO_LEFT, typename BIN_TYPE>
void CUDADataPartition::LaunchUpdateDataIndexToLeafIndexKernel_Inner4(
UpdateDataIndexToLeafIndexKernel_PARAMS,
const bool is_single_feature_in_column) {
if (!is_single_feature_in_column) {
hipLaunchKernelGGL(( UpdateDataIndexToLeafIndexKernel<MIN_IS_MAX, MISSING_IS_ZERO, MISSING_IS_NA, MFB_IS_ZERO, MFB_IS_NA, MAX_TO_LEFT, true, BIN_TYPE>)
, dim3(grid_dim_), dim3(block_dim_), 0, cuda_streams_[3],
UpdateDataIndexToLeafIndex_ARGS,
cuda_data_index_to_leaf_index_);
} else {
hipLaunchKernelGGL(( UpdateDataIndexToLeafIndexKernel<MIN_IS_MAX, MISSING_IS_ZERO, MISSING_IS_NA, MFB_IS_ZERO, MFB_IS_NA, MAX_TO_LEFT, false, BIN_TYPE>)
, dim3(grid_dim_), dim3(block_dim_), 0, cuda_streams_[3],
UpdateDataIndexToLeafIndex_ARGS,
cuda_data_index_to_leaf_index_);
}
}
#define GenDataToLeftBitVectorKernel_PARMS \
const BIN_TYPE* column_data, \
const data_size_t num_data_in_leaf, \
const data_size_t* data_indices_in_leaf, \
const uint32_t th, \
const uint32_t t_zero_bin, \
const uint32_t max_bin, \
const uint32_t min_bin, \
const uint8_t split_default_to_left, \
const uint8_t split_missing_default_to_left
#define GenBitVector_ARGS \
column_data, \
num_data_in_leaf, \
data_indices_in_leaf, \
th, \
t_zero_bin, \
max_bin, \
min_bin, \
split_default_to_left, \
split_missing_default_to_left
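// Marks each datum of the splitting leaf for the left child
// (thread_to_left_offset_cnt = 1) or the right child (0) and aggregates the
// per-thread flags into per-block left/right counts via PrepareOffset.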
template <bool MIN_IS_MAX, bool MISSING_IS_ZERO, bool MISSING_IS_NA, bool MFB_IS_ZERO, bool MFB_IS_NA, bool MAX_TO_LEFT, bool USE_MIN_BIN, typename BIN_TYPE>
__global__ void GenDataToLeftBitVectorKernel(
GenDataToLeftBitVectorKernel_PARMS,
uint16_t* block_to_left_offset,
data_size_t* block_to_left_offset_buffer,
data_size_t* block_to_right_offset_buffer) {
__shared__ uint16_t shared_mem_buffer[32];
uint16_t thread_to_left_offset_cnt = 0;
const unsigned int local_data_index = blockIdx.x * blockDim.x + threadIdx.x;
if (local_data_index < num_data_in_leaf) {
const unsigned int global_data_index = data_indices_in_leaf[local_data_index];
const uint32_t bin = static_cast<uint32_t>(column_data[global_data_index]);
if (!MIN_IS_MAX) {
if ((MISSING_IS_ZERO && !MFB_IS_ZERO && bin == t_zero_bin) ||
(MISSING_IS_NA && !MFB_IS_NA && bin == max_bin)) {
thread_to_left_offset_cnt = split_missing_default_to_left;
} else if ((USE_MIN_BIN && (bin < min_bin || bin > max_bin)) ||
(!USE_MIN_BIN && bin == 0)) {
        if ((MISSING_IS_NA && MFB_IS_NA) || (MISSING_IS_ZERO && MFB_IS_ZERO)) {
thread_to_left_offset_cnt = split_missing_default_to_left;
} else {
thread_to_left_offset_cnt = split_default_to_left;
}
} else if (bin <= th) {
thread_to_left_offset_cnt = 1;
}
} else {
if (MISSING_IS_ZERO && !MFB_IS_ZERO && bin == t_zero_bin) {
thread_to_left_offset_cnt = split_missing_default_to_left;
} else if (bin != max_bin) {
if ((MISSING_IS_NA && MFB_IS_NA) || (MISSING_IS_ZERO && MFB_IS_ZERO)) {
thread_to_left_offset_cnt = split_missing_default_to_left;
} else {
thread_to_left_offset_cnt = split_default_to_left;
}
} else {
if (MISSING_IS_NA && !MFB_IS_NA) {
thread_to_left_offset_cnt = split_missing_default_to_left;
} else if (MAX_TO_LEFT) {
thread_to_left_offset_cnt = 1;
}
}
}
}
__syncthreads();
PrepareOffset(num_data_in_leaf, block_to_left_offset + blockIdx.x * blockDim.x, block_to_left_offset_buffer, block_to_right_offset_buffer,
thread_to_left_offset_cnt, shared_mem_buffer);
}
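// Same runtime-bool to template-parameter dispatch chain as for
// UpdateDataIndexToLeafIndexKernel, ending in the specialized kernel launch.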
template <typename BIN_TYPE>
void CUDADataPartition::LaunchGenDataToLeftBitVectorKernelInner(
GenDataToLeftBitVectorKernel_PARMS,
const bool missing_is_zero,
const bool missing_is_na,
const bool mfb_is_zero,
const bool mfb_is_na,
const bool max_bin_to_left,
const bool is_single_feature_in_column) {
if (min_bin < max_bin) {
if (!missing_is_zero) {
LaunchGenDataToLeftBitVectorKernelInner0<false, false, BIN_TYPE>
(GenBitVector_ARGS, missing_is_na, mfb_is_zero, mfb_is_na, max_bin_to_left, is_single_feature_in_column);
} else {
LaunchGenDataToLeftBitVectorKernelInner0<false, true, BIN_TYPE>
(GenBitVector_ARGS, missing_is_na, mfb_is_zero, mfb_is_na, max_bin_to_left, is_single_feature_in_column);
}
} else {
if (!missing_is_zero) {
LaunchGenDataToLeftBitVectorKernelInner0<true, false, BIN_TYPE>
(GenBitVector_ARGS, missing_is_na, mfb_is_zero, mfb_is_na, max_bin_to_left, is_single_feature_in_column);
} else {
LaunchGenDataToLeftBitVectorKernelInner0<true, true, BIN_TYPE>
(GenBitVector_ARGS, missing_is_na, mfb_is_zero, mfb_is_na, max_bin_to_left, is_single_feature_in_column);
}
}
}
template <bool MIN_IS_MAX, bool MISSING_IS_ZERO, typename BIN_TYPE>
void CUDADataPartition::LaunchGenDataToLeftBitVectorKernelInner0(
GenDataToLeftBitVectorKernel_PARMS,
const bool missing_is_na,
const bool mfb_is_zero,
const bool mfb_is_na,
const bool max_bin_to_left,
const bool is_single_feature_in_column) {
if (!missing_is_na) {
LaunchGenDataToLeftBitVectorKernelInner1<MIN_IS_MAX, MISSING_IS_ZERO, false, BIN_TYPE>
(GenBitVector_ARGS, mfb_is_zero, mfb_is_na, max_bin_to_left, is_single_feature_in_column);
} else {
LaunchGenDataToLeftBitVectorKernelInner1<MIN_IS_MAX, MISSING_IS_ZERO, true, BIN_TYPE>
(GenBitVector_ARGS, mfb_is_zero, mfb_is_na, max_bin_to_left, is_single_feature_in_column);
}
}
template <bool MIN_IS_MAX, bool MISSING_IS_ZERO, bool MISSING_IS_NA, typename BIN_TYPE>
void CUDADataPartition::LaunchGenDataToLeftBitVectorKernelInner1(
GenDataToLeftBitVectorKernel_PARMS,
const bool mfb_is_zero,
const bool mfb_is_na,
const bool max_bin_to_left,
const bool is_single_feature_in_column) {
if (!mfb_is_zero) {
LaunchGenDataToLeftBitVectorKernelInner2<MIN_IS_MAX, MISSING_IS_ZERO, MISSING_IS_NA, false, BIN_TYPE>
(GenBitVector_ARGS, mfb_is_na, max_bin_to_left, is_single_feature_in_column);
} else {
LaunchGenDataToLeftBitVectorKernelInner2<MIN_IS_MAX, MISSING_IS_ZERO, MISSING_IS_NA, true, BIN_TYPE>
(GenBitVector_ARGS, mfb_is_na, max_bin_to_left, is_single_feature_in_column);
}
}
template <bool MIN_IS_MAX, bool MISSING_IS_ZERO, bool MISSING_IS_NA, bool MFB_IS_ZERO, typename BIN_TYPE>
void CUDADataPartition::LaunchGenDataToLeftBitVectorKernelInner2(
GenDataToLeftBitVectorKernel_PARMS,
const bool mfb_is_na,
const bool max_bin_to_left,
const bool is_single_feature_in_column) {
if (!mfb_is_na) {
LaunchGenDataToLeftBitVectorKernelInner3
<MIN_IS_MAX, MISSING_IS_ZERO, MISSING_IS_NA, MFB_IS_ZERO, false, BIN_TYPE>
(GenBitVector_ARGS, max_bin_to_left, is_single_feature_in_column);
} else {
LaunchGenDataToLeftBitVectorKernelInner3
<MIN_IS_MAX, MISSING_IS_ZERO, MISSING_IS_NA, MFB_IS_ZERO, true, BIN_TYPE>
(GenBitVector_ARGS, max_bin_to_left, is_single_feature_in_column);
}
}
template <bool MIN_IS_MAX, bool MISSING_IS_ZERO, bool MISSING_IS_NA, bool MFB_IS_ZERO, bool MFB_IS_NA, typename BIN_TYPE>
void CUDADataPartition::LaunchGenDataToLeftBitVectorKernelInner3(
GenDataToLeftBitVectorKernel_PARMS,
const bool max_bin_to_left,
const bool is_single_feature_in_column) {
if (!max_bin_to_left) {
LaunchGenDataToLeftBitVectorKernelInner4
<MIN_IS_MAX, MISSING_IS_ZERO, MISSING_IS_NA, MFB_IS_ZERO, MFB_IS_NA, false, BIN_TYPE>
(GenBitVector_ARGS, is_single_feature_in_column);
} else {
LaunchGenDataToLeftBitVectorKernelInner4
<MIN_IS_MAX, MISSING_IS_ZERO, MISSING_IS_NA, MFB_IS_ZERO, MFB_IS_NA, true, BIN_TYPE>
(GenBitVector_ARGS, is_single_feature_in_column);
}
}
template <bool MIN_IS_MAX, bool MISSING_IS_ZERO, bool MISSING_IS_NA, bool MFB_IS_ZERO, bool MFB_IS_NA, bool MAX_TO_LEFT, typename BIN_TYPE>
void CUDADataPartition::LaunchGenDataToLeftBitVectorKernelInner4(
GenDataToLeftBitVectorKernel_PARMS,
const bool is_single_feature_in_column) {
if (!is_single_feature_in_column) {
hipLaunchKernelGGL(( GenDataToLeftBitVectorKernel
<MIN_IS_MAX, MISSING_IS_ZERO, MISSING_IS_NA, MFB_IS_ZERO, MFB_IS_NA, MAX_TO_LEFT, true, BIN_TYPE>)
, dim3(grid_dim_), dim3(block_dim_), 0, cuda_streams_[0], GenBitVector_ARGS,
cuda_block_to_left_offset_, cuda_block_data_to_left_offset_, cuda_block_data_to_right_offset_);
} else {
hipLaunchKernelGGL(( GenDataToLeftBitVectorKernel
<MIN_IS_MAX, MISSING_IS_ZERO, MISSING_IS_NA, MFB_IS_ZERO, MFB_IS_NA, MAX_TO_LEFT, false, BIN_TYPE>)
, dim3(grid_dim_), dim3(block_dim_), 0, cuda_streams_[0], GenBitVector_ARGS,
cuda_block_to_left_offset_, cuda_block_data_to_left_offset_, cuda_block_data_to_right_offset_);
}
}
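// Translates the split from feature space to bin space: the threshold th and the
// zero bin t_zero_bin are offset by the column's min_bin (minus one when bin 0 is
// the most frequent bin), then dispatch happens on the column's bit width.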
void CUDADataPartition::LaunchGenDataToLeftBitVectorKernel(
const data_size_t num_data_in_leaf,
const int split_feature_index,
const uint32_t split_threshold,
const uint8_t split_default_left,
const data_size_t leaf_data_start,
const int left_leaf_index,
const int right_leaf_index) {
const bool missing_is_zero = static_cast<bool>(cuda_column_data_->feature_missing_is_zero(split_feature_index));
const bool missing_is_na = static_cast<bool>(cuda_column_data_->feature_missing_is_na(split_feature_index));
const bool mfb_is_zero = static_cast<bool>(cuda_column_data_->feature_mfb_is_zero(split_feature_index));
const bool mfb_is_na = static_cast<bool>(cuda_column_data_->feature_mfb_is_na(split_feature_index));
const bool is_single_feature_in_column = is_single_feature_in_column_[split_feature_index];
const uint32_t default_bin = cuda_column_data_->feature_default_bin(split_feature_index);
const uint32_t most_freq_bin = cuda_column_data_->feature_most_freq_bin(split_feature_index);
const uint32_t min_bin = is_single_feature_in_column ? 1 : cuda_column_data_->feature_min_bin(split_feature_index);
const uint32_t max_bin = cuda_column_data_->feature_max_bin(split_feature_index);
uint32_t th = split_threshold + min_bin;
uint32_t t_zero_bin = min_bin + default_bin;
if (most_freq_bin == 0) {
--th;
--t_zero_bin;
}
uint8_t split_default_to_left = 0;
uint8_t split_missing_default_to_left = 0;
int default_leaf_index = right_leaf_index;
int missing_default_leaf_index = right_leaf_index;
if (most_freq_bin <= split_threshold) {
split_default_to_left = 1;
default_leaf_index = left_leaf_index;
}
if (missing_is_zero || missing_is_na) {
if (split_default_left) {
split_missing_default_to_left = 1;
missing_default_leaf_index = left_leaf_index;
}
}
const int column_index = cuda_column_data_->feature_to_column(split_feature_index);
const uint8_t bit_type = cuda_column_data_->column_bit_type(column_index);
const bool max_bin_to_left = (max_bin <= th);
const data_size_t* data_indices_in_leaf = cuda_data_indices_ + leaf_data_start;
const void* column_data_pointer = cuda_column_data_->GetColumnData(column_index);
if (bit_type == 8) {
const uint8_t* column_data = reinterpret_cast<const uint8_t*>(column_data_pointer);
LaunchGenDataToLeftBitVectorKernelInner<uint8_t>(
GenBitVector_ARGS,
missing_is_zero,
missing_is_na,
mfb_is_zero,
mfb_is_na,
max_bin_to_left,
is_single_feature_in_column);
LaunchUpdateDataIndexToLeafIndexKernel<uint8_t>(
UpdateDataIndexToLeafIndex_ARGS,
missing_is_zero,
missing_is_na,
mfb_is_zero,
mfb_is_na,
max_bin_to_left,
is_single_feature_in_column);
} else if (bit_type == 16) {
const uint16_t* column_data = reinterpret_cast<const uint16_t*>(column_data_pointer);
LaunchGenDataToLeftBitVectorKernelInner<uint16_t>(
GenBitVector_ARGS,
missing_is_zero,
missing_is_na,
mfb_is_zero,
mfb_is_na,
max_bin_to_left,
is_single_feature_in_column);
LaunchUpdateDataIndexToLeafIndexKernel<uint16_t>(
UpdateDataIndexToLeafIndex_ARGS,
missing_is_zero,
missing_is_na,
mfb_is_zero,
mfb_is_na,
max_bin_to_left,
is_single_feature_in_column);
} else if (bit_type == 32) {
const uint32_t* column_data = reinterpret_cast<const uint32_t*>(column_data_pointer);
LaunchGenDataToLeftBitVectorKernelInner<uint32_t>(
GenBitVector_ARGS,
missing_is_zero,
missing_is_na,
mfb_is_zero,
mfb_is_na,
max_bin_to_left,
is_single_feature_in_column);
LaunchUpdateDataIndexToLeafIndexKernel<uint32_t>(
UpdateDataIndexToLeafIndex_ARGS,
missing_is_zero,
missing_is_na,
mfb_is_zero,
mfb_is_na,
max_bin_to_left,
is_single_feature_in_column);
}
}
#undef UpdateDataIndexToLeafIndexKernel_PARAMS
#undef UpdateDataIndexToLeafIndex_ARGS
#undef GenDataToLeftBitVectorKernel_PARMS
#undef GenBitVector_ARGS
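// Categorical split: a datum goes to the left child iff its bin is contained in
// the split bitset; out-of-range and zero bins go to the default child.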
template <typename BIN_TYPE, bool USE_MIN_BIN>
__global__ void UpdateDataIndexToLeafIndexKernel_Categorical(
const data_size_t num_data_in_leaf, const data_size_t* data_indices_in_leaf,
const uint32_t* bitset, const int bitset_len, const BIN_TYPE* column_data,
// values from feature
const uint32_t max_bin, const uint32_t min_bin, const int8_t mfb_offset,
int* cuda_data_index_to_leaf_index, const int left_leaf_index, const int right_leaf_index,
const int default_leaf_index) {
const unsigned int local_data_index = blockIdx.x * blockDim.x + threadIdx.x;
if (local_data_index < num_data_in_leaf) {
const unsigned int global_data_index = data_indices_in_leaf[local_data_index];
const uint32_t bin = static_cast<uint32_t>(column_data[global_data_index]);
if (USE_MIN_BIN && (bin < min_bin || bin > max_bin)) {
cuda_data_index_to_leaf_index[global_data_index] = default_leaf_index;
} else if (!USE_MIN_BIN && bin == 0) {
cuda_data_index_to_leaf_index[global_data_index] = default_leaf_index;
} else if (CUDAFindInBitset(bitset, bitset_len, bin - min_bin + mfb_offset)) {
cuda_data_index_to_leaf_index[global_data_index] = left_leaf_index;
} else {
cuda_data_index_to_leaf_index[global_data_index] = right_leaf_index;
}
}
}
// for categorical features
template <typename BIN_TYPE, bool USE_MIN_BIN>
__global__ void GenDataToLeftBitVectorKernel_Categorical(
const data_size_t num_data_in_leaf, const data_size_t* data_indices_in_leaf,
const uint32_t* bitset, int bitset_len, const BIN_TYPE* column_data,
// values from feature
const uint32_t max_bin, const uint32_t min_bin, const int8_t mfb_offset,
const uint8_t split_default_to_left,
uint16_t* block_to_left_offset,
data_size_t* block_to_left_offset_buffer, data_size_t* block_to_right_offset_buffer) {
__shared__ uint16_t shared_mem_buffer[32];
uint16_t thread_to_left_offset_cnt = 0;
const unsigned int local_data_index = blockIdx.x * blockDim.x + threadIdx.x;
if (local_data_index < num_data_in_leaf) {
const unsigned int global_data_index = data_indices_in_leaf[local_data_index];
const uint32_t bin = static_cast<uint32_t>(column_data[global_data_index]);
if (USE_MIN_BIN && (bin < min_bin || bin > max_bin)) {
thread_to_left_offset_cnt = split_default_to_left;
} else if (!USE_MIN_BIN && bin == 0) {
thread_to_left_offset_cnt = split_default_to_left;
} else if (CUDAFindInBitset(bitset, bitset_len, bin - min_bin + mfb_offset)) {
thread_to_left_offset_cnt = 1;
}
}
__syncthreads();
PrepareOffset(num_data_in_leaf, block_to_left_offset + blockIdx.x * blockDim.x, block_to_left_offset_buffer, block_to_right_offset_buffer,
thread_to_left_offset_cnt, shared_mem_buffer);
}
#define GenBitVector_Categorical_ARGS \
num_data_in_leaf, data_indices_in_leaf, \
bitset, bitset_len, \
column_data, max_bin, min_bin, mfb_offset, split_default_to_left, \
cuda_block_to_left_offset_, cuda_block_data_to_left_offset_, cuda_block_data_to_right_offset_
#define UpdateDataIndexToLeafIndex_Categorical_ARGS \
num_data_in_leaf, data_indices_in_leaf, \
bitset, bitset_len, \
column_data, max_bin, min_bin, mfb_offset, \
cuda_data_index_to_leaf_index_, left_leaf_index, right_leaf_index, default_leaf_index
void CUDADataPartition::LaunchGenDataToLeftBitVectorCategoricalKernel(
const data_size_t num_data_in_leaf,
const int split_feature_index,
const uint32_t* bitset,
const int bitset_len,
const uint8_t split_default_left,
const data_size_t leaf_data_start,
const int left_leaf_index,
const int right_leaf_index) {
const data_size_t* data_indices_in_leaf = cuda_data_indices_ + leaf_data_start;
const int column_index = cuda_column_data_->feature_to_column(split_feature_index);
const uint8_t bit_type = cuda_column_data_->column_bit_type(column_index);
const bool is_single_feature_in_column = is_single_feature_in_column_[split_feature_index];
const uint32_t min_bin = is_single_feature_in_column ? 1 : cuda_column_data_->feature_min_bin(split_feature_index);
const uint32_t max_bin = cuda_column_data_->feature_max_bin(split_feature_index);
const uint32_t most_freq_bin = cuda_column_data_->feature_most_freq_bin(split_feature_index);
const uint32_t default_bin = cuda_column_data_->feature_default_bin(split_feature_index);
const void* column_data_pointer = cuda_column_data_->GetColumnData(column_index);
const int8_t mfb_offset = static_cast<int8_t>(most_freq_bin == 0);
std::vector<uint32_t> host_bitset(bitset_len, 0);
CopyFromCUDADeviceToHost<uint32_t>(host_bitset.data(), bitset, bitset_len, __FILE__, __LINE__);
uint8_t split_default_to_left = 0;
int default_leaf_index = right_leaf_index;
if (most_freq_bin > 0 && Common::FindInBitset(host_bitset.data(), bitset_len, most_freq_bin)) {
split_default_to_left = 1;
default_leaf_index = left_leaf_index;
}
if (bit_type == 8) {
const uint8_t* column_data = reinterpret_cast<const uint8_t*>(column_data_pointer);
if (is_single_feature_in_column) {
hipLaunchKernelGGL(( GenDataToLeftBitVectorKernel_Categorical<uint8_t, false>), dim3(grid_dim_), dim3(block_dim_), 0, cuda_streams_[0], GenBitVector_Categorical_ARGS);
hipLaunchKernelGGL(( UpdateDataIndexToLeafIndexKernel_Categorical<uint8_t, false>), dim3(grid_dim_), dim3(block_dim_), 0, cuda_streams_[3], UpdateDataIndexToLeafIndex_Categorical_ARGS);
} else {
hipLaunchKernelGGL(( GenDataToLeftBitVectorKernel_Categorical<uint8_t, true>), dim3(grid_dim_), dim3(block_dim_), 0, cuda_streams_[0], GenBitVector_Categorical_ARGS);
hipLaunchKernelGGL(( UpdateDataIndexToLeafIndexKernel_Categorical<uint8_t, true>), dim3(grid_dim_), dim3(block_dim_), 0, cuda_streams_[3], UpdateDataIndexToLeafIndex_Categorical_ARGS);
}
} else if (bit_type == 16) {
const uint16_t* column_data = reinterpret_cast<const uint16_t*>(column_data_pointer);
if (is_single_feature_in_column) {
hipLaunchKernelGGL(( GenDataToLeftBitVectorKernel_Categorical<uint16_t, false>), dim3(grid_dim_), dim3(block_dim_), 0, cuda_streams_[0], GenBitVector_Categorical_ARGS);
hipLaunchKernelGGL(( UpdateDataIndexToLeafIndexKernel_Categorical<uint16_t, false>), dim3(grid_dim_), dim3(block_dim_), 0, cuda_streams_[3], UpdateDataIndexToLeafIndex_Categorical_ARGS);
} else {
hipLaunchKernelGGL(( GenDataToLeftBitVectorKernel_Categorical<uint16_t, true>), dim3(grid_dim_), dim3(block_dim_), 0, cuda_streams_[0], GenBitVector_Categorical_ARGS);
hipLaunchKernelGGL(( UpdateDataIndexToLeafIndexKernel_Categorical<uint16_t, true>), dim3(grid_dim_), dim3(block_dim_), 0, cuda_streams_[3], UpdateDataIndexToLeafIndex_Categorical_ARGS);
}
} else if (bit_type == 32) {
const uint32_t* column_data = reinterpret_cast<const uint32_t*>(column_data_pointer);
if (is_single_feature_in_column) {
hipLaunchKernelGGL(( GenDataToLeftBitVectorKernel_Categorical<uint32_t, false>), dim3(grid_dim_), dim3(block_dim_), 0, cuda_streams_[0], GenBitVector_Categorical_ARGS);
hipLaunchKernelGGL(( UpdateDataIndexToLeafIndexKernel_Categorical<uint32_t, false>), dim3(grid_dim_), dim3(block_dim_), 0, cuda_streams_[3], UpdateDataIndexToLeafIndex_Categorical_ARGS);
} else {
hipLaunchKernelGGL(( GenDataToLeftBitVectorKernel_Categorical<uint32_t, true>), dim3(grid_dim_), dim3(block_dim_), 0, cuda_streams_[0], GenBitVector_Categorical_ARGS);
hipLaunchKernelGGL(( UpdateDataIndexToLeafIndexKernel_Categorical<uint32_t, true>), dim3(grid_dim_), dim3(block_dim_), 0, cuda_streams_[3], UpdateDataIndexToLeafIndex_Categorical_ARGS);
}
}
}
#undef GenBitVector_Categorical_ARGS
#undef UpdateDataIndexToLeafIndex_Categorical_ARGS
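// Aggregates the per-block left/right counts into exclusive global offsets via a
// prefix sum. This variant is used when grid_dim_ exceeds
// AGGREGATE_BLOCK_SIZE_DATA_PARTITION; each thread first scans a contiguous range
// of blocks sequentially, then the per-thread totals are combined with a
// shuffle-based prefix sum.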
__global__ void AggregateBlockOffsetKernel0(
const int left_leaf_index,
const int right_leaf_index,
data_size_t* block_to_left_offset_buffer,
data_size_t* block_to_right_offset_buffer, data_size_t* cuda_leaf_data_start,
data_size_t* cuda_leaf_data_end, data_size_t* cuda_leaf_num_data, const data_size_t* cuda_data_indices,
const data_size_t num_blocks) {
__shared__ uint32_t shared_mem_buffer[32];
__shared__ uint32_t to_left_total_count;
const data_size_t num_data_in_leaf = cuda_leaf_num_data[left_leaf_index];
const unsigned int blockDim_x = blockDim.x;
const unsigned int threadIdx_x = threadIdx.x;
const data_size_t num_blocks_plus_1 = num_blocks + 1;
const uint32_t num_blocks_per_thread = (num_blocks_plus_1 + blockDim_x - 1) / blockDim_x;
const uint32_t remain = num_blocks_plus_1 - ((num_blocks_per_thread - 1) * blockDim_x);
const uint32_t remain_offset = remain * num_blocks_per_thread;
uint32_t thread_start_block_index = 0;
uint32_t thread_end_block_index = 0;
if (threadIdx_x < remain) {
thread_start_block_index = threadIdx_x * num_blocks_per_thread;
thread_end_block_index = min(thread_start_block_index + num_blocks_per_thread, num_blocks_plus_1);
} else {
thread_start_block_index = remain_offset + (num_blocks_per_thread - 1) * (threadIdx_x - remain);
thread_end_block_index = min(thread_start_block_index + num_blocks_per_thread - 1, num_blocks_plus_1);
}
if (threadIdx.x == 0) {
block_to_right_offset_buffer[0] = 0;
}
__syncthreads();
for (uint32_t block_index = thread_start_block_index + 1; block_index < thread_end_block_index; ++block_index) {
block_to_left_offset_buffer[block_index] += block_to_left_offset_buffer[block_index - 1];
block_to_right_offset_buffer[block_index] += block_to_right_offset_buffer[block_index - 1];
}
__syncthreads();
uint32_t block_to_left_offset = 0;
uint32_t block_to_right_offset = 0;
if (thread_start_block_index < thread_end_block_index && thread_start_block_index > 1) {
block_to_left_offset = block_to_left_offset_buffer[thread_start_block_index - 1];
block_to_right_offset = block_to_right_offset_buffer[thread_start_block_index - 1];
}
block_to_left_offset = ShufflePrefixSum<uint32_t>(block_to_left_offset, shared_mem_buffer);
__syncthreads();
block_to_right_offset = ShufflePrefixSum<uint32_t>(block_to_right_offset, shared_mem_buffer);
if (threadIdx_x == blockDim_x - 1) {
to_left_total_count = block_to_left_offset + block_to_left_offset_buffer[num_blocks];
}
__syncthreads();
const uint32_t to_left_thread_block_offset = block_to_left_offset;
const uint32_t to_right_thread_block_offset = block_to_right_offset + to_left_total_count;
for (uint32_t block_index = thread_start_block_index; block_index < thread_end_block_index; ++block_index) {
block_to_left_offset_buffer[block_index] += to_left_thread_block_offset;
block_to_right_offset_buffer[block_index] += to_right_thread_block_offset;
}
__syncthreads();
if (blockIdx.x == 0 && threadIdx.x == 0) {
const data_size_t old_leaf_data_end = cuda_leaf_data_end[left_leaf_index];
cuda_leaf_data_end[left_leaf_index] = cuda_leaf_data_start[left_leaf_index] + static_cast<data_size_t>(to_left_total_count);
cuda_leaf_num_data[left_leaf_index] = static_cast<data_size_t>(to_left_total_count);
cuda_leaf_data_start[right_leaf_index] = cuda_leaf_data_end[left_leaf_index];
cuda_leaf_data_end[right_leaf_index] = old_leaf_data_end;
cuda_leaf_num_data[right_leaf_index] = num_data_in_leaf - static_cast<data_size_t>(to_left_total_count);
}
}
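// Simpler aggregation variant used when all block counts fit within a single
// thread block: one ShufflePrefixSum pass, no per-thread block ranges.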
__global__ void AggregateBlockOffsetKernel1(
const int left_leaf_index,
const int right_leaf_index,
data_size_t* block_to_left_offset_buffer,
data_size_t* block_to_right_offset_buffer, data_size_t* cuda_leaf_data_start,
data_size_t* cuda_leaf_data_end, data_size_t* cuda_leaf_num_data, const data_size_t* cuda_data_indices,
const data_size_t num_blocks) {
__shared__ uint32_t shared_mem_buffer[32];
__shared__ uint32_t to_left_total_count;
const data_size_t num_data_in_leaf = cuda_leaf_num_data[left_leaf_index];
const unsigned int threadIdx_x = threadIdx.x;
uint32_t block_to_left_offset = 0;
uint32_t block_to_right_offset = 0;
if (threadIdx_x < static_cast<unsigned int>(num_blocks)) {
block_to_left_offset = block_to_left_offset_buffer[threadIdx_x + 1];
block_to_right_offset = block_to_right_offset_buffer[threadIdx_x + 1];
}
block_to_left_offset = ShufflePrefixSum<uint32_t>(block_to_left_offset, shared_mem_buffer);
__syncthreads();
block_to_right_offset = ShufflePrefixSum<uint32_t>(block_to_right_offset, shared_mem_buffer);
if (threadIdx.x == blockDim.x - 1) {
to_left_total_count = block_to_left_offset;
}
__syncthreads();
if (threadIdx_x < static_cast<unsigned int>(num_blocks)) {
block_to_left_offset_buffer[threadIdx_x + 1] = block_to_left_offset;
block_to_right_offset_buffer[threadIdx_x + 1] = block_to_right_offset + to_left_total_count;
}
if (threadIdx_x == 0) {
block_to_right_offset_buffer[0] = to_left_total_count;
}
__syncthreads();
if (blockIdx.x == 0 && threadIdx.x == 0) {
const data_size_t old_leaf_data_end = cuda_leaf_data_end[left_leaf_index];
cuda_leaf_data_end[left_leaf_index] = cuda_leaf_data_start[left_leaf_index] + static_cast<data_size_t>(to_left_total_count);
cuda_leaf_num_data[left_leaf_index] = static_cast<data_size_t>(to_left_total_count);
cuda_leaf_data_start[right_leaf_index] = cuda_leaf_data_end[left_leaf_index];
cuda_leaf_data_end[right_leaf_index] = old_leaf_data_end;
cuda_leaf_num_data[right_leaf_index] = num_data_in_leaf - static_cast<data_size_t>(to_left_total_count);
}
}
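// Updates tree bookkeeping after the split; each scalar store is assigned to its
// own thread index. The child with fewer data becomes the "smaller" leaf, and the
// histogram pointers are swapped when needed so the smaller leaf gets a fresh
// histogram buffer while its sibling's can be derived by histogram subtraction.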
__global__ void SplitTreeStructureKernel(const int left_leaf_index,
const int right_leaf_index,
data_size_t* block_to_left_offset_buffer,
data_size_t* block_to_right_offset_buffer, data_size_t* cuda_leaf_data_start,
data_size_t* cuda_leaf_data_end, data_size_t* cuda_leaf_num_data, const data_size_t* cuda_data_indices,
const CUDASplitInfo* best_split_info,
// for leaf splits information update
CUDALeafSplitsStruct* smaller_leaf_splits,
CUDALeafSplitsStruct* larger_leaf_splits,
const int num_total_bin,
hist_t* cuda_hist, hist_t** cuda_hist_pool,
double* cuda_leaf_output,
int* cuda_split_info_buffer) {
const unsigned int to_left_total_cnt = cuda_leaf_num_data[left_leaf_index];
double* cuda_split_info_buffer_for_hessians = reinterpret_cast<double*>(cuda_split_info_buffer + 8);
const unsigned int global_thread_index = blockIdx.x * blockDim.x + threadIdx.x;
if (global_thread_index == 0) {
cuda_leaf_output[left_leaf_index] = best_split_info->left_value;
} else if (global_thread_index == 1) {
cuda_leaf_output[right_leaf_index] = best_split_info->right_value;
} else if (global_thread_index == 2) {
cuda_split_info_buffer[0] = left_leaf_index;
} else if (global_thread_index == 3) {
cuda_split_info_buffer[1] = cuda_leaf_num_data[left_leaf_index];
} else if (global_thread_index == 4) {
cuda_split_info_buffer[2] = cuda_leaf_data_start[left_leaf_index];
} else if (global_thread_index == 5) {
cuda_split_info_buffer[3] = right_leaf_index;
} else if (global_thread_index == 6) {
cuda_split_info_buffer[4] = cuda_leaf_num_data[right_leaf_index];
} else if (global_thread_index == 7) {
cuda_split_info_buffer[5] = cuda_leaf_data_start[right_leaf_index];
} else if (global_thread_index == 8) {
cuda_split_info_buffer_for_hessians[0] = best_split_info->left_sum_hessians;
cuda_split_info_buffer_for_hessians[2] = best_split_info->left_sum_gradients;
} else if (global_thread_index == 9) {
cuda_split_info_buffer_for_hessians[1] = best_split_info->right_sum_hessians;
cuda_split_info_buffer_for_hessians[3] = best_split_info->right_sum_gradients;
}
if (cuda_leaf_num_data[left_leaf_index] < cuda_leaf_num_data[right_leaf_index]) {
if (global_thread_index == 0) {
hist_t* parent_hist_ptr = cuda_hist_pool[left_leaf_index];
cuda_hist_pool[right_leaf_index] = parent_hist_ptr;
cuda_hist_pool[left_leaf_index] = cuda_hist + 2 * right_leaf_index * num_total_bin;
smaller_leaf_splits->hist_in_leaf = cuda_hist_pool[left_leaf_index];
larger_leaf_splits->hist_in_leaf = cuda_hist_pool[right_leaf_index];
} else if (global_thread_index == 1) {
smaller_leaf_splits->sum_of_gradients = best_split_info->left_sum_gradients;
} else if (global_thread_index == 2) {
smaller_leaf_splits->sum_of_hessians = best_split_info->left_sum_hessians;
} else if (global_thread_index == 3) {
smaller_leaf_splits->num_data_in_leaf = to_left_total_cnt;
} else if (global_thread_index == 4) {
smaller_leaf_splits->gain = best_split_info->left_gain;
} else if (global_thread_index == 5) {
smaller_leaf_splits->leaf_value = best_split_info->left_value;
} else if (global_thread_index == 6) {
smaller_leaf_splits->data_indices_in_leaf = cuda_data_indices;
} else if (global_thread_index == 7) {
larger_leaf_splits->leaf_index = right_leaf_index;
} else if (global_thread_index == 8) {
larger_leaf_splits->sum_of_gradients = best_split_info->right_sum_gradients;
} else if (global_thread_index == 9) {
larger_leaf_splits->sum_of_hessians = best_split_info->right_sum_hessians;
} else if (global_thread_index == 10) {
larger_leaf_splits->num_data_in_leaf = cuda_leaf_num_data[right_leaf_index];
} else if (global_thread_index == 11) {
larger_leaf_splits->gain = best_split_info->right_gain;
} else if (global_thread_index == 12) {
larger_leaf_splits->leaf_value = best_split_info->right_value;
} else if (global_thread_index == 13) {
larger_leaf_splits->data_indices_in_leaf = cuda_data_indices + cuda_leaf_num_data[left_leaf_index];
} else if (global_thread_index == 14) {
cuda_split_info_buffer[6] = left_leaf_index;
} else if (global_thread_index == 15) {
cuda_split_info_buffer[7] = right_leaf_index;
} else if (global_thread_index == 16) {
smaller_leaf_splits->leaf_index = left_leaf_index;
}
} else {
if (global_thread_index == 0) {
larger_leaf_splits->leaf_index = left_leaf_index;
} else if (global_thread_index == 1) {
larger_leaf_splits->sum_of_gradients = best_split_info->left_sum_gradients;
} else if (global_thread_index == 2) {
larger_leaf_splits->sum_of_hessians = best_split_info->left_sum_hessians;
} else if (global_thread_index == 3) {
larger_leaf_splits->num_data_in_leaf = to_left_total_cnt;
} else if (global_thread_index == 4) {
larger_leaf_splits->gain = best_split_info->left_gain;
} else if (global_thread_index == 5) {
larger_leaf_splits->leaf_value = best_split_info->left_value;
} else if (global_thread_index == 6) {
larger_leaf_splits->data_indices_in_leaf = cuda_data_indices;
} else if (global_thread_index == 7) {
smaller_leaf_splits->leaf_index = right_leaf_index;
} else if (global_thread_index == 8) {
smaller_leaf_splits->sum_of_gradients = best_split_info->right_sum_gradients;
} else if (global_thread_index == 9) {
smaller_leaf_splits->sum_of_hessians = best_split_info->right_sum_hessians;
} else if (global_thread_index == 10) {
smaller_leaf_splits->num_data_in_leaf = cuda_leaf_num_data[right_leaf_index];
} else if (global_thread_index == 11) {
smaller_leaf_splits->gain = best_split_info->right_gain;
} else if (global_thread_index == 12) {
smaller_leaf_splits->leaf_value = best_split_info->right_value;
} else if (global_thread_index == 13) {
smaller_leaf_splits->data_indices_in_leaf = cuda_data_indices + cuda_leaf_num_data[left_leaf_index];
} else if (global_thread_index == 14) {
cuda_hist_pool[right_leaf_index] = cuda_hist + 2 * right_leaf_index * num_total_bin;
smaller_leaf_splits->hist_in_leaf = cuda_hist_pool[right_leaf_index];
} else if (global_thread_index == 15) {
larger_leaf_splits->hist_in_leaf = cuda_hist_pool[left_leaf_index];
} else if (global_thread_index == 16) {
cuda_split_info_buffer[6] = right_leaf_index;
} else if (global_thread_index == 17) {
cuda_split_info_buffer[7] = left_leaf_index;
}
}
}
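// Scatters the leaf's data indices into contiguous left/right child ranges, using
// the global block offsets plus the intra-block prefix sums from
// block_to_left_offset.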
__global__ void SplitInnerKernel(const int left_leaf_index, const int right_leaf_index,
const data_size_t* cuda_leaf_data_start, const data_size_t* cuda_leaf_num_data,
const data_size_t* cuda_data_indices,
const data_size_t* block_to_left_offset_buffer, const data_size_t* block_to_right_offset_buffer,
const uint16_t* block_to_left_offset, data_size_t* out_data_indices_in_leaf) {
const data_size_t leaf_num_data_offset = cuda_leaf_data_start[left_leaf_index];
const data_size_t num_data_in_leaf = cuda_leaf_num_data[left_leaf_index] + cuda_leaf_num_data[right_leaf_index];
const unsigned int threadIdx_x = threadIdx.x;
const unsigned int blockDim_x = blockDim.x;
const unsigned int global_thread_index = blockIdx.x * blockDim_x + threadIdx_x;
const data_size_t* cuda_data_indices_in_leaf = cuda_data_indices + leaf_num_data_offset;
const uint16_t* block_to_left_offset_ptr = block_to_left_offset + blockIdx.x * blockDim_x;
const uint32_t to_right_block_offset = block_to_right_offset_buffer[blockIdx.x];
const uint32_t to_left_block_offset = block_to_left_offset_buffer[blockIdx.x];
data_size_t* left_out_data_indices_in_leaf = out_data_indices_in_leaf + to_left_block_offset;
data_size_t* right_out_data_indices_in_leaf = out_data_indices_in_leaf + to_right_block_offset;
if (static_cast<data_size_t>(global_thread_index) < num_data_in_leaf) {
const uint32_t thread_to_left_offset = (threadIdx_x == 0 ? 0 : block_to_left_offset_ptr[threadIdx_x - 1]);
const bool to_left = block_to_left_offset_ptr[threadIdx_x] > thread_to_left_offset;
if (to_left) {
left_out_data_indices_in_leaf[thread_to_left_offset] = cuda_data_indices_in_leaf[global_thread_index];
} else {
const uint32_t thread_to_right_offset = threadIdx.x - thread_to_left_offset;
right_out_data_indices_in_leaf[thread_to_right_offset] = cuda_data_indices_in_leaf[global_thread_index];
}
}
}
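// Copies the partitioned indices from the temporary buffer back into the leaf's
// slice of the data index array.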
__global__ void CopyDataIndicesKernel(
const data_size_t num_data_in_leaf,
const data_size_t* out_data_indices_in_leaf,
data_size_t* cuda_data_indices) {
const unsigned int threadIdx_x = threadIdx.x;
const unsigned int global_thread_index = blockIdx.x * blockDim.x + threadIdx_x;
if (global_thread_index < num_data_in_leaf) {
cuda_data_indices[global_thread_index] = out_data_indices_in_leaf[global_thread_index];
}
}
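// Orchestrates the split: aggregate block offsets, scatter the data indices,
// update the tree structure on device, copy the split metadata back to the host,
// and write the scattered indices back in place.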
void CUDADataPartition::LaunchSplitInnerKernel(
const data_size_t num_data_in_leaf,
const CUDASplitInfo* best_split_info,
const int left_leaf_index,
const int right_leaf_index,
// for leaf splits information update
CUDALeafSplitsStruct* smaller_leaf_splits,
CUDALeafSplitsStruct* larger_leaf_splits,
data_size_t* left_leaf_num_data_ref,
data_size_t* right_leaf_num_data_ref,
data_size_t* left_leaf_start_ref,
data_size_t* right_leaf_start_ref,
double* left_leaf_sum_of_hessians_ref,
double* right_leaf_sum_of_hessians_ref,
double* left_leaf_sum_of_gradients_ref,
double* right_leaf_sum_of_gradients_ref) {
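  // Round grid_dim_ up to the next power of two; this is the thread count used
  // for AggregateBlockOffsetKernel1 below.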
int num_blocks_final_ref = grid_dim_ - 1;
int num_blocks_final_aligned = 1;
while (num_blocks_final_ref > 0) {
num_blocks_final_aligned <<= 1;
num_blocks_final_ref >>= 1;
}
global_timer.Start("CUDADataPartition::AggregateBlockOffsetKernel");
if (grid_dim_ > AGGREGATE_BLOCK_SIZE_DATA_PARTITION) {
hipLaunchKernelGGL(( AggregateBlockOffsetKernel0), dim3(1), dim3(AGGREGATE_BLOCK_SIZE_DATA_PARTITION), 0, cuda_streams_[0],
left_leaf_index,
right_leaf_index,
cuda_block_data_to_left_offset_,
cuda_block_data_to_right_offset_, cuda_leaf_data_start_, cuda_leaf_data_end_,
cuda_leaf_num_data_, cuda_data_indices_,
grid_dim_);
} else {
hipLaunchKernelGGL(( AggregateBlockOffsetKernel1), dim3(1), dim3(num_blocks_final_aligned), 0, cuda_streams_[0],
left_leaf_index,
right_leaf_index,
cuda_block_data_to_left_offset_,
cuda_block_data_to_right_offset_, cuda_leaf_data_start_, cuda_leaf_data_end_,
cuda_leaf_num_data_, cuda_data_indices_,
grid_dim_);
}
SynchronizeCUDADevice(__FILE__, __LINE__);
global_timer.Stop("CUDADataPartition::AggregateBlockOffsetKernel");
global_timer.Start("CUDADataPartition::SplitInnerKernel");
hipLaunchKernelGGL(( SplitInnerKernel), dim3(grid_dim_), dim3(block_dim_), 0, cuda_streams_[1],
left_leaf_index, right_leaf_index, cuda_leaf_data_start_, cuda_leaf_num_data_, cuda_data_indices_,
cuda_block_data_to_left_offset_, cuda_block_data_to_right_offset_, cuda_block_to_left_offset_,
cuda_out_data_indices_in_leaf_);
global_timer.Stop("CUDADataPartition::SplitInnerKernel");
SynchronizeCUDADevice(__FILE__, __LINE__);
global_timer.Start("CUDADataPartition::SplitTreeStructureKernel");
hipLaunchKernelGGL(( SplitTreeStructureKernel), dim3(4), dim3(5), 0, cuda_streams_[0], left_leaf_index, right_leaf_index,
cuda_block_data_to_left_offset_,
cuda_block_data_to_right_offset_, cuda_leaf_data_start_, cuda_leaf_data_end_,
cuda_leaf_num_data_, cuda_out_data_indices_in_leaf_,
best_split_info,
smaller_leaf_splits,
larger_leaf_splits,
num_total_bin_,
cuda_hist_,
cuda_hist_pool_,
cuda_leaf_output_, cuda_split_info_buffer_);
global_timer.Stop("CUDADataPartition::SplitTreeStructureKernel");
std::vector<int> cpu_split_info_buffer(16);
const double* cpu_sum_hessians_info = reinterpret_cast<const double*>(cpu_split_info_buffer.data() + 8);
global_timer.Start("CUDADataPartition::CopyFromCUDADeviceToHostAsync");
CopyFromCUDADeviceToHostAsync<int>(cpu_split_info_buffer.data(), cuda_split_info_buffer_, 16, cuda_streams_[0], __FILE__, __LINE__);
SynchronizeCUDADevice(__FILE__, __LINE__);
global_timer.Stop("CUDADataPartition::CopyFromCUDADeviceToHostAsync");
const data_size_t left_leaf_num_data = cpu_split_info_buffer[1];
const data_size_t left_leaf_data_start = cpu_split_info_buffer[2];
const data_size_t right_leaf_num_data = cpu_split_info_buffer[4];
global_timer.Start("CUDADataPartition::CopyDataIndicesKernel");
hipLaunchKernelGGL(( CopyDataIndicesKernel), dim3(grid_dim_), dim3(block_dim_), 0, cuda_streams_[2],
left_leaf_num_data + right_leaf_num_data, cuda_out_data_indices_in_leaf_, cuda_data_indices_ + left_leaf_data_start);
global_timer.Stop("CUDADataPartition::CopyDataIndicesKernel");
const data_size_t right_leaf_data_start = cpu_split_info_buffer[5];
*left_leaf_num_data_ref = left_leaf_num_data;
*left_leaf_start_ref = left_leaf_data_start;
*right_leaf_num_data_ref = right_leaf_num_data;
*right_leaf_start_ref = right_leaf_data_start;
*left_leaf_sum_of_hessians_ref = cpu_sum_hessians_info[0];
*right_leaf_sum_of_hessians_ref = cpu_sum_hessians_info[1];
*left_leaf_sum_of_gradients_ref = cpu_sum_hessians_info[2];
*right_leaf_sum_of_gradients_ref = cpu_sum_hessians_info[3];
}
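// Adds each leaf's output value to the per-datum scores. With bagging, the leaf
// holds a subset of the data, so indices are dereferenced through
// data_indices_in_leaf; without bagging every datum is addressed directly.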
template <bool USE_BAGGING>
__global__ void AddPredictionToScoreKernel(
const data_size_t* data_indices_in_leaf,
const double* leaf_value, double* cuda_scores,
const int* cuda_data_index_to_leaf_index, const data_size_t num_data) {
const unsigned int threadIdx_x = threadIdx.x;
const unsigned int blockIdx_x = blockIdx.x;
const unsigned int blockDim_x = blockDim.x;
const data_size_t local_data_index = static_cast<data_size_t>(blockIdx_x * blockDim_x + threadIdx_x);
if (local_data_index < num_data) {
if (USE_BAGGING) {
const data_size_t global_data_index = data_indices_in_leaf[local_data_index];
const int leaf_index = cuda_data_index_to_leaf_index[global_data_index];
const double leaf_prediction_value = leaf_value[leaf_index];
cuda_scores[global_data_index] += leaf_prediction_value;
} else {
const int leaf_index = cuda_data_index_to_leaf_index[local_data_index];
const double leaf_prediction_value = leaf_value[leaf_index];
cuda_scores[local_data_index] += leaf_prediction_value;
}
}
}
void CUDADataPartition::LaunchAddPredictionToScoreKernel(const double* leaf_value, double* cuda_scores) {
global_timer.Start("CUDADataPartition::AddPredictionToScoreKernel");
const data_size_t num_data_in_root = root_num_data();
const int num_blocks = (num_data_in_root + FILL_INDICES_BLOCK_SIZE_DATA_PARTITION - 1) / FILL_INDICES_BLOCK_SIZE_DATA_PARTITION;
if (use_bagging_) {
hipLaunchKernelGGL(( AddPredictionToScoreKernel<true>), dim3(num_blocks), dim3(FILL_INDICES_BLOCK_SIZE_DATA_PARTITION), 0, 0,
cuda_data_indices_, leaf_value, cuda_scores, cuda_data_index_to_leaf_index_, num_data_in_root);
} else {
hipLaunchKernelGGL(( AddPredictionToScoreKernel<false>), dim3(num_blocks), dim3(FILL_INDICES_BLOCK_SIZE_DATA_PARTITION), 0, 0,
cuda_data_indices_, leaf_value, cuda_scores, cuda_data_index_to_leaf_index_, num_data_in_root);
}
SynchronizeCUDADevice(__FILE__, __LINE__);
global_timer.Stop("CUDADataPartition::AddPredictionToScoreKernel");
}
} // namespace LightGBM
#endif // USE_CUDA_EXP
| c37676d007792230f1a3cd31f8c53c6ca6b4e811.cu | /*!
* Copyright (c) 2021 Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See LICENSE file in the project root for
* license information.
*/
#ifdef USE_CUDA_EXP
#include "cuda_data_partition.hpp"
#include <LightGBM/cuda/cuda_algorithms.hpp>
#include <LightGBM/tree.h>
#include <algorithm>
#include <vector>
namespace LightGBM {
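// Initializes the data index array to the identity permutation and maps every
// datum to the root leaf (index 0) before training starts.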
__global__ void FillDataIndicesBeforeTrainKernel(const data_size_t num_data,
data_size_t* data_indices, int* cuda_data_index_to_leaf_index) {
const unsigned int data_index = threadIdx.x + blockIdx.x * blockDim.x;
if (data_index < num_data) {
data_indices[data_index] = data_index;
cuda_data_index_to_leaf_index[data_index] = 0;
}
}
__global__ void FillDataIndexToLeafIndexKernel(
const data_size_t num_data,
const data_size_t* data_indices,
int* data_index_to_leaf_index) {
const data_size_t data_index = static_cast<data_size_t>(threadIdx.x + blockIdx.x * blockDim.x);
if (data_index < num_data) {
data_index_to_leaf_index[data_indices[data_index]] = 0;
}
}
void CUDADataPartition::LaunchFillDataIndicesBeforeTrain() {
const data_size_t num_data_in_root = root_num_data();
const int num_blocks = (num_data_in_root + FILL_INDICES_BLOCK_SIZE_DATA_PARTITION - 1) / FILL_INDICES_BLOCK_SIZE_DATA_PARTITION;
FillDataIndicesBeforeTrainKernel<<<num_blocks, FILL_INDICES_BLOCK_SIZE_DATA_PARTITION>>>(num_data_in_root, cuda_data_indices_, cuda_data_index_to_leaf_index_);
}
void CUDADataPartition::LaunchFillDataIndexToLeafIndex() {
const data_size_t num_data_in_root = root_num_data();
const int num_blocks = (num_data_in_root + FILL_INDICES_BLOCK_SIZE_DATA_PARTITION - 1) / FILL_INDICES_BLOCK_SIZE_DATA_PARTITION;
FillDataIndexToLeafIndexKernel<<<num_blocks, FILL_INDICES_BLOCK_SIZE_DATA_PARTITION>>>(num_data_in_root, cuda_data_indices_, cuda_data_index_to_leaf_index_);
}
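// Block-level helper: prefix-sums the per-thread to-left flags with
// ShufflePrefixSum, stores each thread's inclusive offset, and writes the block's
// total left/right counts at position blockIdx.x + 1 for later aggregation.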
__device__ __forceinline__ void PrepareOffset(const data_size_t num_data_in_leaf, uint16_t* block_to_left_offset,
data_size_t* block_to_left_offset_buffer, data_size_t* block_to_right_offset_buffer,
const uint16_t thread_to_left_offset_cnt, uint16_t* shared_mem_buffer) {
const unsigned int threadIdx_x = threadIdx.x;
const unsigned int blockDim_x = blockDim.x;
const uint16_t thread_to_left_offset = ShufflePrefixSum<uint16_t>(thread_to_left_offset_cnt, shared_mem_buffer);
const data_size_t num_data_in_block = (blockIdx.x + 1) * blockDim_x <= num_data_in_leaf ? static_cast<data_size_t>(blockDim_x) :
num_data_in_leaf - static_cast<data_size_t>(blockIdx.x * blockDim_x);
if (static_cast<data_size_t>(threadIdx_x) < num_data_in_block) {
block_to_left_offset[threadIdx_x] = thread_to_left_offset;
}
if (threadIdx_x == blockDim_x - 1) {
if (num_data_in_block > 0) {
const data_size_t data_to_left = static_cast<data_size_t>(thread_to_left_offset);
block_to_left_offset_buffer[blockIdx.x + 1] = data_to_left;
block_to_right_offset_buffer[blockIdx.x + 1] = num_data_in_block - data_to_left;
} else {
block_to_left_offset_buffer[blockIdx.x + 1] = 0;
block_to_right_offset_buffer[blockIdx.x + 1] = 0;
}
}
}
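// Returns whether bit `pos` is set in a bitset stored as n 32-bit words.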
template <typename T>
__device__ bool CUDAFindInBitset(const uint32_t* bits, int n, T pos) {
int i1 = pos / 32;
if (i1 >= n) {
return false;
}
int i2 = pos % 32;
return (bits[i1] >> i2) & 1;
}
#define UpdateDataIndexToLeafIndexKernel_PARAMS \
const BIN_TYPE* column_data, \
const data_size_t num_data_in_leaf, \
const data_size_t* data_indices_in_leaf, \
const uint32_t th, \
const uint32_t t_zero_bin, \
const uint32_t max_bin, \
const uint32_t min_bin, \
const int left_leaf_index, \
const int right_leaf_index, \
const int default_leaf_index, \
const int missing_default_leaf_index
#define UpdateDataIndexToLeafIndex_ARGS \
column_data, \
num_data_in_leaf, \
data_indices_in_leaf, th, \
t_zero_bin, \
max_bin, \
min_bin, \
left_leaf_index, \
right_leaf_index, \
default_leaf_index, \
missing_default_leaf_index
template <bool MIN_IS_MAX, bool MISSING_IS_ZERO, bool MISSING_IS_NA, bool MFB_IS_ZERO, bool MFB_IS_NA, bool MAX_TO_LEFT, bool USE_MIN_BIN, typename BIN_TYPE>
__global__ void UpdateDataIndexToLeafIndexKernel(
UpdateDataIndexToLeafIndexKernel_PARAMS,
int* cuda_data_index_to_leaf_index) {
const unsigned int local_data_index = blockIdx.x * blockDim.x + threadIdx.x;
if (local_data_index < num_data_in_leaf) {
const unsigned int global_data_index = data_indices_in_leaf[local_data_index];
const uint32_t bin = static_cast<uint32_t>(column_data[global_data_index]);
if (!MIN_IS_MAX) {
if ((MISSING_IS_ZERO && !MFB_IS_ZERO && bin == t_zero_bin) ||
(MISSING_IS_NA && !MFB_IS_NA && bin == max_bin)) {
cuda_data_index_to_leaf_index[global_data_index] = missing_default_leaf_index;
} else if ((USE_MIN_BIN && (bin < min_bin || bin > max_bin)) ||
(!USE_MIN_BIN && bin == 0)) {
if ((MISSING_IS_NA && MFB_IS_NA) || (MISSING_IS_ZERO && MFB_IS_ZERO)) {
cuda_data_index_to_leaf_index[global_data_index] = missing_default_leaf_index;
} else {
cuda_data_index_to_leaf_index[global_data_index] = default_leaf_index;
}
} else if (bin > th) {
cuda_data_index_to_leaf_index[global_data_index] = right_leaf_index;
} else {
cuda_data_index_to_leaf_index[global_data_index] = left_leaf_index;
}
} else {
if (MISSING_IS_ZERO && !MFB_IS_ZERO && bin == t_zero_bin) {
cuda_data_index_to_leaf_index[global_data_index] = missing_default_leaf_index;
} else if (bin != max_bin) {
if ((MISSING_IS_NA && MFB_IS_NA) || (MISSING_IS_ZERO && MFB_IS_ZERO)) {
cuda_data_index_to_leaf_index[global_data_index] = missing_default_leaf_index;
} else {
cuda_data_index_to_leaf_index[global_data_index] = default_leaf_index;
}
} else {
if (MISSING_IS_NA && !MFB_IS_NA) {
cuda_data_index_to_leaf_index[global_data_index] = missing_default_leaf_index;
} else {
if (!MAX_TO_LEFT) {
cuda_data_index_to_leaf_index[global_data_index] = right_leaf_index;
} else {
cuda_data_index_to_leaf_index[global_data_index] = left_leaf_index;
}
}
}
}
}
}
template <typename BIN_TYPE>
void CUDADataPartition::LaunchUpdateDataIndexToLeafIndexKernel(
UpdateDataIndexToLeafIndexKernel_PARAMS,
const bool missing_is_zero,
const bool missing_is_na,
const bool mfb_is_zero,
const bool mfb_is_na,
const bool max_to_left,
const bool is_single_feature_in_column) {
if (min_bin < max_bin) {
if (!missing_is_zero) {
LaunchUpdateDataIndexToLeafIndexKernel_Inner0<false, false, BIN_TYPE>
(UpdateDataIndexToLeafIndex_ARGS, missing_is_na, mfb_is_zero, mfb_is_na, max_to_left, is_single_feature_in_column);
} else {
LaunchUpdateDataIndexToLeafIndexKernel_Inner0<false, true, BIN_TYPE>
(UpdateDataIndexToLeafIndex_ARGS, missing_is_na, mfb_is_zero, mfb_is_na, max_to_left, is_single_feature_in_column);
}
} else {
if (!missing_is_zero) {
LaunchUpdateDataIndexToLeafIndexKernel_Inner0<true, false, BIN_TYPE>
(UpdateDataIndexToLeafIndex_ARGS, missing_is_na, mfb_is_zero, mfb_is_na, max_to_left, is_single_feature_in_column);
} else {
LaunchUpdateDataIndexToLeafIndexKernel_Inner0<true, true, BIN_TYPE>
(UpdateDataIndexToLeafIndex_ARGS, missing_is_na, mfb_is_zero, mfb_is_na, max_to_left, is_single_feature_in_column);
}
}
}
template <bool MIN_IS_MAX, bool MISSING_IS_ZERO, typename BIN_TYPE>
void CUDADataPartition::LaunchUpdateDataIndexToLeafIndexKernel_Inner0(
UpdateDataIndexToLeafIndexKernel_PARAMS,
const bool missing_is_na,
const bool mfb_is_zero,
const bool mfb_is_na,
const bool max_to_left,
const bool is_single_feature_in_column) {
if (!missing_is_na) {
LaunchUpdateDataIndexToLeafIndexKernel_Inner1<MIN_IS_MAX, MISSING_IS_ZERO, false, BIN_TYPE>
(UpdateDataIndexToLeafIndex_ARGS, mfb_is_zero, mfb_is_na, max_to_left, is_single_feature_in_column);
} else {
LaunchUpdateDataIndexToLeafIndexKernel_Inner1<MIN_IS_MAX, MISSING_IS_ZERO, true, BIN_TYPE>
(UpdateDataIndexToLeafIndex_ARGS, mfb_is_zero, mfb_is_na, max_to_left, is_single_feature_in_column);
}
}
template <bool MIN_IS_MAX, bool MISSING_IS_ZERO, bool MISSING_IS_NA, typename BIN_TYPE>
void CUDADataPartition::LaunchUpdateDataIndexToLeafIndexKernel_Inner1(
UpdateDataIndexToLeafIndexKernel_PARAMS,
const bool mfb_is_zero,
const bool mfb_is_na,
const bool max_to_left,
const bool is_single_feature_in_column) {
if (!mfb_is_zero) {
LaunchUpdateDataIndexToLeafIndexKernel_Inner2<MIN_IS_MAX, MISSING_IS_ZERO, MISSING_IS_NA, false, BIN_TYPE>
(UpdateDataIndexToLeafIndex_ARGS, mfb_is_na, max_to_left, is_single_feature_in_column);
} else {
LaunchUpdateDataIndexToLeafIndexKernel_Inner2<MIN_IS_MAX, MISSING_IS_ZERO, MISSING_IS_NA, true, BIN_TYPE>
(UpdateDataIndexToLeafIndex_ARGS, mfb_is_na, max_to_left, is_single_feature_in_column);
}
}
template <bool MIN_IS_MAX, bool MISSING_IS_ZERO, bool MISSING_IS_NA, bool MFB_IS_ZERO, typename BIN_TYPE>
void CUDADataPartition::LaunchUpdateDataIndexToLeafIndexKernel_Inner2(
UpdateDataIndexToLeafIndexKernel_PARAMS,
const bool mfb_is_na,
const bool max_to_left,
const bool is_single_feature_in_column) {
if (!mfb_is_na) {
LaunchUpdateDataIndexToLeafIndexKernel_Inner3<MIN_IS_MAX, MISSING_IS_ZERO, MISSING_IS_NA, MFB_IS_ZERO, false, BIN_TYPE>
(UpdateDataIndexToLeafIndex_ARGS, max_to_left, is_single_feature_in_column);
} else {
LaunchUpdateDataIndexToLeafIndexKernel_Inner3<MIN_IS_MAX, MISSING_IS_ZERO, MISSING_IS_NA, MFB_IS_ZERO, true, BIN_TYPE>
(UpdateDataIndexToLeafIndex_ARGS, max_to_left, is_single_feature_in_column);
}
}
template <bool MIN_IS_MAX, bool MISSING_IS_ZERO, bool MISSING_IS_NA, bool MFB_IS_ZERO, bool MFB_IS_NA, typename BIN_TYPE>
void CUDADataPartition::LaunchUpdateDataIndexToLeafIndexKernel_Inner3(
UpdateDataIndexToLeafIndexKernel_PARAMS,
const bool max_to_left,
const bool is_single_feature_in_column) {
if (!max_to_left) {
LaunchUpdateDataIndexToLeafIndexKernel_Inner4<MIN_IS_MAX, MISSING_IS_ZERO, MISSING_IS_NA, MFB_IS_ZERO, MFB_IS_NA, false, BIN_TYPE>
(UpdateDataIndexToLeafIndex_ARGS, is_single_feature_in_column);
} else {
LaunchUpdateDataIndexToLeafIndexKernel_Inner4<MIN_IS_MAX, MISSING_IS_ZERO, MISSING_IS_NA, MFB_IS_ZERO, MFB_IS_NA, true, BIN_TYPE>
(UpdateDataIndexToLeafIndex_ARGS, is_single_feature_in_column);
}
}
template <bool MIN_IS_MAX, bool MISSING_IS_ZERO, bool MISSING_IS_NA, bool MFB_IS_ZERO, bool MFB_IS_NA, bool MAX_TO_LEFT, typename BIN_TYPE>
void CUDADataPartition::LaunchUpdateDataIndexToLeafIndexKernel_Inner4(
UpdateDataIndexToLeafIndexKernel_PARAMS,
const bool is_single_feature_in_column) {
if (!is_single_feature_in_column) {
UpdateDataIndexToLeafIndexKernel<MIN_IS_MAX, MISSING_IS_ZERO, MISSING_IS_NA, MFB_IS_ZERO, MFB_IS_NA, MAX_TO_LEFT, true, BIN_TYPE>
<<<grid_dim_, block_dim_, 0, cuda_streams_[3]>>>(
UpdateDataIndexToLeafIndex_ARGS,
cuda_data_index_to_leaf_index_);
} else {
UpdateDataIndexToLeafIndexKernel<MIN_IS_MAX, MISSING_IS_ZERO, MISSING_IS_NA, MFB_IS_ZERO, MFB_IS_NA, MAX_TO_LEFT, false, BIN_TYPE>
<<<grid_dim_, block_dim_, 0, cuda_streams_[3]>>>(
UpdateDataIndexToLeafIndex_ARGS,
cuda_data_index_to_leaf_index_);
}
}
#define GenDataToLeftBitVectorKernel_PARMS \
const BIN_TYPE* column_data, \
const data_size_t num_data_in_leaf, \
const data_size_t* data_indices_in_leaf, \
const uint32_t th, \
const uint32_t t_zero_bin, \
const uint32_t max_bin, \
const uint32_t min_bin, \
const uint8_t split_default_to_left, \
const uint8_t split_missing_default_to_left
#define GenBitVector_ARGS \
column_data, \
num_data_in_leaf, \
data_indices_in_leaf, \
th, \
t_zero_bin, \
max_bin, \
min_bin, \
split_default_to_left, \
split_missing_default_to_left
template <bool MIN_IS_MAX, bool MISSING_IS_ZERO, bool MISSING_IS_NA, bool MFB_IS_ZERO, bool MFB_IS_NA, bool MAX_TO_LEFT, bool USE_MIN_BIN, typename BIN_TYPE>
__global__ void GenDataToLeftBitVectorKernel(
GenDataToLeftBitVectorKernel_PARMS,
uint16_t* block_to_left_offset,
data_size_t* block_to_left_offset_buffer,
data_size_t* block_to_right_offset_buffer) {
__shared__ uint16_t shared_mem_buffer[32];
uint16_t thread_to_left_offset_cnt = 0;
const unsigned int local_data_index = blockIdx.x * blockDim.x + threadIdx.x;
if (local_data_index < num_data_in_leaf) {
const unsigned int global_data_index = data_indices_in_leaf[local_data_index];
const uint32_t bin = static_cast<uint32_t>(column_data[global_data_index]);
if (!MIN_IS_MAX) {
if ((MISSING_IS_ZERO && !MFB_IS_ZERO && bin == t_zero_bin) ||
(MISSING_IS_NA && !MFB_IS_NA && bin == max_bin)) {
thread_to_left_offset_cnt = split_missing_default_to_left;
} else if ((USE_MIN_BIN && (bin < min_bin || bin > max_bin)) ||
(!USE_MIN_BIN && bin == 0)) {
        if ((MISSING_IS_NA && MFB_IS_NA) || (MISSING_IS_ZERO && MFB_IS_ZERO)) {
thread_to_left_offset_cnt = split_missing_default_to_left;
} else {
thread_to_left_offset_cnt = split_default_to_left;
}
} else if (bin <= th) {
thread_to_left_offset_cnt = 1;
}
} else {
if (MISSING_IS_ZERO && !MFB_IS_ZERO && bin == t_zero_bin) {
thread_to_left_offset_cnt = split_missing_default_to_left;
} else if (bin != max_bin) {
if ((MISSING_IS_NA && MFB_IS_NA) || (MISSING_IS_ZERO && MFB_IS_ZERO)) {
thread_to_left_offset_cnt = split_missing_default_to_left;
} else {
thread_to_left_offset_cnt = split_default_to_left;
}
} else {
if (MISSING_IS_NA && !MFB_IS_NA) {
thread_to_left_offset_cnt = split_missing_default_to_left;
} else if (MAX_TO_LEFT) {
thread_to_left_offset_cnt = 1;
}
}
}
}
__syncthreads();
PrepareOffset(num_data_in_leaf, block_to_left_offset + blockIdx.x * blockDim.x, block_to_left_offset_buffer, block_to_right_offset_buffer,
thread_to_left_offset_cnt, shared_mem_buffer);
}
template <typename BIN_TYPE>
void CUDADataPartition::LaunchGenDataToLeftBitVectorKernelInner(
GenDataToLeftBitVectorKernel_PARMS,
const bool missing_is_zero,
const bool missing_is_na,
const bool mfb_is_zero,
const bool mfb_is_na,
const bool max_bin_to_left,
const bool is_single_feature_in_column) {
if (min_bin < max_bin) {
if (!missing_is_zero) {
LaunchGenDataToLeftBitVectorKernelInner0<false, false, BIN_TYPE>
(GenBitVector_ARGS, missing_is_na, mfb_is_zero, mfb_is_na, max_bin_to_left, is_single_feature_in_column);
} else {
LaunchGenDataToLeftBitVectorKernelInner0<false, true, BIN_TYPE>
(GenBitVector_ARGS, missing_is_na, mfb_is_zero, mfb_is_na, max_bin_to_left, is_single_feature_in_column);
}
} else {
if (!missing_is_zero) {
LaunchGenDataToLeftBitVectorKernelInner0<true, false, BIN_TYPE>
(GenBitVector_ARGS, missing_is_na, mfb_is_zero, mfb_is_na, max_bin_to_left, is_single_feature_in_column);
} else {
LaunchGenDataToLeftBitVectorKernelInner0<true, true, BIN_TYPE>
(GenBitVector_ARGS, missing_is_na, mfb_is_zero, mfb_is_na, max_bin_to_left, is_single_feature_in_column);
}
}
}
template <bool MIN_IS_MAX, bool MISSING_IS_ZERO, typename BIN_TYPE>
void CUDADataPartition::LaunchGenDataToLeftBitVectorKernelInner0(
GenDataToLeftBitVectorKernel_PARMS,
const bool missing_is_na,
const bool mfb_is_zero,
const bool mfb_is_na,
const bool max_bin_to_left,
const bool is_single_feature_in_column) {
if (!missing_is_na) {
LaunchGenDataToLeftBitVectorKernelInner1<MIN_IS_MAX, MISSING_IS_ZERO, false, BIN_TYPE>
(GenBitVector_ARGS, mfb_is_zero, mfb_is_na, max_bin_to_left, is_single_feature_in_column);
} else {
LaunchGenDataToLeftBitVectorKernelInner1<MIN_IS_MAX, MISSING_IS_ZERO, true, BIN_TYPE>
(GenBitVector_ARGS, mfb_is_zero, mfb_is_na, max_bin_to_left, is_single_feature_in_column);
}
}
template <bool MIN_IS_MAX, bool MISSING_IS_ZERO, bool MISSING_IS_NA, typename BIN_TYPE>
void CUDADataPartition::LaunchGenDataToLeftBitVectorKernelInner1(
GenDataToLeftBitVectorKernel_PARMS,
const bool mfb_is_zero,
const bool mfb_is_na,
const bool max_bin_to_left,
const bool is_single_feature_in_column) {
if (!mfb_is_zero) {
LaunchGenDataToLeftBitVectorKernelInner2<MIN_IS_MAX, MISSING_IS_ZERO, MISSING_IS_NA, false, BIN_TYPE>
(GenBitVector_ARGS, mfb_is_na, max_bin_to_left, is_single_feature_in_column);
} else {
LaunchGenDataToLeftBitVectorKernelInner2<MIN_IS_MAX, MISSING_IS_ZERO, MISSING_IS_NA, true, BIN_TYPE>
(GenBitVector_ARGS, mfb_is_na, max_bin_to_left, is_single_feature_in_column);
}
}
template <bool MIN_IS_MAX, bool MISSING_IS_ZERO, bool MISSING_IS_NA, bool MFB_IS_ZERO, typename BIN_TYPE>
void CUDADataPartition::LaunchGenDataToLeftBitVectorKernelInner2(
GenDataToLeftBitVectorKernel_PARMS,
const bool mfb_is_na,
const bool max_bin_to_left,
const bool is_single_feature_in_column) {
if (!mfb_is_na) {
LaunchGenDataToLeftBitVectorKernelInner3
<MIN_IS_MAX, MISSING_IS_ZERO, MISSING_IS_NA, MFB_IS_ZERO, false, BIN_TYPE>
(GenBitVector_ARGS, max_bin_to_left, is_single_feature_in_column);
} else {
LaunchGenDataToLeftBitVectorKernelInner3
<MIN_IS_MAX, MISSING_IS_ZERO, MISSING_IS_NA, MFB_IS_ZERO, true, BIN_TYPE>
(GenBitVector_ARGS, max_bin_to_left, is_single_feature_in_column);
}
}
template <bool MIN_IS_MAX, bool MISSING_IS_ZERO, bool MISSING_IS_NA, bool MFB_IS_ZERO, bool MFB_IS_NA, typename BIN_TYPE>
void CUDADataPartition::LaunchGenDataToLeftBitVectorKernelInner3(
GenDataToLeftBitVectorKernel_PARMS,
const bool max_bin_to_left,
const bool is_single_feature_in_column) {
if (!max_bin_to_left) {
LaunchGenDataToLeftBitVectorKernelInner4
<MIN_IS_MAX, MISSING_IS_ZERO, MISSING_IS_NA, MFB_IS_ZERO, MFB_IS_NA, false, BIN_TYPE>
(GenBitVector_ARGS, is_single_feature_in_column);
} else {
LaunchGenDataToLeftBitVectorKernelInner4
<MIN_IS_MAX, MISSING_IS_ZERO, MISSING_IS_NA, MFB_IS_ZERO, MFB_IS_NA, true, BIN_TYPE>
(GenBitVector_ARGS, is_single_feature_in_column);
}
}
template <bool MIN_IS_MAX, bool MISSING_IS_ZERO, bool MISSING_IS_NA, bool MFB_IS_ZERO, bool MFB_IS_NA, bool MAX_TO_LEFT, typename BIN_TYPE>
void CUDADataPartition::LaunchGenDataToLeftBitVectorKernelInner4(
GenDataToLeftBitVectorKernel_PARMS,
const bool is_single_feature_in_column) {
if (!is_single_feature_in_column) {
GenDataToLeftBitVectorKernel
<MIN_IS_MAX, MISSING_IS_ZERO, MISSING_IS_NA, MFB_IS_ZERO, MFB_IS_NA, MAX_TO_LEFT, true, BIN_TYPE>
<<<grid_dim_, block_dim_, 0, cuda_streams_[0]>>>(GenBitVector_ARGS,
cuda_block_to_left_offset_, cuda_block_data_to_left_offset_, cuda_block_data_to_right_offset_);
} else {
GenDataToLeftBitVectorKernel
<MIN_IS_MAX, MISSING_IS_ZERO, MISSING_IS_NA, MFB_IS_ZERO, MFB_IS_NA, MAX_TO_LEFT, false, BIN_TYPE>
<<<grid_dim_, block_dim_, 0, cuda_streams_[0]>>>(GenBitVector_ARGS,
cuda_block_to_left_offset_, cuda_block_data_to_left_offset_, cuda_block_data_to_right_offset_);
}
}
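// Editorial sketch: the Inner/Inner0..Inner4 chain above peels one runtime flag
// per hop into a compile-time template parameter, so each kernel instantiation
// checks its flags branch-free. A minimal standalone form of the same dispatch
// pattern (hypothetical names, not part of LightGBM):
namespace {
template <bool FLAG>
void DispatchSketchImpl() { /* FLAG is a compile-time constant in here */ }
inline void DispatchSketch(const bool flag) {
  if (flag) { DispatchSketchImpl<true>(); } else { DispatchSketchImpl<false>(); }
}
}  // anonymous namespace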
void CUDADataPartition::LaunchGenDataToLeftBitVectorKernel(
const data_size_t num_data_in_leaf,
const int split_feature_index,
const uint32_t split_threshold,
const uint8_t split_default_left,
const data_size_t leaf_data_start,
const int left_leaf_index,
const int right_leaf_index) {
const bool missing_is_zero = static_cast<bool>(cuda_column_data_->feature_missing_is_zero(split_feature_index));
const bool missing_is_na = static_cast<bool>(cuda_column_data_->feature_missing_is_na(split_feature_index));
const bool mfb_is_zero = static_cast<bool>(cuda_column_data_->feature_mfb_is_zero(split_feature_index));
const bool mfb_is_na = static_cast<bool>(cuda_column_data_->feature_mfb_is_na(split_feature_index));
const bool is_single_feature_in_column = is_single_feature_in_column_[split_feature_index];
const uint32_t default_bin = cuda_column_data_->feature_default_bin(split_feature_index);
const uint32_t most_freq_bin = cuda_column_data_->feature_most_freq_bin(split_feature_index);
const uint32_t min_bin = is_single_feature_in_column ? 1 : cuda_column_data_->feature_min_bin(split_feature_index);
const uint32_t max_bin = cuda_column_data_->feature_max_bin(split_feature_index);
uint32_t th = split_threshold + min_bin;
uint32_t t_zero_bin = min_bin + default_bin;
if (most_freq_bin == 0) {
--th;
--t_zero_bin;
}
uint8_t split_default_to_left = 0;
uint8_t split_missing_default_to_left = 0;
int default_leaf_index = right_leaf_index;
int missing_default_leaf_index = right_leaf_index;
if (most_freq_bin <= split_threshold) {
split_default_to_left = 1;
default_leaf_index = left_leaf_index;
}
if (missing_is_zero || missing_is_na) {
if (split_default_left) {
split_missing_default_to_left = 1;
missing_default_leaf_index = left_leaf_index;
}
}
const int column_index = cuda_column_data_->feature_to_column(split_feature_index);
const uint8_t bit_type = cuda_column_data_->column_bit_type(column_index);
const bool max_bin_to_left = (max_bin <= th);
const data_size_t* data_indices_in_leaf = cuda_data_indices_ + leaf_data_start;
const void* column_data_pointer = cuda_column_data_->GetColumnData(column_index);
if (bit_type == 8) {
const uint8_t* column_data = reinterpret_cast<const uint8_t*>(column_data_pointer);
LaunchGenDataToLeftBitVectorKernelInner<uint8_t>(
GenBitVector_ARGS,
missing_is_zero,
missing_is_na,
mfb_is_zero,
mfb_is_na,
max_bin_to_left,
is_single_feature_in_column);
LaunchUpdateDataIndexToLeafIndexKernel<uint8_t>(
UpdateDataIndexToLeafIndex_ARGS,
missing_is_zero,
missing_is_na,
mfb_is_zero,
mfb_is_na,
max_bin_to_left,
is_single_feature_in_column);
} else if (bit_type == 16) {
const uint16_t* column_data = reinterpret_cast<const uint16_t*>(column_data_pointer);
LaunchGenDataToLeftBitVectorKernelInner<uint16_t>(
GenBitVector_ARGS,
missing_is_zero,
missing_is_na,
mfb_is_zero,
mfb_is_na,
max_bin_to_left,
is_single_feature_in_column);
LaunchUpdateDataIndexToLeafIndexKernel<uint16_t>(
UpdateDataIndexToLeafIndex_ARGS,
missing_is_zero,
missing_is_na,
mfb_is_zero,
mfb_is_na,
max_bin_to_left,
is_single_feature_in_column);
} else if (bit_type == 32) {
const uint32_t* column_data = reinterpret_cast<const uint32_t*>(column_data_pointer);
LaunchGenDataToLeftBitVectorKernelInner<uint32_t>(
GenBitVector_ARGS,
missing_is_zero,
missing_is_na,
mfb_is_zero,
mfb_is_na,
max_bin_to_left,
is_single_feature_in_column);
LaunchUpdateDataIndexToLeafIndexKernel<uint32_t>(
UpdateDataIndexToLeafIndex_ARGS,
missing_is_zero,
missing_is_na,
mfb_is_zero,
mfb_is_na,
max_bin_to_left,
is_single_feature_in_column);
}
}
#undef UpdateDataIndexToLeafIndexKernel_PARAMS
#undef UpdateDataIndexToLeafIndex_ARGS
#undef GenDataToLeftBitVectorKernel_PARMS
#undef GenBitVector_ARGS
template <typename BIN_TYPE, bool USE_MIN_BIN>
__global__ void UpdateDataIndexToLeafIndexKernel_Categorical(
const data_size_t num_data_in_leaf, const data_size_t* data_indices_in_leaf,
const uint32_t* bitset, const int bitset_len, const BIN_TYPE* column_data,
// values from feature
const uint32_t max_bin, const uint32_t min_bin, const int8_t mfb_offset,
int* cuda_data_index_to_leaf_index, const int left_leaf_index, const int right_leaf_index,
const int default_leaf_index) {
const unsigned int local_data_index = blockIdx.x * blockDim.x + threadIdx.x;
if (local_data_index < num_data_in_leaf) {
const unsigned int global_data_index = data_indices_in_leaf[local_data_index];
const uint32_t bin = static_cast<uint32_t>(column_data[global_data_index]);
if (USE_MIN_BIN && (bin < min_bin || bin > max_bin)) {
cuda_data_index_to_leaf_index[global_data_index] = default_leaf_index;
} else if (!USE_MIN_BIN && bin == 0) {
cuda_data_index_to_leaf_index[global_data_index] = default_leaf_index;
} else if (CUDAFindInBitset(bitset, bitset_len, bin - min_bin + mfb_offset)) {
cuda_data_index_to_leaf_index[global_data_index] = left_leaf_index;
} else {
cuda_data_index_to_leaf_index[global_data_index] = right_leaf_index;
}
}
}
// for categorical features
template <typename BIN_TYPE, bool USE_MIN_BIN>
__global__ void GenDataToLeftBitVectorKernel_Categorical(
const data_size_t num_data_in_leaf, const data_size_t* data_indices_in_leaf,
const uint32_t* bitset, int bitset_len, const BIN_TYPE* column_data,
// values from feature
const uint32_t max_bin, const uint32_t min_bin, const int8_t mfb_offset,
const uint8_t split_default_to_left,
uint16_t* block_to_left_offset,
data_size_t* block_to_left_offset_buffer, data_size_t* block_to_right_offset_buffer) {
__shared__ uint16_t shared_mem_buffer[32];
uint16_t thread_to_left_offset_cnt = 0;
const unsigned int local_data_index = blockIdx.x * blockDim.x + threadIdx.x;
if (local_data_index < num_data_in_leaf) {
const unsigned int global_data_index = data_indices_in_leaf[local_data_index];
const uint32_t bin = static_cast<uint32_t>(column_data[global_data_index]);
if (USE_MIN_BIN && (bin < min_bin || bin > max_bin)) {
thread_to_left_offset_cnt = split_default_to_left;
} else if (!USE_MIN_BIN && bin == 0) {
thread_to_left_offset_cnt = split_default_to_left;
} else if (CUDAFindInBitset(bitset, bitset_len, bin - min_bin + mfb_offset)) {
thread_to_left_offset_cnt = 1;
}
}
__syncthreads();
PrepareOffset(num_data_in_leaf, block_to_left_offset + blockIdx.x * blockDim.x, block_to_left_offset_buffer, block_to_right_offset_buffer,
thread_to_left_offset_cnt, shared_mem_buffer);
}
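// Editorial sketch of the word-indexed membership test that CUDAFindInBitset is
// assumed to perform (the real helper is defined elsewhere in LightGBM):
__device__ __forceinline__ bool FindInBitsetSketch(const uint32_t* bits, int len, uint32_t pos) {
  const int word = static_cast<int>(pos >> 5);              // pos / 32
  return word < len && ((bits[word] >> (pos & 31u)) & 1u);  // test bit pos % 32
}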
#define GenBitVector_Categorical_ARGS \
num_data_in_leaf, data_indices_in_leaf, \
bitset, bitset_len, \
column_data, max_bin, min_bin, mfb_offset, split_default_to_left, \
cuda_block_to_left_offset_, cuda_block_data_to_left_offset_, cuda_block_data_to_right_offset_
#define UpdateDataIndexToLeafIndex_Categorical_ARGS \
num_data_in_leaf, data_indices_in_leaf, \
bitset, bitset_len, \
column_data, max_bin, min_bin, mfb_offset, \
cuda_data_index_to_leaf_index_, left_leaf_index, right_leaf_index, default_leaf_index
void CUDADataPartition::LaunchGenDataToLeftBitVectorCategoricalKernel(
const data_size_t num_data_in_leaf,
const int split_feature_index,
const uint32_t* bitset,
const int bitset_len,
const uint8_t split_default_left,
const data_size_t leaf_data_start,
const int left_leaf_index,
const int right_leaf_index) {
const data_size_t* data_indices_in_leaf = cuda_data_indices_ + leaf_data_start;
const int column_index = cuda_column_data_->feature_to_column(split_feature_index);
const uint8_t bit_type = cuda_column_data_->column_bit_type(column_index);
const bool is_single_feature_in_column = is_single_feature_in_column_[split_feature_index];
const uint32_t min_bin = is_single_feature_in_column ? 1 : cuda_column_data_->feature_min_bin(split_feature_index);
const uint32_t max_bin = cuda_column_data_->feature_max_bin(split_feature_index);
const uint32_t most_freq_bin = cuda_column_data_->feature_most_freq_bin(split_feature_index);
const uint32_t default_bin = cuda_column_data_->feature_default_bin(split_feature_index);
const void* column_data_pointer = cuda_column_data_->GetColumnData(column_index);
const int8_t mfb_offset = static_cast<int8_t>(most_freq_bin == 0);
std::vector<uint32_t> host_bitset(bitset_len, 0);
CopyFromCUDADeviceToHost<uint32_t>(host_bitset.data(), bitset, bitset_len, __FILE__, __LINE__);
uint8_t split_default_to_left = 0;
int default_leaf_index = right_leaf_index;
if (most_freq_bin > 0 && Common::FindInBitset(host_bitset.data(), bitset_len, most_freq_bin)) {
split_default_to_left = 1;
default_leaf_index = left_leaf_index;
}
if (bit_type == 8) {
const uint8_t* column_data = reinterpret_cast<const uint8_t*>(column_data_pointer);
if (is_single_feature_in_column) {
GenDataToLeftBitVectorKernel_Categorical<uint8_t, false><<<grid_dim_, block_dim_, 0, cuda_streams_[0]>>>(GenBitVector_Categorical_ARGS);
UpdateDataIndexToLeafIndexKernel_Categorical<uint8_t, false><<<grid_dim_, block_dim_, 0, cuda_streams_[3]>>>(UpdateDataIndexToLeafIndex_Categorical_ARGS);
} else {
GenDataToLeftBitVectorKernel_Categorical<uint8_t, true><<<grid_dim_, block_dim_, 0, cuda_streams_[0]>>>(GenBitVector_Categorical_ARGS);
UpdateDataIndexToLeafIndexKernel_Categorical<uint8_t, true><<<grid_dim_, block_dim_, 0, cuda_streams_[3]>>>(UpdateDataIndexToLeafIndex_Categorical_ARGS);
}
} else if (bit_type == 16) {
const uint16_t* column_data = reinterpret_cast<const uint16_t*>(column_data_pointer);
if (is_single_feature_in_column) {
GenDataToLeftBitVectorKernel_Categorical<uint16_t, false><<<grid_dim_, block_dim_, 0, cuda_streams_[0]>>>(GenBitVector_Categorical_ARGS);
UpdateDataIndexToLeafIndexKernel_Categorical<uint16_t, false><<<grid_dim_, block_dim_, 0, cuda_streams_[3]>>>(UpdateDataIndexToLeafIndex_Categorical_ARGS);
} else {
GenDataToLeftBitVectorKernel_Categorical<uint16_t, true><<<grid_dim_, block_dim_, 0, cuda_streams_[0]>>>(GenBitVector_Categorical_ARGS);
UpdateDataIndexToLeafIndexKernel_Categorical<uint16_t, true><<<grid_dim_, block_dim_, 0, cuda_streams_[3]>>>(UpdateDataIndexToLeafIndex_Categorical_ARGS);
}
} else if (bit_type == 32) {
const uint32_t* column_data = reinterpret_cast<const uint32_t*>(column_data_pointer);
if (is_single_feature_in_column) {
GenDataToLeftBitVectorKernel_Categorical<uint32_t, false><<<grid_dim_, block_dim_, 0, cuda_streams_[0]>>>(GenBitVector_Categorical_ARGS);
UpdateDataIndexToLeafIndexKernel_Categorical<uint32_t, false><<<grid_dim_, block_dim_, 0, cuda_streams_[3]>>>(UpdateDataIndexToLeafIndex_Categorical_ARGS);
} else {
GenDataToLeftBitVectorKernel_Categorical<uint32_t, true><<<grid_dim_, block_dim_, 0, cuda_streams_[0]>>>(GenBitVector_Categorical_ARGS);
UpdateDataIndexToLeafIndexKernel_Categorical<uint32_t, true><<<grid_dim_, block_dim_, 0, cuda_streams_[3]>>>(UpdateDataIndexToLeafIndex_Categorical_ARGS);
}
}
}
#undef GenBitVector_Categorical_ARGS
#undef UpdateDataIndexToLeafIndex_Categorical_ARGS
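// Editorial note: the two AggregateBlockOffset kernels below turn per-block
// left/right counts into global write offsets. Kernel0 handles more blocks than
// threads by giving each thread a contiguous chunk to accumulate serially before
// a block-wide shuffle prefix sum stitches the chunks together; Kernel1 is the
// one-buffer-entry-per-thread fast path.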
__global__ void AggregateBlockOffsetKernel0(
const int left_leaf_index,
const int right_leaf_index,
data_size_t* block_to_left_offset_buffer,
data_size_t* block_to_right_offset_buffer, data_size_t* cuda_leaf_data_start,
data_size_t* cuda_leaf_data_end, data_size_t* cuda_leaf_num_data, const data_size_t* cuda_data_indices,
const data_size_t num_blocks) {
__shared__ uint32_t shared_mem_buffer[32];
__shared__ uint32_t to_left_total_count;
const data_size_t num_data_in_leaf = cuda_leaf_num_data[left_leaf_index];
const unsigned int blockDim_x = blockDim.x;
const unsigned int threadIdx_x = threadIdx.x;
const data_size_t num_blocks_plus_1 = num_blocks + 1;
const uint32_t num_blocks_per_thread = (num_blocks_plus_1 + blockDim_x - 1) / blockDim_x;
const uint32_t remain = num_blocks_plus_1 - ((num_blocks_per_thread - 1) * blockDim_x);
const uint32_t remain_offset = remain * num_blocks_per_thread;
uint32_t thread_start_block_index = 0;
uint32_t thread_end_block_index = 0;
if (threadIdx_x < remain) {
thread_start_block_index = threadIdx_x * num_blocks_per_thread;
thread_end_block_index = min(thread_start_block_index + num_blocks_per_thread, num_blocks_plus_1);
} else {
thread_start_block_index = remain_offset + (num_blocks_per_thread - 1) * (threadIdx_x - remain);
thread_end_block_index = min(thread_start_block_index + num_blocks_per_thread - 1, num_blocks_plus_1);
}
if (threadIdx.x == 0) {
block_to_right_offset_buffer[0] = 0;
}
__syncthreads();
for (uint32_t block_index = thread_start_block_index + 1; block_index < thread_end_block_index; ++block_index) {
block_to_left_offset_buffer[block_index] += block_to_left_offset_buffer[block_index - 1];
block_to_right_offset_buffer[block_index] += block_to_right_offset_buffer[block_index - 1];
}
__syncthreads();
uint32_t block_to_left_offset = 0;
uint32_t block_to_right_offset = 0;
if (thread_start_block_index < thread_end_block_index && thread_start_block_index > 1) {
block_to_left_offset = block_to_left_offset_buffer[thread_start_block_index - 1];
block_to_right_offset = block_to_right_offset_buffer[thread_start_block_index - 1];
}
block_to_left_offset = ShufflePrefixSum<uint32_t>(block_to_left_offset, shared_mem_buffer);
__syncthreads();
block_to_right_offset = ShufflePrefixSum<uint32_t>(block_to_right_offset, shared_mem_buffer);
if (threadIdx_x == blockDim_x - 1) {
to_left_total_count = block_to_left_offset + block_to_left_offset_buffer[num_blocks];
}
__syncthreads();
const uint32_t to_left_thread_block_offset = block_to_left_offset;
const uint32_t to_right_thread_block_offset = block_to_right_offset + to_left_total_count;
for (uint32_t block_index = thread_start_block_index; block_index < thread_end_block_index; ++block_index) {
block_to_left_offset_buffer[block_index] += to_left_thread_block_offset;
block_to_right_offset_buffer[block_index] += to_right_thread_block_offset;
}
__syncthreads();
if (blockIdx.x == 0 && threadIdx.x == 0) {
const data_size_t old_leaf_data_end = cuda_leaf_data_end[left_leaf_index];
cuda_leaf_data_end[left_leaf_index] = cuda_leaf_data_start[left_leaf_index] + static_cast<data_size_t>(to_left_total_count);
cuda_leaf_num_data[left_leaf_index] = static_cast<data_size_t>(to_left_total_count);
cuda_leaf_data_start[right_leaf_index] = cuda_leaf_data_end[left_leaf_index];
cuda_leaf_data_end[right_leaf_index] = old_leaf_data_end;
cuda_leaf_num_data[right_leaf_index] = num_data_in_leaf - static_cast<data_size_t>(to_left_total_count);
}
}
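// Editorial sketch (an assumption; LightGBM's ShufflePrefixSum is defined
// elsewhere): the warp-level inclusive scan that such block-wide shuffle prefix
// sums are typically built from:
__device__ __forceinline__ uint32_t WarpInclusiveScanSketch(uint32_t value) {
  for (int offset = 1; offset < 32; offset <<= 1) {
    const uint32_t up = __shfl_up_sync(0xffffffffu, value, offset);
    if ((threadIdx.x & 31) >= static_cast<unsigned int>(offset)) {
      value += up;  // only lanes with a valid source lane accumulate
    }
  }
  return value;
}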
__global__ void AggregateBlockOffsetKernel1(
const int left_leaf_index,
const int right_leaf_index,
data_size_t* block_to_left_offset_buffer,
data_size_t* block_to_right_offset_buffer, data_size_t* cuda_leaf_data_start,
data_size_t* cuda_leaf_data_end, data_size_t* cuda_leaf_num_data, const data_size_t* cuda_data_indices,
const data_size_t num_blocks) {
__shared__ uint32_t shared_mem_buffer[32];
__shared__ uint32_t to_left_total_count;
const data_size_t num_data_in_leaf = cuda_leaf_num_data[left_leaf_index];
const unsigned int threadIdx_x = threadIdx.x;
uint32_t block_to_left_offset = 0;
uint32_t block_to_right_offset = 0;
if (threadIdx_x < static_cast<unsigned int>(num_blocks)) {
block_to_left_offset = block_to_left_offset_buffer[threadIdx_x + 1];
block_to_right_offset = block_to_right_offset_buffer[threadIdx_x + 1];
}
block_to_left_offset = ShufflePrefixSum<uint32_t>(block_to_left_offset, shared_mem_buffer);
__syncthreads();
block_to_right_offset = ShufflePrefixSum<uint32_t>(block_to_right_offset, shared_mem_buffer);
if (threadIdx.x == blockDim.x - 1) {
to_left_total_count = block_to_left_offset;
}
__syncthreads();
if (threadIdx_x < static_cast<unsigned int>(num_blocks)) {
block_to_left_offset_buffer[threadIdx_x + 1] = block_to_left_offset;
block_to_right_offset_buffer[threadIdx_x + 1] = block_to_right_offset + to_left_total_count;
}
if (threadIdx_x == 0) {
block_to_right_offset_buffer[0] = to_left_total_count;
}
__syncthreads();
if (blockIdx.x == 0 && threadIdx.x == 0) {
const data_size_t old_leaf_data_end = cuda_leaf_data_end[left_leaf_index];
cuda_leaf_data_end[left_leaf_index] = cuda_leaf_data_start[left_leaf_index] + static_cast<data_size_t>(to_left_total_count);
cuda_leaf_num_data[left_leaf_index] = static_cast<data_size_t>(to_left_total_count);
cuda_leaf_data_start[right_leaf_index] = cuda_leaf_data_end[left_leaf_index];
cuda_leaf_data_end[right_leaf_index] = old_leaf_data_end;
cuda_leaf_num_data[right_leaf_index] = num_data_in_leaf - static_cast<data_size_t>(to_left_total_count);
}
}
__global__ void SplitTreeStructureKernel(const int left_leaf_index,
const int right_leaf_index,
data_size_t* block_to_left_offset_buffer,
data_size_t* block_to_right_offset_buffer, data_size_t* cuda_leaf_data_start,
data_size_t* cuda_leaf_data_end, data_size_t* cuda_leaf_num_data, const data_size_t* cuda_data_indices,
const CUDASplitInfo* best_split_info,
// for leaf splits information update
CUDALeafSplitsStruct* smaller_leaf_splits,
CUDALeafSplitsStruct* larger_leaf_splits,
const int num_total_bin,
hist_t* cuda_hist, hist_t** cuda_hist_pool,
double* cuda_leaf_output,
int* cuda_split_info_buffer) {
const unsigned int to_left_total_cnt = cuda_leaf_num_data[left_leaf_index];
double* cuda_split_info_buffer_for_hessians = reinterpret_cast<double*>(cuda_split_info_buffer + 8);
const unsigned int global_thread_index = blockIdx.x * blockDim.x + threadIdx.x;
if (global_thread_index == 0) {
cuda_leaf_output[left_leaf_index] = best_split_info->left_value;
} else if (global_thread_index == 1) {
cuda_leaf_output[right_leaf_index] = best_split_info->right_value;
} else if (global_thread_index == 2) {
cuda_split_info_buffer[0] = left_leaf_index;
} else if (global_thread_index == 3) {
cuda_split_info_buffer[1] = cuda_leaf_num_data[left_leaf_index];
} else if (global_thread_index == 4) {
cuda_split_info_buffer[2] = cuda_leaf_data_start[left_leaf_index];
} else if (global_thread_index == 5) {
cuda_split_info_buffer[3] = right_leaf_index;
} else if (global_thread_index == 6) {
cuda_split_info_buffer[4] = cuda_leaf_num_data[right_leaf_index];
} else if (global_thread_index == 7) {
cuda_split_info_buffer[5] = cuda_leaf_data_start[right_leaf_index];
} else if (global_thread_index == 8) {
cuda_split_info_buffer_for_hessians[0] = best_split_info->left_sum_hessians;
cuda_split_info_buffer_for_hessians[2] = best_split_info->left_sum_gradients;
} else if (global_thread_index == 9) {
cuda_split_info_buffer_for_hessians[1] = best_split_info->right_sum_hessians;
cuda_split_info_buffer_for_hessians[3] = best_split_info->right_sum_gradients;
}
if (cuda_leaf_num_data[left_leaf_index] < cuda_leaf_num_data[right_leaf_index]) {
if (global_thread_index == 0) {
hist_t* parent_hist_ptr = cuda_hist_pool[left_leaf_index];
cuda_hist_pool[right_leaf_index] = parent_hist_ptr;
cuda_hist_pool[left_leaf_index] = cuda_hist + 2 * right_leaf_index * num_total_bin;
smaller_leaf_splits->hist_in_leaf = cuda_hist_pool[left_leaf_index];
larger_leaf_splits->hist_in_leaf = cuda_hist_pool[right_leaf_index];
} else if (global_thread_index == 1) {
smaller_leaf_splits->sum_of_gradients = best_split_info->left_sum_gradients;
} else if (global_thread_index == 2) {
smaller_leaf_splits->sum_of_hessians = best_split_info->left_sum_hessians;
} else if (global_thread_index == 3) {
smaller_leaf_splits->num_data_in_leaf = to_left_total_cnt;
} else if (global_thread_index == 4) {
smaller_leaf_splits->gain = best_split_info->left_gain;
} else if (global_thread_index == 5) {
smaller_leaf_splits->leaf_value = best_split_info->left_value;
} else if (global_thread_index == 6) {
smaller_leaf_splits->data_indices_in_leaf = cuda_data_indices;
} else if (global_thread_index == 7) {
larger_leaf_splits->leaf_index = right_leaf_index;
} else if (global_thread_index == 8) {
larger_leaf_splits->sum_of_gradients = best_split_info->right_sum_gradients;
} else if (global_thread_index == 9) {
larger_leaf_splits->sum_of_hessians = best_split_info->right_sum_hessians;
} else if (global_thread_index == 10) {
larger_leaf_splits->num_data_in_leaf = cuda_leaf_num_data[right_leaf_index];
} else if (global_thread_index == 11) {
larger_leaf_splits->gain = best_split_info->right_gain;
} else if (global_thread_index == 12) {
larger_leaf_splits->leaf_value = best_split_info->right_value;
} else if (global_thread_index == 13) {
larger_leaf_splits->data_indices_in_leaf = cuda_data_indices + cuda_leaf_num_data[left_leaf_index];
} else if (global_thread_index == 14) {
cuda_split_info_buffer[6] = left_leaf_index;
} else if (global_thread_index == 15) {
cuda_split_info_buffer[7] = right_leaf_index;
} else if (global_thread_index == 16) {
smaller_leaf_splits->leaf_index = left_leaf_index;
}
} else {
if (global_thread_index == 0) {
larger_leaf_splits->leaf_index = left_leaf_index;
} else if (global_thread_index == 1) {
larger_leaf_splits->sum_of_gradients = best_split_info->left_sum_gradients;
} else if (global_thread_index == 2) {
larger_leaf_splits->sum_of_hessians = best_split_info->left_sum_hessians;
} else if (global_thread_index == 3) {
larger_leaf_splits->num_data_in_leaf = to_left_total_cnt;
} else if (global_thread_index == 4) {
larger_leaf_splits->gain = best_split_info->left_gain;
} else if (global_thread_index == 5) {
larger_leaf_splits->leaf_value = best_split_info->left_value;
} else if (global_thread_index == 6) {
larger_leaf_splits->data_indices_in_leaf = cuda_data_indices;
} else if (global_thread_index == 7) {
smaller_leaf_splits->leaf_index = right_leaf_index;
} else if (global_thread_index == 8) {
smaller_leaf_splits->sum_of_gradients = best_split_info->right_sum_gradients;
} else if (global_thread_index == 9) {
smaller_leaf_splits->sum_of_hessians = best_split_info->right_sum_hessians;
} else if (global_thread_index == 10) {
smaller_leaf_splits->num_data_in_leaf = cuda_leaf_num_data[right_leaf_index];
} else if (global_thread_index == 11) {
smaller_leaf_splits->gain = best_split_info->right_gain;
} else if (global_thread_index == 12) {
smaller_leaf_splits->leaf_value = best_split_info->right_value;
} else if (global_thread_index == 13) {
smaller_leaf_splits->data_indices_in_leaf = cuda_data_indices + cuda_leaf_num_data[left_leaf_index];
} else if (global_thread_index == 14) {
cuda_hist_pool[right_leaf_index] = cuda_hist + 2 * right_leaf_index * num_total_bin;
smaller_leaf_splits->hist_in_leaf = cuda_hist_pool[right_leaf_index];
} else if (global_thread_index == 15) {
larger_leaf_splits->hist_in_leaf = cuda_hist_pool[left_leaf_index];
} else if (global_thread_index == 16) {
cuda_split_info_buffer[6] = right_leaf_index;
} else if (global_thread_index == 17) {
cuda_split_info_buffer[7] = left_leaf_index;
}
}
}
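// Editorial note: the kernel above fans each scalar field update out to its own
// thread index, and swaps histogram-pool pointers so the smaller child always
// receives a fresh histogram slot while the larger child keeps the parent's,
// consistent with computing the larger child's histogram by subtraction.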
__global__ void SplitInnerKernel(const int left_leaf_index, const int right_leaf_index,
const data_size_t* cuda_leaf_data_start, const data_size_t* cuda_leaf_num_data,
const data_size_t* cuda_data_indices,
const data_size_t* block_to_left_offset_buffer, const data_size_t* block_to_right_offset_buffer,
const uint16_t* block_to_left_offset, data_size_t* out_data_indices_in_leaf) {
const data_size_t leaf_num_data_offset = cuda_leaf_data_start[left_leaf_index];
const data_size_t num_data_in_leaf = cuda_leaf_num_data[left_leaf_index] + cuda_leaf_num_data[right_leaf_index];
const unsigned int threadIdx_x = threadIdx.x;
const unsigned int blockDim_x = blockDim.x;
const unsigned int global_thread_index = blockIdx.x * blockDim_x + threadIdx_x;
const data_size_t* cuda_data_indices_in_leaf = cuda_data_indices + leaf_num_data_offset;
const uint16_t* block_to_left_offset_ptr = block_to_left_offset + blockIdx.x * blockDim_x;
const uint32_t to_right_block_offset = block_to_right_offset_buffer[blockIdx.x];
const uint32_t to_left_block_offset = block_to_left_offset_buffer[blockIdx.x];
data_size_t* left_out_data_indices_in_leaf = out_data_indices_in_leaf + to_left_block_offset;
data_size_t* right_out_data_indices_in_leaf = out_data_indices_in_leaf + to_right_block_offset;
if (static_cast<data_size_t>(global_thread_index) < num_data_in_leaf) {
const uint32_t thread_to_left_offset = (threadIdx_x == 0 ? 0 : block_to_left_offset_ptr[threadIdx_x - 1]);
const bool to_left = block_to_left_offset_ptr[threadIdx_x] > thread_to_left_offset;
if (to_left) {
left_out_data_indices_in_leaf[thread_to_left_offset] = cuda_data_indices_in_leaf[global_thread_index];
} else {
const uint32_t thread_to_right_offset = threadIdx.x - thread_to_left_offset;
right_out_data_indices_in_leaf[thread_to_right_offset] = cuda_data_indices_in_leaf[global_thread_index];
}
}
}
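// Editorial note: in the kernel above, block_to_left_offset holds the inclusive
// count of left-going rows within the block, so a thread goes left exactly when
// its inclusive count exceeds the previous slot's; threadIdx.x minus that left
// count gives the stable position among the block's right-going rows.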
__global__ void CopyDataIndicesKernel(
const data_size_t num_data_in_leaf,
const data_size_t* out_data_indices_in_leaf,
data_size_t* cuda_data_indices) {
const unsigned int threadIdx_x = threadIdx.x;
const unsigned int global_thread_index = blockIdx.x * blockDim.x + threadIdx_x;
if (global_thread_index < num_data_in_leaf) {
cuda_data_indices[global_thread_index] = out_data_indices_in_leaf[global_thread_index];
}
}
void CUDADataPartition::LaunchSplitInnerKernel(
const data_size_t num_data_in_leaf,
const CUDASplitInfo* best_split_info,
const int left_leaf_index,
const int right_leaf_index,
// for leaf splits information update
CUDALeafSplitsStruct* smaller_leaf_splits,
CUDALeafSplitsStruct* larger_leaf_splits,
data_size_t* left_leaf_num_data_ref,
data_size_t* right_leaf_num_data_ref,
data_size_t* left_leaf_start_ref,
data_size_t* right_leaf_start_ref,
double* left_leaf_sum_of_hessians_ref,
double* right_leaf_sum_of_hessians_ref,
double* left_leaf_sum_of_gradients_ref,
double* right_leaf_sum_of_gradients_ref) {
int num_blocks_final_ref = grid_dim_ - 1;
int num_blocks_final_aligned = 1;
while (num_blocks_final_ref > 0) {
num_blocks_final_aligned <<= 1;
num_blocks_final_ref >>= 1;
}
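// Editorial note: the loop above computes the smallest power of two strictly
// greater than grid_dim_ - 1 (so at least grid_dim_), giving Kernel1 a
// power-of-two block size that covers every block in its shuffle-based scan.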
global_timer.Start("CUDADataPartition::AggregateBlockOffsetKernel");
if (grid_dim_ > AGGREGATE_BLOCK_SIZE_DATA_PARTITION) {
AggregateBlockOffsetKernel0<<<1, AGGREGATE_BLOCK_SIZE_DATA_PARTITION, 0, cuda_streams_[0]>>>(
left_leaf_index,
right_leaf_index,
cuda_block_data_to_left_offset_,
cuda_block_data_to_right_offset_, cuda_leaf_data_start_, cuda_leaf_data_end_,
cuda_leaf_num_data_, cuda_data_indices_,
grid_dim_);
} else {
AggregateBlockOffsetKernel1<<<1, num_blocks_final_aligned, 0, cuda_streams_[0]>>>(
left_leaf_index,
right_leaf_index,
cuda_block_data_to_left_offset_,
cuda_block_data_to_right_offset_, cuda_leaf_data_start_, cuda_leaf_data_end_,
cuda_leaf_num_data_, cuda_data_indices_,
grid_dim_);
}
SynchronizeCUDADevice(__FILE__, __LINE__);
global_timer.Stop("CUDADataPartition::AggregateBlockOffsetKernel");
global_timer.Start("CUDADataPartition::SplitInnerKernel");
SplitInnerKernel<<<grid_dim_, block_dim_, 0, cuda_streams_[1]>>>(
left_leaf_index, right_leaf_index, cuda_leaf_data_start_, cuda_leaf_num_data_, cuda_data_indices_,
cuda_block_data_to_left_offset_, cuda_block_data_to_right_offset_, cuda_block_to_left_offset_,
cuda_out_data_indices_in_leaf_);
global_timer.Stop("CUDADataPartition::SplitInnerKernel");
SynchronizeCUDADevice(__FILE__, __LINE__);
global_timer.Start("CUDADataPartition::SplitTreeStructureKernel");
SplitTreeStructureKernel<<<4, 5, 0, cuda_streams_[0]>>>(left_leaf_index, right_leaf_index,
cuda_block_data_to_left_offset_,
cuda_block_data_to_right_offset_, cuda_leaf_data_start_, cuda_leaf_data_end_,
cuda_leaf_num_data_, cuda_out_data_indices_in_leaf_,
best_split_info,
smaller_leaf_splits,
larger_leaf_splits,
num_total_bin_,
cuda_hist_,
cuda_hist_pool_,
cuda_leaf_output_, cuda_split_info_buffer_);
global_timer.Stop("CUDADataPartition::SplitTreeStructureKernel");
std::vector<int> cpu_split_info_buffer(16);
const double* cpu_sum_hessians_info = reinterpret_cast<const double*>(cpu_split_info_buffer.data() + 8);
global_timer.Start("CUDADataPartition::CopyFromCUDADeviceToHostAsync");
CopyFromCUDADeviceToHostAsync<int>(cpu_split_info_buffer.data(), cuda_split_info_buffer_, 16, cuda_streams_[0], __FILE__, __LINE__);
SynchronizeCUDADevice(__FILE__, __LINE__);
global_timer.Stop("CUDADataPartition::CopyFromCUDADeviceToHostAsync");
const data_size_t left_leaf_num_data = cpu_split_info_buffer[1];
const data_size_t left_leaf_data_start = cpu_split_info_buffer[2];
const data_size_t right_leaf_num_data = cpu_split_info_buffer[4];
global_timer.Start("CUDADataPartition::CopyDataIndicesKernel");
CopyDataIndicesKernel<<<grid_dim_, block_dim_, 0, cuda_streams_[2]>>>(
left_leaf_num_data + right_leaf_num_data, cuda_out_data_indices_in_leaf_, cuda_data_indices_ + left_leaf_data_start);
global_timer.Stop("CUDADataPartition::CopyDataIndicesKernel");
const data_size_t right_leaf_data_start = cpu_split_info_buffer[5];
*left_leaf_num_data_ref = left_leaf_num_data;
*left_leaf_start_ref = left_leaf_data_start;
*right_leaf_num_data_ref = right_leaf_num_data;
*right_leaf_start_ref = right_leaf_data_start;
*left_leaf_sum_of_hessians_ref = cpu_sum_hessians_info[0];
*right_leaf_sum_of_hessians_ref = cpu_sum_hessians_info[1];
*left_leaf_sum_of_gradients_ref = cpu_sum_hessians_info[2];
*right_leaf_sum_of_gradients_ref = cpu_sum_hessians_info[3];
}
template <bool USE_BAGGING>
__global__ void AddPredictionToScoreKernel(
const data_size_t* data_indices_in_leaf,
const double* leaf_value, double* cuda_scores,
const int* cuda_data_index_to_leaf_index, const data_size_t num_data) {
const unsigned int threadIdx_x = threadIdx.x;
const unsigned int blockIdx_x = blockIdx.x;
const unsigned int blockDim_x = blockDim.x;
const data_size_t local_data_index = static_cast<data_size_t>(blockIdx_x * blockDim_x + threadIdx_x);
if (local_data_index < num_data) {
if (USE_BAGGING) {
const data_size_t global_data_index = data_indices_in_leaf[local_data_index];
const int leaf_index = cuda_data_index_to_leaf_index[global_data_index];
const double leaf_prediction_value = leaf_value[leaf_index];
cuda_scores[global_data_index] += leaf_prediction_value;
} else {
const int leaf_index = cuda_data_index_to_leaf_index[local_data_index];
const double leaf_prediction_value = leaf_value[leaf_index];
cuda_scores[local_data_index] += leaf_prediction_value;
}
}
}
void CUDADataPartition::LaunchAddPredictionToScoreKernel(const double* leaf_value, double* cuda_scores) {
global_timer.Start("CUDADataPartition::AddPredictionToScoreKernel");
const data_size_t num_data_in_root = root_num_data();
const int num_blocks = (num_data_in_root + FILL_INDICES_BLOCK_SIZE_DATA_PARTITION - 1) / FILL_INDICES_BLOCK_SIZE_DATA_PARTITION;
if (use_bagging_) {
AddPredictionToScoreKernel<true><<<num_blocks, FILL_INDICES_BLOCK_SIZE_DATA_PARTITION>>>(
cuda_data_indices_, leaf_value, cuda_scores, cuda_data_index_to_leaf_index_, num_data_in_root);
} else {
AddPredictionToScoreKernel<false><<<num_blocks, FILL_INDICES_BLOCK_SIZE_DATA_PARTITION>>>(
cuda_data_indices_, leaf_value, cuda_scores, cuda_data_index_to_leaf_index_, num_data_in_root);
}
SynchronizeCUDADevice(__FILE__, __LINE__);
global_timer.Stop("CUDADataPartition::AddPredictionToScoreKernel");
}
} // namespace LightGBM
#endif // USE_CUDA_EXP
|
29e54ad42ba04b8d8dba2ef44a88cf926d5944c2.hip | // !!! This is a file automatically generated by hipify!!!
//GPURSQFBatchInsertsPerf.cu
#include <stdio.h>
#include <assert.h>
#include "../../mt19937ar.h"
#include "RSQF.cuh"
void generateRandomNumbers(unsigned int *numberArray, unsigned int n)
{
for (int i = 0; i < n; i++){
numberArray[i] = genrand_int32();
}
}
int main(int argc, char* argv[])
{
assert(argc == 4);
int q = atoi(argv[1]);
float alpha = atof(argv[2]);
int batchSize = atoi(argv[3]);
//Initialize the filter:
struct countingQuotientFilterGPU test_cqf_gpu;
initCQFGPU(&test_cqf_gpu, q);
//Generate random numbers:
unsigned int numValues = alpha * (1 << q);
unsigned int* h_randomValues = new unsigned int[numValues];
init_genrand(time(NULL)); //initialize random number generator
generateRandomNumbers(h_randomValues, numValues);
unsigned int* d_randomValues;
hipMalloc((void**) &d_randomValues, numValues * sizeof(unsigned int));
hipMemcpy(d_randomValues, h_randomValues, numValues * sizeof(unsigned int), hipMemcpyHostToDevice);
//Inserts
//Allocate output array
int* d_insertReturnValues;
hipMalloc((void**) &d_insertReturnValues, numValues * sizeof(int));
//Insert kernel
float insertTime = insertGPU(test_cqf_gpu, numValues, d_randomValues, d_insertReturnValues);
//Insert new batch
unsigned int* h_newBatch = new unsigned int[batchSize];
generateRandomNumbers(h_newBatch, batchSize);
unsigned int* d_newBatch;
hipMalloc((void**) &d_newBatch, batchSize * sizeof(unsigned int));
hipMemcpy(d_newBatch, h_newBatch, batchSize * sizeof(unsigned int), hipMemcpyHostToDevice);
int* d_batchInsertReturnValues;
hipMalloc((void**) &d_batchInsertReturnValues, batchSize * sizeof(int));
float batchInsertTime = insertGPU(test_cqf_gpu, batchSize, d_newBatch, d_batchInsertReturnValues);
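//Editorial note (assuming insertGPU returns elapsed milliseconds): inserts/ms / 1000 = inserts per microsecond, i.e. millions of inserts per second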
printf("%f\n", batchSize / batchInsertTime / 1000);
//Free Memory
hipFree(test_cqf_gpu.blocks);
delete[] h_randomValues;
hipFree(d_randomValues);
hipFree(d_insertReturnValues);
hipFree(d_batchInsertReturnValues);
delete[] h_newBatch;
hipFree(d_newBatch);
hipDeviceReset();
return 0;
}
| 29e54ad42ba04b8d8dba2ef44a88cf926d5944c2.cu | //GPURSQFBatchInsertsPerf.cu
#include <stdio.h>
#include <assert.h>
#include "../../mt19937ar.h"
#include "RSQF.cuh"
void generateRandomNumbers(unsigned int *numberArray, unsigned int n)
{
for (int i = 0; i < n; i++){
numberArray[i] = genrand_int32();
}
}
int main(int argc, char* argv[])
{
assert(argc == 4);
int q = atoi(argv[1]);
float alpha = atof(argv[2]);
int batchSize = atoi(argv[3]);
//Initialize the filter:
struct countingQuotientFilterGPU test_cqf_gpu;
initCQFGPU(&test_cqf_gpu, q);
//Generate random numbers:
unsigned int numValues = alpha * (1 << q);
unsigned int* h_randomValues = new unsigned int[numValues];
init_genrand(time(NULL)); //initialize random number generator
generateRandomNumbers(h_randomValues, numValues);
unsigned int* d_randomValues;
cudaMalloc((void**) &d_randomValues, numValues * sizeof(unsigned int));
cudaMemcpy(d_randomValues, h_randomValues, numValues * sizeof(unsigned int), cudaMemcpyHostToDevice);
//Inserts
//Allocate output array
int* d_insertReturnValues;
cudaMalloc((void**) &d_insertReturnValues, numValues * sizeof(int));
//Insert kernel
float insertTime = insertGPU(test_cqf_gpu, numValues, d_randomValues, d_insertReturnValues);
//Insert new batch
unsigned int* h_newBatch = new unsigned int[batchSize];
generateRandomNumbers(h_newBatch, batchSize);
unsigned int* d_newBatch;
cudaMalloc((void**) &d_newBatch, batchSize * sizeof(unsigned int));
cudaMemcpy(d_newBatch, h_newBatch, batchSize * sizeof(unsigned int), cudaMemcpyHostToDevice);
int* d_batchInsertReturnValues;
cudaMalloc((void**) &d_batchInsertReturnValues, batchSize * sizeof(int));
float batchInsertTime = insertGPU(test_cqf_gpu, batchSize, d_newBatch, d_batchInsertReturnValues);
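//Editorial note (assuming insertGPU returns elapsed milliseconds): inserts/ms / 1000 = inserts per microsecond, i.e. millions of inserts per second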
printf("%f\n", batchSize / batchInsertTime / 1000);
//Free Memory
cudaFree(test_cqf_gpu.blocks);
delete[] h_randomValues;
cudaFree(d_randomValues);
cudaFree(d_insertReturnValues);
cudaFree(d_batchInsertReturnValues);
delete[] h_newBatch;
cudaFree(d_newBatch);
cudaDeviceReset();
return 0;
}
|
7d5d737f603e885bacb79b7f366be16edc2df07f.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright 2016 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <cudnn.h>
#include <hip/hip_runtime.h>
#include <stdio.h>
// Reference outputs (calculated on an M40 GPU)
// > ./RNN 20 2 512 64 0
// Forward: 1299 GFLOPs
// Backward: 2171 GFLOPs, (1564 GFLOPs), (3549 GFLOPs)
// i checksum 1.315793E+06 h checksum 1.315212E+05
// di checksum 6.676003E+01 dh checksum 6.425067E+01
// dw checksum 1.453750E+09
//
// > ./RNN 20 2 512 64 1
// Forward: 1296 GFLOPs
// Backward: 2235 GFLOPs, (1567 GFLOPs), (3896 GFLOPs)
// i checksum 6.319591E+05 h checksum 6.319605E+04
// di checksum 4.501830E+00 dh checksum 4.489546E+00
// dw checksum 5.012598E+07
//
// > ./RNN 20 2 512 64 2
// Forward: 2635 GFLOPs
// Backward: 2757 GFLOPs, (2001 GFLOPs), (4433 GFLOPs)
// i checksum 5.749536E+05 c checksum 4.365091E+05 h checksum 5.774818E+04
// di checksum 3.842206E+02 dc checksum 9.323785E+03 dh checksum 1.182566E+01
// dw checksum 4.313461E+08
//
// > ./RNN 20 2 512 64 3
// Forward: 2428 GFLOPs
// Backward: 2645 GFLOPs, (1915 GFLOPs), (4270 GFLOPs)
// i checksum 6.358978E+05 h checksum 6.281680E+04
// di checksum 6.296622E+00 dh checksum 2.289960E+05
// dw checksum 5.397419E+07
// Define some error checking macros.
#define cudaErrCheck(stat) { cudaErrCheck_((stat), __FILE__, __LINE__); }
void cudaErrCheck_(hipError_t stat, const char *file, int line) {
if (stat != hipSuccess) {
fprintf(stderr, "CUDA Error: %s %s %d\n", hipGetErrorString(stat), file, line);
}
}
#define cudnnErrCheck(stat) { cudnnErrCheck_((stat), __FILE__, __LINE__); }
void cudnnErrCheck_(cudnnStatus_t stat, const char *file, int line) {
if (stat != CUDNN_STATUS_SUCCESS) {
fprintf(stderr, "cuDNN Error: %s %s %d\n", cudnnGetErrorString(stat), file, line);
}
}
__global__ void initGPUData_ker(float *data, int numElements, float value) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < numElements) {
data[tid] = value;
}
}
void initGPUData(float *data, int numElements, float value) {
dim3 gridDim;
dim3 blockDim;
blockDim.x = 1024;
gridDim.x = (numElements + blockDim.x - 1) / blockDim.x;
hipLaunchKernelGGL(( initGPUData_ker) , dim3(gridDim), dim3(blockDim) , 0, 0, data, numElements, value);
}
int main(int argc, char* argv[]) {
int seqLength;
int numLayers;
int hiddenSize;
int inputSize;
int miniBatch;
float dropout;
bool bidirectional;
int mode;
int persistent;
FILE *fp;
fp=fopen("result.txt","w");
if (argc == 6) {
seqLength = atoi(argv[1]);
numLayers = atoi(argv[2]);
hiddenSize = atoi(argv[3]);
inputSize = hiddenSize;
miniBatch = atoi(argv[4]);
dropout = 0;
bidirectional = 0;
mode = atoi(argv[5]);
persistent = 0;
}
else {
printf("Usage:\n");
printf("./RNN <seqLength> <numLayers> <hiddenSize> <miniBatch> <mode>\n");
printf("Modes: 0 = RNN_RELU, 1 = RNN_TANH, 2 = LSTM, 3 = GRU\n");
return 1;
}
// -------------------------
// Create cudnn context
// -------------------------
cudnnHandle_t cudnnHandle;
cudnnErrCheck(cudnnCreate(&cudnnHandle));
// -------------------------
// Set up inputs and outputs
// -------------------------
void *x;
void *hx = NULL;
void *cx = NULL;
void *dx;
void *dhx = NULL;
void *dcx = NULL;
void *y;
void *hy = NULL;
void *cy = NULL;
void *dy;
void *dhy = NULL;
void *dcy = NULL;
// Memory allocation. hx, cx, dhx, dcx, hy, cy, dhy and dcy can be NULL.
cudaErrCheck(hipMalloc((void**)&x, seqLength * inputSize * miniBatch * sizeof(float)));
cudaErrCheck(hipMalloc((void**)&hx, numLayers * hiddenSize * miniBatch * (bidirectional ? 2 : 1) * sizeof(float)));
cudaErrCheck(hipMalloc((void**)&cx, numLayers * hiddenSize * miniBatch * (bidirectional ? 2 : 1) * sizeof(float)));
cudaErrCheck(hipMalloc((void**)&dx, seqLength * inputSize * miniBatch * sizeof(float)));
cudaErrCheck(hipMalloc((void**)&dhx, numLayers * hiddenSize * miniBatch * (bidirectional ? 2 : 1) * sizeof(float)));
cudaErrCheck(hipMalloc((void**)&dcx, numLayers * hiddenSize * miniBatch * (bidirectional ? 2 : 1) * sizeof(float)));
cudaErrCheck(hipMalloc((void**)&y, seqLength * hiddenSize * miniBatch * (bidirectional ? 2 : 1) * sizeof(float)));
cudaErrCheck(hipMalloc((void**)&hy, numLayers * hiddenSize * miniBatch * (bidirectional ? 2 : 1) * sizeof(float)));
cudaErrCheck(hipMalloc((void**)&cy, numLayers * hiddenSize * miniBatch * (bidirectional ? 2 : 1) * sizeof(float)));
cudaErrCheck(hipMalloc((void**)&dy, seqLength * hiddenSize * miniBatch * (bidirectional ? 2 : 1) * sizeof(float)));
cudaErrCheck(hipMalloc((void**)&dhy, numLayers * hiddenSize * miniBatch * (bidirectional ? 2 : 1) * sizeof(float)));
cudaErrCheck(hipMalloc((void**)&dcy, numLayers * hiddenSize * miniBatch * (bidirectional ? 2 : 1) * sizeof(float)));
// Set up tensor descriptors. x/y/dx/dy are arrays, one per time step.
cudnnTensorDescriptor_t *xDesc, *yDesc, *dxDesc, *dyDesc;
cudnnTensorDescriptor_t hxDesc, cxDesc;
cudnnTensorDescriptor_t hyDesc, cyDesc;
cudnnTensorDescriptor_t dhxDesc, dcxDesc;
cudnnTensorDescriptor_t dhyDesc, dcyDesc;
xDesc = (cudnnTensorDescriptor_t*)malloc(seqLength * sizeof(cudnnTensorDescriptor_t));
yDesc = (cudnnTensorDescriptor_t*)malloc(seqLength * sizeof(cudnnTensorDescriptor_t));
dxDesc = (cudnnTensorDescriptor_t*)malloc(seqLength * sizeof(cudnnTensorDescriptor_t));
dyDesc = (cudnnTensorDescriptor_t*)malloc(seqLength * sizeof(cudnnTensorDescriptor_t));
int dimA[3];
int strideA[3];
// In this example dimA[1] is constant across the whole sequence
// This isn't required; all that is required is that it does not increase.
for (int i = 0; i < seqLength; i++) {
cudnnErrCheck(cudnnCreateTensorDescriptor(&xDesc[i]));
cudnnErrCheck(cudnnCreateTensorDescriptor(&yDesc[i]));
cudnnErrCheck(cudnnCreateTensorDescriptor(&dxDesc[i]));
cudnnErrCheck(cudnnCreateTensorDescriptor(&dyDesc[i]));
dimA[0] = miniBatch;
dimA[1] = inputSize;
dimA[2] = 1;
strideA[0] = dimA[2] * dimA[1];
strideA[1] = dimA[2];
strideA[2] = 1;
cudnnErrCheck(cudnnSetTensorNdDescriptor(xDesc[i], CUDNN_DATA_FLOAT, 3, dimA, strideA));
cudnnErrCheck(cudnnSetTensorNdDescriptor(dxDesc[i], CUDNN_DATA_FLOAT, 3, dimA, strideA));
dimA[0] = miniBatch;
dimA[1] = bidirectional ? hiddenSize * 2 : hiddenSize;
dimA[2] = 1;
strideA[0] = dimA[2] * dimA[1];
strideA[1] = dimA[2];
strideA[2] = 1;
cudnnErrCheck(cudnnSetTensorNdDescriptor(yDesc[i], CUDNN_DATA_FLOAT, 3, dimA, strideA));
cudnnErrCheck(cudnnSetTensorNdDescriptor(dyDesc[i], CUDNN_DATA_FLOAT, 3, dimA, strideA));
}
dimA[0] = numLayers * (bidirectional ? 2 : 1);
dimA[1] = miniBatch;
dimA[2] = hiddenSize;
strideA[0] = dimA[2] * dimA[1];
strideA[1] = dimA[2];
strideA[2] = 1;
cudnnErrCheck(cudnnCreateTensorDescriptor(&hxDesc));
cudnnErrCheck(cudnnCreateTensorDescriptor(&cxDesc));
cudnnErrCheck(cudnnCreateTensorDescriptor(&hyDesc));
cudnnErrCheck(cudnnCreateTensorDescriptor(&cyDesc));
cudnnErrCheck(cudnnCreateTensorDescriptor(&dhxDesc));
cudnnErrCheck(cudnnCreateTensorDescriptor(&dcxDesc));
cudnnErrCheck(cudnnCreateTensorDescriptor(&dhyDesc));
cudnnErrCheck(cudnnCreateTensorDescriptor(&dcyDesc));
cudnnErrCheck(cudnnSetTensorNdDescriptor(hxDesc, CUDNN_DATA_FLOAT, 3, dimA, strideA));
cudnnErrCheck(cudnnSetTensorNdDescriptor(cxDesc, CUDNN_DATA_FLOAT, 3, dimA, strideA));
cudnnErrCheck(cudnnSetTensorNdDescriptor(hyDesc, CUDNN_DATA_FLOAT, 3, dimA, strideA));
cudnnErrCheck(cudnnSetTensorNdDescriptor(cyDesc, CUDNN_DATA_FLOAT, 3, dimA, strideA));
cudnnErrCheck(cudnnSetTensorNdDescriptor(dhxDesc, CUDNN_DATA_FLOAT, 3, dimA, strideA));
cudnnErrCheck(cudnnSetTensorNdDescriptor(dcxDesc, CUDNN_DATA_FLOAT, 3, dimA, strideA));
cudnnErrCheck(cudnnSetTensorNdDescriptor(dhyDesc, CUDNN_DATA_FLOAT, 3, dimA, strideA));
cudnnErrCheck(cudnnSetTensorNdDescriptor(dcyDesc, CUDNN_DATA_FLOAT, 3, dimA, strideA));
// -------------------------
// Set up the dropout descriptor (needed for the RNN descriptor)
// -------------------------
unsigned long long seed = 1337ull; // Pick a seed.
cudnnDropoutDescriptor_t dropoutDesc;
cudnnErrCheck(cudnnCreateDropoutDescriptor(&dropoutDesc));
// How much memory does dropout need for states?
// These states are used to generate random numbers internally
// and should not be freed until the RNN descriptor is no longer used
size_t stateSize;
void *states;
cudnnErrCheck(cudnnDropoutGetStatesSize(cudnnHandle, &stateSize));
cudaErrCheck(hipMalloc(&states, stateSize));
cudnnErrCheck(cudnnSetDropoutDescriptor(dropoutDesc,
cudnnHandle,
dropout,
states,
stateSize,
seed));
// -------------------------
// Set up the RNN descriptor
// -------------------------
cudnnRNNDescriptor_t rnnDesc;
miopenRNNMode_t RNNMode;
cudnnRNNAlgo_t RNNAlgo;
cudnnErrCheck(cudnnCreateRNNDescriptor(&rnnDesc));
if (mode == 0) RNNMode = miopenRNNRELU;
else if (mode == 1) RNNMode = miopenRNNTANH;
else if (mode == 2) RNNMode = miopenLSTM;
else if (mode == 3) RNNMode = miopenGRU;
// Persistent RNNs are only supported on Pascal+ GPUs.
if (persistent == 0) RNNAlgo = CUDNN_RNN_ALGO_STANDARD;
else if (persistent == 1) RNNAlgo = CUDNN_RNN_ALGO_PERSIST_STATIC;
else if (persistent == 2) RNNAlgo = CUDNN_RNN_ALGO_PERSIST_DYNAMIC;
cudnnErrCheck(cudnnSetRNNDescriptor_v6(cudnnHandle,
rnnDesc,
hiddenSize,
numLayers,
dropoutDesc,
CUDNN_LINEAR_INPUT, // We can also skip the input matrix transformation
bidirectional ? CUDNN_BIDIRECTIONAL : CUDNN_UNIDIRECTIONAL,
RNNMode,
RNNAlgo, // Can be changed to use persistent RNNs on Pascal+ GPUs.
CUDNN_DATA_FLOAT));
// -------------------------
// Set up parameters
// -------------------------
// This needs to be done after the rnn descriptor is set as otherwise
// we don't know how many parameters we have to allocate
void *w;
void *dw;
cudnnFilterDescriptor_t wDesc, dwDesc;
cudnnErrCheck(cudnnCreateFilterDescriptor(&wDesc));
cudnnErrCheck(cudnnCreateFilterDescriptor(&dwDesc));
size_t weightsSize;
cudnnErrCheck(cudnnGetRNNParamsSize(cudnnHandle, rnnDesc, xDesc[0], &weightsSize, CUDNN_DATA_FLOAT));
int dimW[3];
dimW[0] = weightsSize / sizeof(float);
dimW[1] = 1;
dimW[2] = 1;
cudnnErrCheck(cudnnSetFilterNdDescriptor(wDesc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 3, dimW));
cudnnErrCheck(cudnnSetFilterNdDescriptor(dwDesc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 3, dimW));
cudaErrCheck(hipMalloc((void**)&w, weightsSize));
cudaErrCheck(hipMalloc((void**)&dw, weightsSize));
// -------------------------
// Set up work space and reserved memory
// -------------------------
void *workspace;
void *reserveSpace;
size_t workSize;
size_t reserveSize;
// Need for every pass
cudnnErrCheck(cudnnGetRNNWorkspaceSize(cudnnHandle, rnnDesc, seqLength, xDesc, &workSize));
// Only needed in training, shouldn't be touched between passes.
cudnnErrCheck(cudnnGetRNNTrainingReserveSize(cudnnHandle, rnnDesc, seqLength, xDesc, &reserveSize));
cudaErrCheck(hipMalloc((void**)&workspace, workSize));
cudaErrCheck(hipMalloc((void**)&reserveSpace, reserveSize));
// *********************************************************************************************************
// Initialise weights and inputs
// *********************************************************************************************************
// We initialise to something simple.
// Matrices are initialised to 1 / matrixSize, biases to 1, data is 1.
initGPUData((float*)x, seqLength * inputSize * miniBatch, 1.f);
if (hx != NULL) initGPUData((float*)hx, numLayers * hiddenSize * miniBatch * (bidirectional ? 2 : 1), 1.f);
if (cx != NULL) initGPUData((float*)cx, numLayers * hiddenSize * miniBatch * (bidirectional ? 2 : 1), 1.f);
initGPUData((float*)dy, seqLength * hiddenSize * miniBatch * (bidirectional ? 2 : 1), 1.f);
if (dhy != NULL) initGPUData((float*)dhy, numLayers * hiddenSize * miniBatch * (bidirectional ? 2 : 1), 1.f);
if (dcy != NULL) initGPUData((float*)dcy, numLayers * hiddenSize * miniBatch * (bidirectional ? 2 : 1), 1.f);
// Weights
int numLinearLayers = 0;
if (RNNMode == miopenRNNRELU || RNNMode == miopenRNNTANH) {
numLinearLayers = 2;
}
else if (RNNMode == miopenLSTM) {
numLinearLayers = 8;
}
else if (RNNMode == miopenGRU) {
numLinearLayers = 6;
}
for (int layer = 0; layer < numLayers * (bidirectional ? 2 : 1); layer++) {
for (int linLayerID = 0; linLayerID < numLinearLayers; linLayerID++) {
cudnnFilterDescriptor_t linLayerMatDesc;
cudnnErrCheck(cudnnCreateFilterDescriptor(&linLayerMatDesc));
float *linLayerMat;
cudnnErrCheck(cudnnGetRNNLinLayerMatrixParams( cudnnHandle,
rnnDesc,
layer,
xDesc[0],
wDesc,
w,
linLayerID,
linLayerMatDesc,
(void**)&linLayerMat));
cudnnDataType_t dataType;
cudnnTensorFormat_t format;
int nbDims;
int filterDimA[3];
cudnnErrCheck(cudnnGetFilterNdDescriptor(linLayerMatDesc,
3,
&dataType,
&format,
&nbDims,
filterDimA));
initGPUData(linLayerMat, filterDimA[0] * filterDimA[1] * filterDimA[2], 1.f / (float)(filterDimA[0] * filterDimA[1] * filterDimA[2]));
cudnnErrCheck(cudnnDestroyFilterDescriptor(linLayerMatDesc));
cudnnFilterDescriptor_t linLayerBiasDesc;
cudnnErrCheck(cudnnCreateFilterDescriptor(&linLayerBiasDesc));
float *linLayerBias;
cudnnErrCheck(cudnnGetRNNLinLayerBiasParams( cudnnHandle,
rnnDesc,
layer,
xDesc[0],
wDesc,
w,
linLayerID,
linLayerBiasDesc,
(void**)&linLayerBias));
cudnnErrCheck(cudnnGetFilterNdDescriptor(linLayerBiasDesc,
3,
&dataType,
&format,
&nbDims,
filterDimA));
initGPUData(linLayerBias, filterDimA[0] * filterDimA[1] * filterDimA[2], 1.f);
cudnnErrCheck(cudnnDestroyFilterDescriptor(linLayerBiasDesc));
}
}
// *********************************************************************************************************
// Dynamic persistent RNN plan (if using this algo)
// *********************************************************************************************************
cudnnPersistentRNNPlan_t rnnPlan;
if (RNNAlgo == CUDNN_RNN_ALGO_PERSIST_DYNAMIC) {
// Note: This step is expensive. Once completed, the plan can be reused so long as
// the descriptor's minibatch or datatype don't change.
cudnnErrCheck(cudnnCreatePersistentRNNPlan(rnnDesc, miniBatch, CUDNN_DATA_FLOAT, &rnnPlan));
// Tell calls using this descriptor which plan to use.
cudnnErrCheck(cudnnSetPersistentRNNPlan(rnnDesc, rnnPlan));
}
// *********************************************************************************************************
// At this point all of the setup is done. We now need to pass through the RNN.
// *********************************************************************************************************
cudaErrCheck(hipDeviceSynchronize());
hipEvent_t start, stop;
float timeForward, timeBackward1, timeBackward2;
cudaErrCheck(hipEventCreate(&start));
cudaErrCheck(hipEventCreate(&stop));
cudaErrCheck(hipEventRecord(start));
// If we're not training we use this instead
// cudnnErrCheck(cudnnRNNForwardInference(cudnnHandle,
// rnnDesc,
// seqLength,
// xDesc,
// x,
// hxDesc,
// hx,
// cxDesc,
// cx,
// wDesc,
// w,
// yDesc,
// y,
// hyDesc,
// hy,
// cyDesc,
// cy,
// workspace,
// workSize));
cudnnErrCheck(cudnnRNNForwardTraining(cudnnHandle,
rnnDesc,
seqLength,
xDesc,
x,
hxDesc,
hx,
cxDesc,
cx,
wDesc,
w,
yDesc,
y,
hyDesc,
hy,
cyDesc,
cy,
workspace,
workSize,
reserveSpace,
reserveSize));
cudaErrCheck(hipEventRecord(stop));
cudaErrCheck(hipEventSynchronize(stop));
cudaErrCheck(hipEventElapsedTime(&timeForward, start, stop));
cudaErrCheck(hipEventRecord(start));
cudnnErrCheck(cudnnRNNBackwardData(cudnnHandle,
rnnDesc,
seqLength,
yDesc,
y,
dyDesc,
dy,
dhyDesc,
dhy,
dcyDesc,
dcy,
wDesc,
w,
hxDesc,
hx,
cxDesc,
cx,
dxDesc,
dx,
dhxDesc,
dhx,
dcxDesc,
dcx,
workspace,
workSize,
reserveSpace,
reserveSize ));
cudaErrCheck(hipEventRecord(stop));
cudaErrCheck(hipEventSynchronize(stop));
cudaErrCheck(hipEventElapsedTime(&timeBackward1, start, stop));
cudaErrCheck(hipEventRecord(start));
// cudnnRNNBackwardWeights adds to the data in dw.
cudaErrCheck(hipMemset(dw, 0, weightsSize));
cudnnErrCheck(cudnnRNNBackwardWeights( cudnnHandle,
rnnDesc,
seqLength,
xDesc,
x,
hxDesc,
hx,
yDesc,
y,
workspace,
workSize,
dwDesc,
dw,
reserveSpace,
reserveSize ));
cudaErrCheck(hipEventRecord(stop));
cudaErrCheck(hipEventSynchronize(stop));
cudaErrCheck(hipEventElapsedTime(&timeBackward2, start, stop));
int numMats = 0;
if (RNNMode == miopenRNNRELU || RNNMode == miopenRNNTANH) {
numMats = 2;
}
else if (RNNMode == miopenLSTM) {
numMats = 8;
}
else if (RNNMode == miopenGRU) {
numMats = 6;
}
// Calculate FLOPS
printf("Forward: %3.0f GFLOPS\n", numMats * 2ull * (bidirectional ? 2 : 1) * hiddenSize * hiddenSize * seqLength * miniBatch * numLayers / (1e6 * timeForward));
printf("Backward: %3.0f GFLOPS, ", numMats * 4ull * (bidirectional ? 2 : 1) * hiddenSize * hiddenSize * seqLength * miniBatch * numLayers / (1e6 * (timeBackward1 + timeBackward2)));
printf("(%3.0f GFLOPS), ", numMats * 2ull * (bidirectional ? 2 : 1) * hiddenSize * hiddenSize * seqLength * miniBatch * numLayers / (1e6 * timeBackward1));
printf("(%3.0f GFLOPS)\n", numMats * 2ull * (bidirectional ? 2 : 1) * hiddenSize * hiddenSize * seqLength * miniBatch * numLayers / (1e6 * timeBackward2));
// Calculate FLOPS
fprintf(fp,"Forward: %3.0f GFLOPS\n", numMats * 2ull * (bidirectional ? 2 : 1) * hiddenSize * hiddenSize * seqLength * miniBatch * numLayers / (1e6 * timeForward));
fprintf(fp,"Backward: %3.0f GFLOPS, ", numMats * 4ull * (bidirectional ? 2 : 1) * hiddenSize * hiddenSize * seqLength * miniBatch * numLayers / (1e6 * (timeBackward1 + timeBackward2)));
fprintf(fp,"(%3.0f GFLOPS), ", numMats * 2ull * (bidirectional ? 2 : 1) * hiddenSize * hiddenSize * seqLength * miniBatch * numLayers / (1e6 * timeBackward1));
fprintf(fp,"(%3.0f GFLOPS)\n", numMats * 2ull * (bidirectional ? 2 : 1) * hiddenSize * hiddenSize * seqLength * miniBatch * numLayers / (1e6 * timeBackward2));
// Make double-sure everything is finished before we copy for result checking.
hipDeviceSynchronize();
// *********************************************************************************************************
// Print checksums.
// *********************************************************************************************************
if (true) {
float* testOutputi;
float* testOutputh;
float* testOutputc;
int biDirScale = (bidirectional ? 2 : 1);
testOutputi = (float*)malloc(hiddenSize * seqLength * miniBatch * biDirScale * sizeof(float));
testOutputh = (float*)malloc(hiddenSize * miniBatch * numLayers * biDirScale * sizeof(float));
testOutputc = (float*)malloc(hiddenSize * miniBatch * numLayers * biDirScale * sizeof(float));
cudaErrCheck(hipMemcpy(testOutputi, y, hiddenSize * seqLength * miniBatch * biDirScale * sizeof(float), hipMemcpyDeviceToHost));
if (hy != NULL) cudaErrCheck(hipMemcpy(testOutputh, hy, numLayers * hiddenSize * miniBatch * biDirScale * sizeof(float), hipMemcpyDeviceToHost));
if (cy != NULL && RNNMode == miopenLSTM) cudaErrCheck(hipMemcpy(testOutputc, cy, numLayers * hiddenSize * miniBatch * biDirScale * sizeof(float), hipMemcpyDeviceToHost));
double checksumi = 0.f;
double checksumh = 0.f;
double checksumc = 0.f;
for (int m = 0; m < miniBatch; m++) {
double localSumi = 0;
double localSumh = 0;
double localSumc = 0;
for (int j = 0; j < seqLength; j++) {
for (int i = 0; i < hiddenSize * biDirScale; i++) {
localSumi += testOutputi[j * miniBatch * hiddenSize * biDirScale + m * hiddenSize * biDirScale + i];
}
}
for (int j = 0; j < numLayers * biDirScale; j++) {
for (int i = 0; i < hiddenSize; i++) {
if (hy != NULL) localSumh += testOutputh[j * hiddenSize * miniBatch + m * hiddenSize + i];
if (cy != NULL) if (RNNMode == miopenLSTM) localSumc += testOutputc[j * hiddenSize * miniBatch + m * hiddenSize + i];
}
}
checksumi += localSumi;
checksumh += localSumh;
checksumc += localSumc;
}
printf("i checksum %E ", checksumi);
fprintf(fp,"i checksum %E ", checksumi);
if (RNNMode == miopenLSTM) { printf("c checksum %E ", checksumc); fprintf(fp,"c checksum %E ", checksumc); }
printf("h checksum %E\n", checksumh);
fprintf(fp,"h checksum %E\n", checksumh);
free(testOutputi);
free(testOutputc);
free(testOutputh);
}
if (true) {
float* testOutputdi;
float* testOutputdh;
float* testOutputdc;
int biDirScale = (bidirectional ? 2 : 1);
testOutputdi = (float*)malloc(inputSize * seqLength * miniBatch * sizeof(float));
testOutputdh = (float*)malloc(hiddenSize * miniBatch * numLayers * biDirScale * sizeof(float));
testOutputdc = (float*)malloc(hiddenSize * miniBatch * numLayers * biDirScale * sizeof(float));
cudaErrCheck(hipMemcpy(testOutputdi, dx, seqLength * miniBatch * inputSize * sizeof(float), hipMemcpyDeviceToHost));
if (dhx != NULL) cudaErrCheck(hipMemcpy(testOutputdh, dhx, numLayers * hiddenSize * miniBatch * biDirScale * sizeof(float), hipMemcpyDeviceToHost));
if (dcx != NULL) if (RNNMode == miopenLSTM) cudaErrCheck(hipMemcpy(testOutputdc, dcx, numLayers * hiddenSize * miniBatch * biDirScale * sizeof(float), hipMemcpyDeviceToHost));
// Accumulate in double (as in the forward checksums) to avoid float truncation.
double checksumdi = 0.;
double checksumdh = 0.;
double checksumdc = 0.;
for (int m = 0; m < miniBatch; m++) {
double localSumdi = 0;
double localSumdh = 0;
double localSumdc = 0;
for (int j = 0; j < seqLength; j++) {
for (int i = 0; i < inputSize; i++) {
localSumdi += testOutputdi[j * miniBatch * inputSize + m * inputSize + i];
}
}
for (int j = 0; j < numLayers * biDirScale; j++) {
for (int i = 0; i < hiddenSize; i++) {
localSumdh += testOutputdh[j * hiddenSize * miniBatch + m * hiddenSize + i];
if (RNNMode == miopenLSTM) localSumdc += testOutputdc[j * hiddenSize * miniBatch + m * hiddenSize + i];
}
}
checksumdi += localSumdi;
checksumdh += localSumdh;
checksumdc += localSumdc;
}
printf("di checksum %E ", checksumdi);
fprintf(fp,"di checksum %E ", checksumdi);
if (RNNMode == miopenLSTM) { printf("dc checksum %E ", checksumdc); fprintf(fp,"dc checksum %E ", checksumdc); }
printf("dh checksum %E\n", checksumdh);
fprintf(fp,"dh checksum %E\n", checksumdh);
free(testOutputdi);
free(testOutputdh);
free(testOutputdc);
}
if (true) {
float* testOutputdw;
testOutputdw = (float*)malloc(weightsSize);
cudaErrCheck(hipMemcpy(testOutputdw, dw, weightsSize, hipMemcpyDeviceToHost));
double checksumdw = 0.;
for (int i = 0; i < weightsSize / sizeof(float); i++) {
checksumdw += testOutputdw[i];
}
printf("dw checksum %E\n", checksumdw);
fprintf(fp,"dw checksum %E\n", checksumdw);
free(testOutputdw);
}
if (RNNAlgo == CUDNN_RNN_ALGO_PERSIST_DYNAMIC) {
cudnnDestroyPersistentRNNPlan(rnnPlan);
}
hipFree(x);
hipFree(hx);
hipFree(cx);
hipFree(y);
hipFree(hy);
hipFree(cy);
hipFree(dx);
hipFree(dhx);
hipFree(dcx);
hipFree(dy);
hipFree(dhy);
hipFree(dcy);
hipFree(workspace);
hipFree(reserveSpace);
hipFree(w);
hipFree(dw);
hipFree(states);
for (int i = 0; i < seqLength; i++) {
cudnnDestroyTensorDescriptor(xDesc[i]);
cudnnDestroyTensorDescriptor(yDesc[i]);
cudnnDestroyTensorDescriptor(dxDesc[i]);
cudnnDestroyTensorDescriptor(dyDesc[i]);
}
cudnnDestroyTensorDescriptor(hxDesc);
cudnnDestroyTensorDescriptor(cxDesc);
cudnnDestroyTensorDescriptor(hyDesc);
cudnnDestroyTensorDescriptor(cyDesc);
cudnnDestroyTensorDescriptor(dhxDesc);
cudnnDestroyTensorDescriptor(dcxDesc);
cudnnDestroyTensorDescriptor(dhyDesc);
cudnnDestroyTensorDescriptor(dcyDesc);
cudnnDestroyDropoutDescriptor(dropoutDesc);
cudnnDestroyRNNDescriptor(rnnDesc);
cudnnDestroyFilterDescriptor(wDesc);
cudnnDestroyFilterDescriptor(dwDesc);
free(xDesc);
free(yDesc);
free(dxDesc);
free(dyDesc);
cudnnDestroy(cudnnHandle);
fclose(fp);
return 0;
}
| 7d5d737f603e885bacb79b7f366be16edc2df07f.cu | /**
* Copyright 2016 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <cudnn.h>
#include <cuda.h>
#include <stdio.h>
// Reference outputs (calculated on an M40 GPU)
// > ./RNN 20 2 512 64 0
// Forward: 1299 GFLOPs
// Backward: 2171 GFLOPs, (1564 GFLOPs), (3549 GFLOPs)
// i checksum 1.315793E+06 h checksum 1.315212E+05
// di checksum 6.676003E+01 dh checksum 6.425067E+01
// dw checksum 1.453750E+09
//
// > ./RNN 20 2 512 64 1
// Forward: 1296 GFLOPs
// Backward: 2235 GFLOPs, (1567 GFLOPs), (3896 GFLOPs)
// i checksum 6.319591E+05 h checksum 6.319605E+04
// di checksum 4.501830E+00 dh checksum 4.489546E+00
// dw checksum 5.012598E+07
//
// > ./RNN 20 2 512 64 2
// Forward: 2635 GFLOPs
// Backward: 2757 GFLOPs, (2001 GFLOPs), (4433 GFLOPs)
// i checksum 5.749536E+05 c checksum 4.365091E+05 h checksum 5.774818E+04
// di checksum 3.842206E+02 dc checksum 9.323785E+03 dh checksum 1.182566E+01
// dw checksum 4.313461E+08
//
// > ./RNN 20 2 512 64 3
// Forward: 2428 GFLOPs
// Backward: 2645 GFLOPs, (1915 GFLOPs), (4270 GFLOPs)
// i checksum 6.358978E+05 h checksum 6.281680E+04
// di checksum 6.296622E+00 dh checksum 2.289960E+05
// dw checksum 5.397419E+07
// Define some error checking macros.
#define cudaErrCheck(stat) { cudaErrCheck_((stat), __FILE__, __LINE__); }
void cudaErrCheck_(cudaError_t stat, const char *file, int line) {
if (stat != cudaSuccess) {
fprintf(stderr, "CUDA Error: %s %s %d\n", cudaGetErrorString(stat), file, line);
}
}
#define cudnnErrCheck(stat) { cudnnErrCheck_((stat), __FILE__, __LINE__); }
void cudnnErrCheck_(cudnnStatus_t stat, const char *file, int line) {
if (stat != CUDNN_STATUS_SUCCESS) {
fprintf(stderr, "cuDNN Error: %s %s %d\n", cudnnGetErrorString(stat), file, line);
}
}
__global__ void initGPUData_ker(float *data, int numElements, float value) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < numElements) {
data[tid] = value;
}
}
void initGPUData(float *data, int numElements, float value) {
dim3 gridDim;
dim3 blockDim;
blockDim.x = 1024;
gridDim.x = (numElements + blockDim.x - 1) / blockDim.x;
initGPUData_ker <<< gridDim, blockDim >>> (data, numElements, value);
}
int main(int argc, char* argv[]) {
int seqLength;
int numLayers;
int hiddenSize;
int inputSize;
int miniBatch;
float dropout;
bool bidirectional;
int mode;
int persistent;
FILE *fp;
fp=fopen("result.txt","w");
if (argc == 6) {
seqLength = atoi(argv[1]);
numLayers = atoi(argv[2]);
hiddenSize = atoi(argv[3]);
inputSize = hiddenSize;
miniBatch = atoi(argv[4]);
dropout = 0;
bidirectional = 0;
mode = atoi(argv[5]);
persistent = 0;
}
else {
printf("Usage:\n");
printf("./RNN <seqLength> <numLayers> <hiddenSize> <miniBatch> <mode>\n");
printf("Modes: 0 = RNN_RELU, 1 = RNN_TANH, 2 = LSTM, 3 = GRU\n");
return 1;
}
// -------------------------
// Create cudnn context
// -------------------------
cudnnHandle_t cudnnHandle;
cudnnErrCheck(cudnnCreate(&cudnnHandle));
// -------------------------
// Set up inputs and outputs
// -------------------------
void *x;
void *hx = NULL;
void *cx = NULL;
void *dx;
void *dhx = NULL;
void *dcx = NULL;
void *y;
void *hy = NULL;
void *cy = NULL;
void *dy;
void *dhy = NULL;
void *dcy = NULL;
// Memory allocation. hx, cx, dhx, dcx, hy, cy, dhy and dcy can be NULL.
cudaErrCheck(cudaMalloc((void**)&x, seqLength * inputSize * miniBatch * sizeof(float)));
cudaErrCheck(cudaMalloc((void**)&hx, numLayers * hiddenSize * miniBatch * (bidirectional ? 2 : 1) * sizeof(float)));
cudaErrCheck(cudaMalloc((void**)&cx, numLayers * hiddenSize * miniBatch * (bidirectional ? 2 : 1) * sizeof(float)));
cudaErrCheck(cudaMalloc((void**)&dx, seqLength * inputSize * miniBatch * sizeof(float)));
cudaErrCheck(cudaMalloc((void**)&dhx, numLayers * hiddenSize * miniBatch * (bidirectional ? 2 : 1) * sizeof(float)));
cudaErrCheck(cudaMalloc((void**)&dcx, numLayers * hiddenSize * miniBatch * (bidirectional ? 2 : 1) * sizeof(float)));
cudaErrCheck(cudaMalloc((void**)&y, seqLength * hiddenSize * miniBatch * (bidirectional ? 2 : 1) * sizeof(float)));
cudaErrCheck(cudaMalloc((void**)&hy, numLayers * hiddenSize * miniBatch * (bidirectional ? 2 : 1) * sizeof(float)));
cudaErrCheck(cudaMalloc((void**)&cy, numLayers * hiddenSize * miniBatch * (bidirectional ? 2 : 1) * sizeof(float)));
cudaErrCheck(cudaMalloc((void**)&dy, seqLength * hiddenSize * miniBatch * (bidirectional ? 2 : 1) * sizeof(float)));
cudaErrCheck(cudaMalloc((void**)&dhy, numLayers * hiddenSize * miniBatch * (bidirectional ? 2 : 1) * sizeof(float)));
cudaErrCheck(cudaMalloc((void**)&dcy, numLayers * hiddenSize * miniBatch * (bidirectional ? 2 : 1) * sizeof(float)));
// Set up tensor descriptors. x/y/dx/dy are arrays, one per time step.
cudnnTensorDescriptor_t *xDesc, *yDesc, *dxDesc, *dyDesc;
cudnnTensorDescriptor_t hxDesc, cxDesc;
cudnnTensorDescriptor_t hyDesc, cyDesc;
cudnnTensorDescriptor_t dhxDesc, dcxDesc;
cudnnTensorDescriptor_t dhyDesc, dcyDesc;
xDesc = (cudnnTensorDescriptor_t*)malloc(seqLength * sizeof(cudnnTensorDescriptor_t));
yDesc = (cudnnTensorDescriptor_t*)malloc(seqLength * sizeof(cudnnTensorDescriptor_t));
dxDesc = (cudnnTensorDescriptor_t*)malloc(seqLength * sizeof(cudnnTensorDescriptor_t));
dyDesc = (cudnnTensorDescriptor_t*)malloc(seqLength * sizeof(cudnnTensorDescriptor_t));
int dimA[3];
int strideA[3];
// In this example dimA[1] is constant across the whole sequence
// This isn't required, all that is required is that it does not increase.
for (int i = 0; i < seqLength; i++) {
cudnnErrCheck(cudnnCreateTensorDescriptor(&xDesc[i]));
cudnnErrCheck(cudnnCreateTensorDescriptor(&yDesc[i]));
cudnnErrCheck(cudnnCreateTensorDescriptor(&dxDesc[i]));
cudnnErrCheck(cudnnCreateTensorDescriptor(&dyDesc[i]));
dimA[0] = miniBatch;
dimA[1] = inputSize;
dimA[2] = 1;
strideA[0] = dimA[2] * dimA[1];
strideA[1] = dimA[2];
strideA[2] = 1;
cudnnErrCheck(cudnnSetTensorNdDescriptor(xDesc[i], CUDNN_DATA_FLOAT, 3, dimA, strideA));
cudnnErrCheck(cudnnSetTensorNdDescriptor(dxDesc[i], CUDNN_DATA_FLOAT, 3, dimA, strideA));
dimA[0] = miniBatch;
dimA[1] = bidirectional ? hiddenSize * 2 : hiddenSize;
dimA[2] = 1;
strideA[0] = dimA[2] * dimA[1];
strideA[1] = dimA[2];
strideA[2] = 1;
cudnnErrCheck(cudnnSetTensorNdDescriptor(yDesc[i], CUDNN_DATA_FLOAT, 3, dimA, strideA));
cudnnErrCheck(cudnnSetTensorNdDescriptor(dyDesc[i], CUDNN_DATA_FLOAT, 3, dimA, strideA));
}
dimA[0] = numLayers * (bidirectional ? 2 : 1);
dimA[1] = miniBatch;
dimA[2] = hiddenSize;
strideA[0] = dimA[2] * dimA[1];
strideA[1] = dimA[2];
strideA[2] = 1;
cudnnErrCheck(cudnnCreateTensorDescriptor(&hxDesc));
cudnnErrCheck(cudnnCreateTensorDescriptor(&cxDesc));
cudnnErrCheck(cudnnCreateTensorDescriptor(&hyDesc));
cudnnErrCheck(cudnnCreateTensorDescriptor(&cyDesc));
cudnnErrCheck(cudnnCreateTensorDescriptor(&dhxDesc));
cudnnErrCheck(cudnnCreateTensorDescriptor(&dcxDesc));
cudnnErrCheck(cudnnCreateTensorDescriptor(&dhyDesc));
cudnnErrCheck(cudnnCreateTensorDescriptor(&dcyDesc));
cudnnErrCheck(cudnnSetTensorNdDescriptor(hxDesc, CUDNN_DATA_FLOAT, 3, dimA, strideA));
cudnnErrCheck(cudnnSetTensorNdDescriptor(cxDesc, CUDNN_DATA_FLOAT, 3, dimA, strideA));
cudnnErrCheck(cudnnSetTensorNdDescriptor(hyDesc, CUDNN_DATA_FLOAT, 3, dimA, strideA));
cudnnErrCheck(cudnnSetTensorNdDescriptor(cyDesc, CUDNN_DATA_FLOAT, 3, dimA, strideA));
cudnnErrCheck(cudnnSetTensorNdDescriptor(dhxDesc, CUDNN_DATA_FLOAT, 3, dimA, strideA));
cudnnErrCheck(cudnnSetTensorNdDescriptor(dcxDesc, CUDNN_DATA_FLOAT, 3, dimA, strideA));
cudnnErrCheck(cudnnSetTensorNdDescriptor(dhyDesc, CUDNN_DATA_FLOAT, 3, dimA, strideA));
cudnnErrCheck(cudnnSetTensorNdDescriptor(dcyDesc, CUDNN_DATA_FLOAT, 3, dimA, strideA));
// -------------------------
// Set up the dropout descriptor (needed for the RNN descriptor)
// -------------------------
unsigned long long seed = 1337ull; // Pick a seed.
cudnnDropoutDescriptor_t dropoutDesc;
cudnnErrCheck(cudnnCreateDropoutDescriptor(&dropoutDesc));
// How much memory does dropout need for states?
// These states are used to generate random numbers internally
// and should not be freed until the RNN descriptor is no longer used
size_t stateSize;
void *states;
cudnnErrCheck(cudnnDropoutGetStatesSize(cudnnHandle, &stateSize));
cudaErrCheck(cudaMalloc(&states, stateSize));
cudnnErrCheck(cudnnSetDropoutDescriptor(dropoutDesc,
cudnnHandle,
dropout,
states,
stateSize,
seed));
// -------------------------
// Set up the RNN descriptor
// -------------------------
cudnnRNNDescriptor_t rnnDesc;
cudnnRNNMode_t RNNMode;
cudnnRNNAlgo_t RNNAlgo;
cudnnErrCheck(cudnnCreateRNNDescriptor(&rnnDesc));
if (mode == 0) RNNMode = CUDNN_RNN_RELU;
else if (mode == 1) RNNMode = CUDNN_RNN_TANH;
else if (mode == 2) RNNMode = CUDNN_LSTM;
else if (mode == 3) RNNMode = CUDNN_GRU;
else { fprintf(stderr, "Invalid mode %d, expected 0-3\n", mode); return 1; }
// Persistent RNNs are only supported on Pascal+ GPUs.
if (persistent == 0) RNNAlgo = CUDNN_RNN_ALGO_STANDARD;
else if (persistent == 1) RNNAlgo = CUDNN_RNN_ALGO_PERSIST_STATIC;
else if (persistent == 2) RNNAlgo = CUDNN_RNN_ALGO_PERSIST_DYNAMIC;
cudnnErrCheck(cudnnSetRNNDescriptor_v6(cudnnHandle,
rnnDesc,
hiddenSize,
numLayers,
dropoutDesc,
CUDNN_LINEAR_INPUT, // We can also skip the input matrix transformation
bidirectional ? CUDNN_BIDIRECTIONAL : CUDNN_UNIDIRECTIONAL,
RNNMode,
RNNAlgo, // Can be changed to use persistent RNNs on Pascal+ GPUs.
CUDNN_DATA_FLOAT));
// -------------------------
// Set up parameters
// -------------------------
// This needs to be done after the rnn descriptor is set as otherwise
// we don't know how many parameters we have to allocate
void *w;
void *dw;
cudnnFilterDescriptor_t wDesc, dwDesc;
cudnnErrCheck(cudnnCreateFilterDescriptor(&wDesc));
cudnnErrCheck(cudnnCreateFilterDescriptor(&dwDesc));
size_t weightsSize;
cudnnErrCheck(cudnnGetRNNParamsSize(cudnnHandle, rnnDesc, xDesc[0], &weightsSize, CUDNN_DATA_FLOAT));
int dimW[3];
dimW[0] = weightsSize / sizeof(float);
dimW[1] = 1;
dimW[2] = 1;
cudnnErrCheck(cudnnSetFilterNdDescriptor(wDesc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 3, dimW));
cudnnErrCheck(cudnnSetFilterNdDescriptor(dwDesc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 3, dimW));
cudaErrCheck(cudaMalloc((void**)&w, weightsSize));
cudaErrCheck(cudaMalloc((void**)&dw, weightsSize));
// -------------------------
// Set up work space and reserved memory
// -------------------------
void *workspace;
void *reserveSpace;
size_t workSize;
size_t reserveSize;
// Need for every pass
cudnnErrCheck(cudnnGetRNNWorkspaceSize(cudnnHandle, rnnDesc, seqLength, xDesc, &workSize));
// Only needed in training, shouldn't be touched between passes.
cudnnErrCheck(cudnnGetRNNTrainingReserveSize(cudnnHandle, rnnDesc, seqLength, xDesc, &reserveSize));
cudaErrCheck(cudaMalloc((void**)&workspace, workSize));
cudaErrCheck(cudaMalloc((void**)&reserveSpace, reserveSize));
// *********************************************************************************************************
// Initialise weights and inputs
// *********************************************************************************************************
// We initialise to something simple.
// Matrices are initialised to 1 / matrixSize, biases to 1, data is 1.
initGPUData((float*)x, seqLength * inputSize * miniBatch, 1.f);
if (hx != NULL) initGPUData((float*)hx, numLayers * hiddenSize * miniBatch * (bidirectional ? 2 : 1), 1.f);
if (cx != NULL) initGPUData((float*)cx, numLayers * hiddenSize * miniBatch * (bidirectional ? 2 : 1), 1.f);
initGPUData((float*)dy, seqLength * hiddenSize * miniBatch * (bidirectional ? 2 : 1), 1.f);
if (dhy != NULL) initGPUData((float*)dhy, numLayers * hiddenSize * miniBatch * (bidirectional ? 2 : 1), 1.f);
if (dcy != NULL) initGPUData((float*)dcy, numLayers * hiddenSize * miniBatch * (bidirectional ? 2 : 1), 1.f);
// Weights
int numLinearLayers = 0;
if (RNNMode == CUDNN_RNN_RELU || RNNMode == CUDNN_RNN_TANH) {
numLinearLayers = 2;
}
else if (RNNMode == CUDNN_LSTM) {
numLinearLayers = 8;
}
else if (RNNMode == CUDNN_GRU) {
numLinearLayers = 6;
}
for (int layer = 0; layer < numLayers * (bidirectional ? 2 : 1); layer++) {
for (int linLayerID = 0; linLayerID < numLinearLayers; linLayerID++) {
cudnnFilterDescriptor_t linLayerMatDesc;
cudnnErrCheck(cudnnCreateFilterDescriptor(&linLayerMatDesc));
float *linLayerMat;
cudnnErrCheck(cudnnGetRNNLinLayerMatrixParams( cudnnHandle,
rnnDesc,
layer,
xDesc[0],
wDesc,
w,
linLayerID,
linLayerMatDesc,
(void**)&linLayerMat));
cudnnDataType_t dataType;
cudnnTensorFormat_t format;
int nbDims;
int filterDimA[3];
cudnnErrCheck(cudnnGetFilterNdDescriptor(linLayerMatDesc,
3,
&dataType,
&format,
&nbDims,
filterDimA));
initGPUData(linLayerMat, filterDimA[0] * filterDimA[1] * filterDimA[2], 1.f / (float)(filterDimA[0] * filterDimA[1] * filterDimA[2]));
cudnnErrCheck(cudnnDestroyFilterDescriptor(linLayerMatDesc));
cudnnFilterDescriptor_t linLayerBiasDesc;
cudnnErrCheck(cudnnCreateFilterDescriptor(&linLayerBiasDesc));
float *linLayerBias;
cudnnErrCheck(cudnnGetRNNLinLayerBiasParams( cudnnHandle,
rnnDesc,
layer,
xDesc[0],
wDesc,
w,
linLayerID,
linLayerBiasDesc,
(void**)&linLayerBias));
cudnnErrCheck(cudnnGetFilterNdDescriptor(linLayerBiasDesc,
3,
&dataType,
&format,
&nbDims,
filterDimA));
initGPUData(linLayerBias, filterDimA[0] * filterDimA[1] * filterDimA[2], 1.f);
cudnnErrCheck(cudnnDestroyFilterDescriptor(linLayerBiasDesc));
}
}
// *********************************************************************************************************
// Dynamic persistent RNN plan (if using this algo)
// *********************************************************************************************************
cudnnPersistentRNNPlan_t rnnPlan;
if (RNNAlgo == CUDNN_RNN_ALGO_PERSIST_DYNAMIC) {
// Note: This step is expensive. Once completed the plan can be reused so long as the descriptor
// minibatch or datatype don't change.
cudnnErrCheck(cudnnCreatePersistentRNNPlan(rnnDesc, miniBatch, CUDNN_DATA_FLOAT, &rnnPlan));
// Tell calls using this descriptor which plan to use.
cudnnErrCheck(cudnnSetPersistentRNNPlan(rnnDesc, rnnPlan));
}
// *********************************************************************************************************
// At this point all of the setup is done. We now need to pass through the RNN.
// *********************************************************************************************************
cudaErrCheck(cudaDeviceSynchronize());
cudaEvent_t start, stop;
float timeForward, timeBackward1, timeBackward2;
cudaErrCheck(cudaEventCreate(&start));
cudaErrCheck(cudaEventCreate(&stop));
cudaErrCheck(cudaEventRecord(start));
// If we're not training we use this instead
// cudnnErrCheck(cudnnRNNForwardInference(cudnnHandle,
// rnnDesc,
// seqLength,
// xDesc,
// x,
// hxDesc,
// hx,
// cxDesc,
// cx,
// wDesc,
// w,
// yDesc,
// y,
// hyDesc,
// hy,
// cyDesc,
// cy,
// workspace,
// workSize));
cudnnErrCheck(cudnnRNNForwardTraining(cudnnHandle,
rnnDesc,
seqLength,
xDesc,
x,
hxDesc,
hx,
cxDesc,
cx,
wDesc,
w,
yDesc,
y,
hyDesc,
hy,
cyDesc,
cy,
workspace,
workSize,
reserveSpace,
reserveSize));
cudaErrCheck(cudaEventRecord(stop));
cudaErrCheck(cudaEventSynchronize(stop));
cudaErrCheck(cudaEventElapsedTime(&timeForward, start, stop));
cudaErrCheck(cudaEventRecord(start));
cudnnErrCheck(cudnnRNNBackwardData(cudnnHandle,
rnnDesc,
seqLength,
yDesc,
y,
dyDesc,
dy,
dhyDesc,
dhy,
dcyDesc,
dcy,
wDesc,
w,
hxDesc,
hx,
cxDesc,
cx,
dxDesc,
dx,
dhxDesc,
dhx,
dcxDesc,
dcx,
workspace,
workSize,
reserveSpace,
reserveSize ));
cudaErrCheck(cudaEventRecord(stop));
cudaErrCheck(cudaEventSynchronize(stop));
cudaErrCheck(cudaEventElapsedTime(&timeBackward1, start, stop));
cudaErrCheck(cudaEventRecord(start));
// cudnnRNNBackwardWeights adds to the data in dw.
cudaErrCheck(cudaMemset(dw, 0, weightsSize));
cudnnErrCheck(cudnnRNNBackwardWeights( cudnnHandle,
rnnDesc,
seqLength,
xDesc,
x,
hxDesc,
hx,
yDesc,
y,
workspace,
workSize,
dwDesc,
dw,
reserveSpace,
reserveSize ));
cudaErrCheck(cudaEventRecord(stop));
cudaErrCheck(cudaEventSynchronize(stop));
cudaErrCheck(cudaEventElapsedTime(&timeBackward2, start, stop));
int numMats = 0;
if (RNNMode == CUDNN_RNN_RELU || RNNMode == CUDNN_RNN_TANH) {
numMats = 2;
}
else if (RNNMode == CUDNN_LSTM) {
numMats = 8;
}
else if (RNNMode == CUDNN_GRU) {
numMats = 6;
}
// Calculate FLOPS
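// Cost model: each of the numMats per-gate weight matrices (input-to-hidden and
// recurrent; inputSize == hiddenSize here) is a hiddenSize x hiddenSize GEMM
// applied per time step, minibatch sample, layer and direction, at 2 flops per
// multiply-add; elapsed time is in ms, so the 1e6 divisor yields GFLOPS.
// Backward uses 4x as a rough combined count for the data- and weight-gradient
// passes; bias and pointwise work is ignored.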
printf("Forward: %3.0f GFLOPS\n", numMats * 2ull * (bidirectional ? 2 : 1) * hiddenSize * hiddenSize * seqLength * miniBatch * numLayers / (1e6 * timeForward));
printf("Backward: %3.0f GFLOPS, ", numMats * 4ull * (bidirectional ? 2 : 1) * hiddenSize * hiddenSize * seqLength * miniBatch * numLayers / (1e6 * (timeBackward1 + timeBackward2)));
printf("(%3.0f GFLOPS), ", numMats * 2ull * (bidirectional ? 2 : 1) * hiddenSize * hiddenSize * seqLength * miniBatch * numLayers / (1e6 * timeBackward1));
printf("(%3.0f GFLOPS)\n", numMats * 2ull * (bidirectional ? 2 : 1) * hiddenSize * hiddenSize * seqLength * miniBatch * numLayers / (1e6 * timeBackward2));
// Calculate FLOPS
fprintf(fp,"Forward: %3.0f GFLOPS\n", numMats * 2ull * (bidirectional ? 2 : 1) * hiddenSize * hiddenSize * seqLength * miniBatch * numLayers / (1e6 * timeForward));
fprintf(fp,"Backward: %3.0f GFLOPS, ", numMats * 4ull * (bidirectional ? 2 : 1) * hiddenSize * hiddenSize * seqLength * miniBatch * numLayers / (1e6 * (timeBackward1 + timeBackward2)));
fprintf(fp,"(%3.0f GFLOPS), ", numMats * 2ull * (bidirectional ? 2 : 1) * hiddenSize * hiddenSize * seqLength * miniBatch * numLayers / (1e6 * timeBackward1));
fprintf(fp,"(%3.0f GFLOPS)\n", numMats * 2ull * (bidirectional ? 2 : 1) * hiddenSize * hiddenSize * seqLength * miniBatch * numLayers / (1e6 * timeBackward2));
// Make double-sure everything is finished before we copy for result checking.
cudaDeviceSynchronize();
// *********************************************************************************************************
// Print checksums.
// *********************************************************************************************************
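// The loops below assume y is laid out as [seqLength][miniBatch][hiddenSize * numDirections]
// and hy/cy as [numLayers * numDirections][miniBatch][hiddenSize]; per-minibatch
// partial sums are kept in double so the checksum is reasonably order-insensitive.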
if (true) {
float* testOutputi;
float* testOutputh;
float* testOutputc;
int biDirScale = (bidirectional ? 2 : 1);
testOutputi = (float*)malloc(hiddenSize * seqLength * miniBatch * biDirScale * sizeof(float));
testOutputh = (float*)malloc(hiddenSize * miniBatch * numLayers * biDirScale * sizeof(float));
testOutputc = (float*)malloc(hiddenSize * miniBatch * numLayers * biDirScale * sizeof(float));
cudaErrCheck(cudaMemcpy(testOutputi, y, hiddenSize * seqLength * miniBatch * biDirScale * sizeof(float), cudaMemcpyDeviceToHost));
if (hy != NULL) cudaErrCheck(cudaMemcpy(testOutputh, hy, numLayers * hiddenSize * miniBatch * biDirScale * sizeof(float), cudaMemcpyDeviceToHost));
if (cy != NULL && RNNMode == CUDNN_LSTM) cudaErrCheck(cudaMemcpy(testOutputc, cy, numLayers * hiddenSize * miniBatch * biDirScale * sizeof(float), cudaMemcpyDeviceToHost));
double checksumi = 0.f;
double checksumh = 0.f;
double checksumc = 0.f;
for (int m = 0; m < miniBatch; m++) {
double localSumi = 0;
double localSumh = 0;
double localSumc = 0;
for (int j = 0; j < seqLength; j++) {
for (int i = 0; i < hiddenSize * biDirScale; i++) {
localSumi += testOutputi[j * miniBatch * hiddenSize * biDirScale + m * hiddenSize * biDirScale + i];
}
}
for (int j = 0; j < numLayers * biDirScale; j++) {
for (int i = 0; i < hiddenSize; i++) {
if (hy != NULL) localSumh += testOutputh[j * hiddenSize * miniBatch + m * hiddenSize + i];
if (cy != NULL) if (RNNMode == CUDNN_LSTM) localSumc += testOutputc[j * hiddenSize * miniBatch + m * hiddenSize + i];
}
}
checksumi += localSumi;
checksumh += localSumh;
checksumc += localSumc;
}
printf("i checksum %E ", checksumi);
fprintf(fp,"i checksum %E ", checksumi);
if (RNNMode == CUDNN_LSTM) { printf("c checksum %E ", checksumc); fprintf(fp,"c checksum %E ", checksumc); }
printf("h checksum %E\n", checksumh);
fprintf(fp,"h checksum %E\n", checksumh);
free(testOutputi);
free(testOutputc);
free(testOutputh);
}
if (true) {
float* testOutputdi;
float* testOutputdh;
float* testOutputdc;
int biDirScale = (bidirectional ? 2 : 1);
testOutputdi = (float*)malloc(inputSize * seqLength * miniBatch * sizeof(float));
testOutputdh = (float*)malloc(hiddenSize * miniBatch * numLayers * biDirScale * sizeof(float));
testOutputdc = (float*)malloc(hiddenSize * miniBatch * numLayers * biDirScale * sizeof(float));
cudaErrCheck(cudaMemcpy(testOutputdi, dx, seqLength * miniBatch * inputSize * sizeof(float), cudaMemcpyDeviceToHost));
if (dhx != NULL) cudaErrCheck(cudaMemcpy(testOutputdh, dhx, numLayers * hiddenSize * miniBatch * biDirScale * sizeof(float), cudaMemcpyDeviceToHost));
if (dcx != NULL) if (RNNMode == CUDNN_LSTM) cudaErrCheck(cudaMemcpy(testOutputdc, dcx, numLayers * hiddenSize * miniBatch * biDirScale * sizeof(float), cudaMemcpyDeviceToHost));
// Accumulate in double (as in the forward checksums) to avoid float truncation.
double checksumdi = 0.;
double checksumdh = 0.;
double checksumdc = 0.;
for (int m = 0; m < miniBatch; m++) {
double localSumdi = 0;
double localSumdh = 0;
double localSumdc = 0;
for (int j = 0; j < seqLength; j++) {
for (int i = 0; i < inputSize; i++) {
localSumdi += testOutputdi[j * miniBatch * inputSize + m * inputSize + i];
}
}
for (int j = 0; j < numLayers * biDirScale; j++) {
for (int i = 0; i < hiddenSize; i++) {
localSumdh += testOutputdh[j * hiddenSize * miniBatch + m * hiddenSize + i];
if (RNNMode == CUDNN_LSTM) localSumdc += testOutputdc[j * hiddenSize * miniBatch + m * hiddenSize + i];
}
}
checksumdi += localSumdi;
checksumdh += localSumdh;
checksumdc += localSumdc;
}
printf("di checksum %E ", checksumdi);
fprintf(fp,"di checksum %E ", checksumdi);
if (RNNMode == CUDNN_LSTM) { printf("dc checksum %E ", checksumdc); fprintf(fp,"dc checksum %E ", checksumdc); }
printf("dh checksum %E\n", checksumdh);
fprintf(fp,"dh checksum %E\n", checksumdh);
free(testOutputdi);
free(testOutputdh);
free(testOutputdc);
}
if (true) {
float* testOutputdw;
testOutputdw = (float*)malloc(weightsSize);
cudaErrCheck(cudaMemcpy(testOutputdw, dw, weightsSize, cudaMemcpyDeviceToHost));
double checksumdw = 0.;
for (int i = 0; i < weightsSize / sizeof(float); i++) {
checksumdw += testOutputdw[i];
}
printf("dw checksum %E\n", checksumdw);
fprintf(fp,"dw checksum %E\n", checksumdw);
free(testOutputdw);
}
if (RNNAlgo == CUDNN_RNN_ALGO_PERSIST_DYNAMIC) {
cudnnDestroyPersistentRNNPlan(rnnPlan);
}
cudaFree(x);
cudaFree(hx);
cudaFree(cx);
cudaFree(y);
cudaFree(hy);
cudaFree(cy);
cudaFree(dx);
cudaFree(dhx);
cudaFree(dcx);
cudaFree(dy);
cudaFree(dhy);
cudaFree(dcy);
cudaFree(workspace);
cudaFree(reserveSpace);
cudaFree(w);
cudaFree(dw);
cudaFree(states);
for (int i = 0; i < seqLength; i++) {
cudnnDestroyTensorDescriptor(xDesc[i]);
cudnnDestroyTensorDescriptor(yDesc[i]);
cudnnDestroyTensorDescriptor(dxDesc[i]);
cudnnDestroyTensorDescriptor(dyDesc[i]);
}
cudnnDestroyTensorDescriptor(hxDesc);
cudnnDestroyTensorDescriptor(cxDesc);
cudnnDestroyTensorDescriptor(hyDesc);
cudnnDestroyTensorDescriptor(cyDesc);
cudnnDestroyTensorDescriptor(dhxDesc);
cudnnDestroyTensorDescriptor(dcxDesc);
cudnnDestroyTensorDescriptor(dhyDesc);
cudnnDestroyTensorDescriptor(dcyDesc);
cudnnDestroyDropoutDescriptor(dropoutDesc);
cudnnDestroyRNNDescriptor(rnnDesc);
cudnnDestroyFilterDescriptor(wDesc);
cudnnDestroyFilterDescriptor(dwDesc);
free(xDesc);
free(yDesc);
free(dxDesc);
free(dyDesc);
cudnnDestroy(cudnnHandle);
fclose(fp);
return 0;
}
|
b0c65ad4c966a615272d540f075e79dd86aebd62.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "mat_mult_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *a = NULL;
hipMalloc(&a, XSIZE*YSIZE);
int *b = NULL;
hipMalloc(&b, XSIZE*YSIZE);
int *c = NULL;
hipMalloc(&c, XSIZE*YSIZE);
int mat_rows = 1;
int mat_cols = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
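// iXSIZE/iYSIZE were rounded up to the next multiple of the block dimensions,
// so the grid below tiles the whole matrix (a loop-based ceiling divide).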
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((mat_mult_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, a, b, c, mat_rows, mat_cols);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((mat_mult_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, a, b, c, mat_rows, mat_cols);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((mat_mult_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, a, b, c, mat_rows, mat_cols);
}
auto end = steady_clock::now();
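// Note: kernel launches are asynchronous and there is no device synchronize
// before taking `end`, so part of the final kernels' execution can fall
// outside the measured interval.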
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | b0c65ad4c966a615272d540f075e79dd86aebd62.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "mat_mult_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *a = NULL;
cudaMalloc(&a, XSIZE*YSIZE);
int *b = NULL;
cudaMalloc(&b, XSIZE*YSIZE);
int *c = NULL;
cudaMalloc(&c, XSIZE*YSIZE);
int mat_rows = 1;
int mat_cols = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
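// iXSIZE/iYSIZE were rounded up to the next multiple of the block dimensions,
// so the grid below tiles the whole matrix (a loop-based ceiling divide).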
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
mat_mult_kernel<<<gridBlock,threadBlock>>>(a,b,c,mat_rows,mat_cols);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
mat_mult_kernel<<<gridBlock,threadBlock>>>(a,b,c,mat_rows,mat_cols);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
mat_mult_kernel<<<gridBlock,threadBlock>>>(a,b,c,mat_rows,mat_cols);
}
auto end = steady_clock::now();
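// Note: kernel launches are asynchronous and there is no device synchronize
// before taking `end`, so part of the final kernels' execution can fall
// outside the measured interval.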
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
2b040161f70d9cb574f8db6e30e140fc2279db22.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* spGPU - Sparse matrices on GPU library.
*
* Copyright (C) 2010 - 2012
* Davide Barbieri - University of Rome Tor Vergata
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 3 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include "cudadebug.h"
#include "cudalang.h"
#include "hip/hip_complex.h"
extern "C"
{
#include "core.h"
#include "vector.h"
}
#include "debug.h"
#define BLOCK_SIZE 512
__global__ void spgpuZaxpby_krn(hipDoubleComplex *z, int n, hipDoubleComplex beta, hipDoubleComplex *y, hipDoubleComplex alpha, hipDoubleComplex* x)
{
int id = threadIdx.x + BLOCK_SIZE*blockIdx.x;
if (id < n)
{
// Since z, x and y are accessed with the same offset by the same thread,
// and the write to z follows the x and y read, x, y and z can share the same base address (in-place computing).
if (cuDoubleComplex_isZero(beta))
z[id] = cuCmul(alpha,x[id]);
else
z[id] = cuCfma(alpha, x[id], cuCmul(beta,y[id]));
}
}
void spgpuZaxpby_(spgpuHandle_t handle,
__device hipDoubleComplex *z,
int n,
hipDoubleComplex beta,
__device hipDoubleComplex *y,
hipDoubleComplex alpha,
__device hipDoubleComplex* x)
{
int msize = (n+BLOCK_SIZE-1)/BLOCK_SIZE;
dim3 block(BLOCK_SIZE);
dim3 grid(msize);
hipLaunchKernelGGL(( spgpuZaxpby_krn), dim3(grid), dim3(block), 0, handle->currentStream, z, n, beta, y, alpha, x);
}
void spgpuZaxpby(spgpuHandle_t handle,
__device hipDoubleComplex *z,
int n,
hipDoubleComplex beta,
__device hipDoubleComplex *y,
hipDoubleComplex alpha,
__device hipDoubleComplex* x)
{
int maxNForACall = max(handle->maxGridSizeX, BLOCK_SIZE*handle->maxGridSizeX);
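// A 1-D grid is capped at maxGridSizeX blocks of BLOCK_SIZE threads (presumably
// the device grid.x limit), so one launch covers at most that many elements;
// longer vectors are processed in chunks by the loop below.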
while (n > maxNForACall) //managing large vectors
{
spgpuZaxpby_(handle, z, maxNForACall, beta, y, alpha, x);
x = x + maxNForACall;
y = y + maxNForACall;
z = z + maxNForACall;
n -= maxNForACall;
}
spgpuZaxpby_(handle, z, n, beta, y, alpha, x);
cudaCheckError("CUDA error on zaxpby");
}
void spgpuZmaxpby(spgpuHandle_t handle,
__device hipDoubleComplex *z,
int n,
hipDoubleComplex beta,
__device hipDoubleComplex *y,
hipDoubleComplex alpha,
__device hipDoubleComplex* x,
int count, int pitch)
{
for (int i=0; i<count; i++)
spgpuZaxpby(handle, z+pitch*i, n, beta, y+pitch*i, alpha, x+pitch*i);
}
| 2b040161f70d9cb574f8db6e30e140fc2279db22.cu | /*
* spGPU - Sparse matrices on GPU library.
*
* Copyright (C) 2010 - 2012
* Davide Barbieri - University of Rome Tor Vergata
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 3 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include "cudadebug.h"
#include "cudalang.h"
#include "cuComplex.h"
extern "C"
{
#include "core.h"
#include "vector.h"
}
#include "debug.h"
#define BLOCK_SIZE 512
__global__ void spgpuZaxpby_krn(cuDoubleComplex *z, int n, cuDoubleComplex beta, cuDoubleComplex *y, cuDoubleComplex alpha, cuDoubleComplex* x)
{
int id = threadIdx.x + BLOCK_SIZE*blockIdx.x;
if (id < n)
{
// Since z, x and y are accessed with the same offset by the same thread,
// and the write to z follows the x and y read, x, y and z can share the same base address (in-place computing).
if (cuDoubleComplex_isZero(beta))
z[id] = cuCmul(alpha,x[id]);
else
z[id] = cuCfma(alpha, x[id], cuCmul(beta,y[id]));
}
}
void spgpuZaxpby_(spgpuHandle_t handle,
__device cuDoubleComplex *z,
int n,
cuDoubleComplex beta,
__device cuDoubleComplex *y,
cuDoubleComplex alpha,
__device cuDoubleComplex* x)
{
int msize = (n+BLOCK_SIZE-1)/BLOCK_SIZE;
dim3 block(BLOCK_SIZE);
dim3 grid(msize);
spgpuZaxpby_krn<<<grid, block, 0, handle->currentStream>>>(z, n, beta, y, alpha, x);
}
void spgpuZaxpby(spgpuHandle_t handle,
__device cuDoubleComplex *z,
int n,
cuDoubleComplex beta,
__device cuDoubleComplex *y,
cuDoubleComplex alpha,
__device cuDoubleComplex* x)
{
int maxNForACall = max(handle->maxGridSizeX, BLOCK_SIZE*handle->maxGridSizeX);
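// A 1-D grid is capped at maxGridSizeX blocks of BLOCK_SIZE threads (presumably
// the device grid.x limit), so one launch covers at most that many elements;
// longer vectors are processed in chunks by the loop below.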
while (n > maxNForACall) //managing large vectors
{
spgpuZaxpby_(handle, z, maxNForACall, beta, y, alpha, x);
x = x + maxNForACall;
y = y + maxNForACall;
z = z + maxNForACall;
n -= maxNForACall;
}
spgpuZaxpby_(handle, z, n, beta, y, alpha, x);
cudaCheckError("CUDA error on daxpby");
}
void spgpuZmaxpby(spgpuHandle_t handle,
__device cuDoubleComplex *z,
int n,
cuDoubleComplex beta,
__device cuDoubleComplex *y,
cuDoubleComplex alpha,
__device cuDoubleComplex* x,
int count, int pitch)
{
for (int i=0; i<count; i++)
spgpuZaxpby(handle, z+pitch*i, n, beta, y+pitch*i, alpha, x+pitch*i);
}
|
497f4543b115145768a172a0737fa96c8e82da89.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "Mask_Subtract_Kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *A = NULL;
hipMalloc(&A, XSIZE*YSIZE);
int *B = NULL;
hipMalloc(&B, XSIZE*YSIZE);
int *devOut = NULL;
hipMalloc(&devOut, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((Mask_Subtract_Kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, A, B, devOut);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((Mask_Subtract_Kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, A, B, devOut);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((Mask_Subtract_Kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, A, B, devOut);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 497f4543b115145768a172a0737fa96c8e82da89.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "Mask_Subtract_Kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *A = NULL;
cudaMalloc(&A, XSIZE*YSIZE);
int *B = NULL;
cudaMalloc(&B, XSIZE*YSIZE);
int *devOut = NULL;
cudaMalloc(&devOut, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
Mask_Subtract_Kernel<<<gridBlock,threadBlock>>>(A,B,devOut);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
Mask_Subtract_Kernel<<<gridBlock,threadBlock>>>(A,B,devOut);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
Mask_Subtract_Kernel<<<gridBlock,threadBlock>>>(A,B,devOut);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
b34531198356f67176b7a3c327e226d3bc31e4b0.hip | // !!! This is a file automatically generated by hipify!!!
#include "OmnidirectionalCamera.cuh"
#include <opencv2/cudev/ptr2d/glob.hpp>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
/*******************************
* cuda_Join_kernel
* arguments
* right : input data pointer (GlobPtrSz)
* left : input data pointer (GlobPtrSz)
* dst : output data pointer (GlobPtrSz)
* vdiff : vertical difference offset (int)
* blendWidth : alpha blend area width for stitching (int)
*******************************/
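// Output column bands, left to right: straight copy from the right image, a
// blendWidth-wide linear crossfade into the left image, straight copy from the
// left image, and a final crossfade that wraps back to the right image
// (alpha ramps 0 -> 1 across each blend band).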
__global__ void cuda_Join_kernel( const cv::cudev::GlobPtrSz<uchar> right ,const cv::cudev::GlobPtrSz<uchar> left , cv::cudev::GlobPtrSz<uchar> dst, int vdiff, int blendWidth){
const int x = blockDim.x * blockIdx.x + threadIdx.x;
const int y = blockDim.y * blockIdx.y + threadIdx.y;
const int dst_color_tid = y * dst.step + (3 * x);
const int right_color_tid = y * right.step + (3 * (x+ (blendWidth/2)) );
const int left_color_tid = ( (y+vdiff) * left.step ) + (3 * (x+ (blendWidth/2) - (dst.cols/6)));
if((x < dst.cols/3) && (y < dst.rows)){
if(x < ((dst.cols/6)-blendWidth/2 ) ){
dst.data[dst_color_tid + 0] = right.data[right_color_tid + 0];
dst.data[dst_color_tid + 1] = right.data[right_color_tid + 1];
dst.data[dst_color_tid + 2] = right.data[right_color_tid + 2];
}else if(x < (dst.cols/6)){ //blending area
float alpha = (float)(x - ( (dst.cols/6) - (blendWidth/2) ) )/(float)blendWidth*2;
dst.data[dst_color_tid + 0] = right.data[right_color_tid + 0]*(1-alpha) + left.data[left_color_tid + 0]*alpha;
dst.data[dst_color_tid + 1] = right.data[right_color_tid + 1]*(1-alpha) + left.data[left_color_tid + 1]*alpha;
dst.data[dst_color_tid + 2] = right.data[right_color_tid + 2]*(1-alpha) + left.data[left_color_tid + 2]*alpha;
}else if(x < ((dst.cols/3)-blendWidth/2 ) ){
dst.data[dst_color_tid + 0] = left.data[left_color_tid + 0];
dst.data[dst_color_tid + 1] = left.data[left_color_tid + 1];
dst.data[dst_color_tid + 2] = left.data[left_color_tid + 2];
}else if(x < (dst.cols/3)){ //blending area
float alpha = (float)(x - ( (dst.cols/3) - (blendWidth/2) ) )/(float)blendWidth*2;
dst.data[dst_color_tid + 0] = left.data[left_color_tid + 0]*(1-alpha) + right.data[(right_color_tid - (dst.cols))+ 0]*alpha;
dst.data[dst_color_tid + 1] = left.data[left_color_tid + 1]*(1-alpha) + right.data[(right_color_tid - (dst.cols))+ 1]*alpha;
dst.data[dst_color_tid + 2] = left.data[left_color_tid + 2]*(1-alpha) + right.data[(right_color_tid - (dst.cols))+ 2]*alpha;
}
}
}
/*******************************
* cuda_Join
* arguments
* right : input data pointer (GpuMat)
* left : input data pointer (GpuMat)
* dst : output data pointer (GpuMat)
* vdiff : vertical difference offset (int)
* blendWidth : alpha blend area width for stitching (int)
*******************************/
void OmnidirectionalCamera::cuda::Join(cv::cuda::GpuMat &right, cv::cuda::GpuMat &left, cv::cuda::GpuMat &dst , int vdiff, int blendWidth){
//create image pointer
cv::cudev::GlobPtrSz<uchar> p_Right = cv::cudev::globPtr(right.ptr<uchar>(), right.step, right.rows, right.cols * right.channels());
cv::cudev::GlobPtrSz<uchar> p_Left = cv::cudev::globPtr(left.ptr<uchar>() , left.step , left.rows , left.cols * left.channels());
cv::cudev::GlobPtrSz<uchar> p_Dst = cv::cudev::globPtr(dst.ptr<uchar>() , dst.step , dst.rows , dst.cols * dst.channels());
const dim3 block(32, 32);
const dim3 grid(cv::cudev::divUp(dst.cols, block.x), cv::cudev::divUp(dst.rows , block.y));
hipLaunchKernelGGL(( cuda_Join_kernel), dim3(grid), dim3(block), 0, 0, p_Right, p_Left, p_Dst , vdiff, blendWidth);
CV_CUDEV_SAFE_CALL(hipGetLastError());
CV_CUDEV_SAFE_CALL(hipDeviceSynchronize());
}
| b34531198356f67176b7a3c327e226d3bc31e4b0.cu | #include "OmnidirectionalCamera.cuh"
#include <opencv2/cudev/ptr2d/glob.hpp>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
/*******************************
* cuda_Join_kernel
* arguments
* right : input data pointer (GlobPtrSz)
* left : input data pointer (GlobPtrSz)
* dst : output data pointer (GlobPtrSz)
* vdiff : vertical difference offset (int)
* blendWidth : alpha blend area width for stitching (int)
*******************************/
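// Output column bands, left to right: straight copy from the right image, a
// blendWidth-wide linear crossfade into the left image, straight copy from the
// left image, and a final crossfade that wraps back to the right image
// (alpha ramps 0 -> 1 across each blend band).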
__global__ void cuda_Join_kernel( const cv::cudev::GlobPtrSz<uchar> right ,const cv::cudev::GlobPtrSz<uchar> left , cv::cudev::GlobPtrSz<uchar> dst, int vdiff, int blendWidth){
const int x = blockDim.x * blockIdx.x + threadIdx.x;
const int y = blockDim.y * blockIdx.y + threadIdx.y;
const int dst_color_tid = y * dst.step + (3 * x);
const int right_color_tid = y * right.step + (3 * (x+ (blendWidth/2)) );
const int left_color_tid = ( (y+vdiff) * left.step ) + (3 * (x+ (blendWidth/2) - (dst.cols/6)));
if((x < dst.cols/3) && (y < dst.rows)){
if(x < ((dst.cols/6)-blendWidth/2 ) ){
dst.data[dst_color_tid + 0] = right.data[right_color_tid + 0];
dst.data[dst_color_tid + 1] = right.data[right_color_tid + 1];
dst.data[dst_color_tid + 2] = right.data[right_color_tid + 2];
}else if(x < (dst.cols/6)){ //blending area
float alpha = (float)(x - ( (dst.cols/6) - (blendWidth/2) ) )/(float)blendWidth*2;
dst.data[dst_color_tid + 0] = right.data[right_color_tid + 0]*(1-alpha) + left.data[left_color_tid + 0]*alpha;
dst.data[dst_color_tid + 1] = right.data[right_color_tid + 1]*(1-alpha) + left.data[left_color_tid + 1]*alpha;
dst.data[dst_color_tid + 2] = right.data[right_color_tid + 2]*(1-alpha) + left.data[left_color_tid + 2]*alpha;
}else if(x < ((dst.cols/3)-blendWidth/2 ) ){
dst.data[dst_color_tid + 0] = left.data[left_color_tid + 0];
dst.data[dst_color_tid + 1] = left.data[left_color_tid + 1];
dst.data[dst_color_tid + 2] = left.data[left_color_tid + 2];
}else if(x < (dst.cols/3)){ //blending area
float alpha = (float)(x - ( (dst.cols/3) - (blendWidth/2) ) )/(float)blendWidth*2;
dst.data[dst_color_tid + 0] = left.data[left_color_tid + 0]*(1-alpha) + right.data[(right_color_tid - (dst.cols))+ 0]*alpha;
dst.data[dst_color_tid + 1] = left.data[left_color_tid + 1]*(1-alpha) + right.data[(right_color_tid - (dst.cols))+ 1]*alpha;
dst.data[dst_color_tid + 2] = left.data[left_color_tid + 2]*(1-alpha) + right.data[(right_color_tid - (dst.cols))+ 2]*alpha;
}
}
}
/*******************************
* cuda_Join
* arguments
* right : input data pointer (GpuMat)
* left : input data pointer (GpuMat)
* dst : output data pointer (GpuMat)
* vdiff : vertical difference offset (int)
* blendWidth : alpha blend area width for stitching (int)
*******************************/
void OmnidirectionalCamera::cuda::Join(cv::cuda::GpuMat &right, cv::cuda::GpuMat &left, cv::cuda::GpuMat &dst , int vdiff, int blendWidth){
//create image pointer
cv::cudev::GlobPtrSz<uchar> p_Right = cv::cudev::globPtr(right.ptr<uchar>(), right.step, right.rows, right.cols * right.channels());
cv::cudev::GlobPtrSz<uchar> p_Left = cv::cudev::globPtr(left.ptr<uchar>() , left.step , left.rows , left.cols * left.channels());
cv::cudev::GlobPtrSz<uchar> p_Dst = cv::cudev::globPtr(dst.ptr<uchar>() , dst.step , dst.rows , dst.cols * dst.channels());
const dim3 block(32, 32);
const dim3 grid(cv::cudev::divUp(dst.cols, block.x), cv::cudev::divUp(dst.rows , block.y));
cuda_Join_kernel<<<grid, block>>>( p_Right, p_Left, p_Dst , vdiff, blendWidth);
CV_CUDEV_SAFE_CALL(cudaGetLastError());
CV_CUDEV_SAFE_CALL(cudaDeviceSynchronize());
}
|
ecc489beac05c2030aae2b9c2b778c8f21771d2e.hip | // !!! This is a file automatically generated by hipify!!!
#include "surf_cuda/common.h"
#include "surf_cuda/cuda_mat.h"
#include "surf_cuda/DoH_filter.cuh"
#include "surf_cuda/cuda_util.cuh"
#include "surf_cuda/surf.h"
using namespace surf_cuda;
//CPU DoH filter that uses the OpenCV-provided filter2D functionality
struct DoHFilter_cpu{
float weight{0.9};
int size;
Mat kernel_xx;
Mat kernel_xy;
Mat kernel_yy;
DoHFilter_cpu(int s):size(s){
//create kernels of approximated second derivative of Gaussian
kernel_xx = Mat::zeros(s,s,CV_32F);
kernel_xx(cv::Rect((size/3+1)/2,0,2*size/3 -1, size/3)).setTo(1);
kernel_xx(cv::Rect((size/3+1)/2,size/3,2*size/3 -1, size/3)).setTo(-2);
kernel_xx(cv::Rect((size/3+1)/2,2*size/3,2*size/3-1, size/3)).setTo(1);
//kernel_yy is the rotation of kernel_xx
cv::rotate(kernel_xx,kernel_yy,cv::ROTATE_90_CLOCKWISE);
//kernel_xy
kernel_xy = Mat::zeros(s,s,CV_32F);
kernel_xy(cv::Rect(size/6,size/6,size/3,size/3)).setTo(1);
kernel_xy(cv::Rect(size/2+1,size/6,size/3,size/3)).setTo(-1);
kernel_xy(cv::Rect(size/6,size/2+1,size/3,size/3)).setTo(-1);
kernel_xy(cv::Rect(size/2+1,size/2+1,size/3,size/3)).setTo(1);
}
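// operator() evaluates the approximated determinant of the Hessian,
// det(H) ~= Dxx*Dyy - (w*Dxy)^2 with w = 0.9 (the SURF paper's correction for
// the box-filter approximation), normalized by size^4 so responses stay
// comparable across filter scales.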
void operator()(Mat src, Mat& dst){
Mat filter_map_xx;
Mat filter_map_xy;
Mat filter_map_yy;
//filter
cv::filter2D(src,filter_map_xx,CV_32F,kernel_xx,cv::Point(-1,-1),0,cv::BORDER_CONSTANT);
cv::filter2D(src,filter_map_xy,CV_32F,kernel_xy,cv::Point(-1,-1),0,cv::BORDER_CONSTANT);
cv::filter2D(src,filter_map_yy,CV_32F,kernel_yy,cv::Point(-1,-1),0,cv::BORDER_CONSTANT);
Mat temp1;
cv::multiply(filter_map_xx,filter_map_yy,temp1,1);
Mat temp2;
cv::pow(filter_map_xy*weight,2,temp2);
dst = (temp1 - temp2)/(float)(size*size*size*size);
}
void box_filter_xx(Mat src, Mat& dst){
cv::filter2D(src,dst,CV_32F,kernel_xx,cv::Point(-1,-1),0,cv::BORDER_CONSTANT);
}
void box_filter_xy(Mat src, Mat& dst){
cv::filter2D(src,dst,CV_32F,kernel_xy,cv::Point(-1,-1),0,cv::BORDER_CONSTANT);
}
void box_filter_yy(Mat src, Mat& dst){
cv::filter2D(src,dst,CV_32F,kernel_yy,cv::Point(-1,-1),0,cv::BORDER_CONSTANT);
}
};
Mat normalize(Mat map){
Mat norm = map.clone();
double max_val;
double min_val;
cv::minMaxLoc(norm,&min_val,&max_val);
cout<<min_val<<' '<<max_val<<endl;
norm = (norm-min_val)/(float)(max_val - min_val);
return norm;
}
#define TEST_IMG 1
#define PRINT 0
int main(){
Mat mat_in_cpu = Mat::ones(10,10,CV_32S);
#if TEST_IMG
Mat img = cv::imread("./data/img1.png");
Mat gray_img;
cv::cvtColor(img,gray_img,cv::COLOR_BGR2GRAY);
gray_img.convertTo(mat_in_cpu,CV_32S);
#endif
int rows = mat_in_cpu.rows;
int cols = mat_in_cpu.cols;
//input image
CudaMat cuda_mat_in(rows,cols,CV_32S);
cuda_mat_in.allocate();
cuda_mat_in.allocateArray();
//integral_image
Mat mat_integral_cpu = Mat::zeros(rows,cols,CV_32S);
Mat mat_integral_gpu = Mat::zeros(rows,cols,CV_32S);
CudaMat cuda_mat_integral(rows,cols,CV_32S);
cuda_mat_integral.allocate();
cuda_mat_integral.allocateArray();
//COPY IMAGE TO DEVICE
cuda_mat_in.copyFromMatToArray(mat_in_cpu);
hipTextureDesc texDesc_integral;
memset(&texDesc_integral, 0, sizeof(texDesc_integral));
texDesc_integral.addressMode[0] = hipAddressModeClamp;
texDesc_integral.addressMode[1] = hipAddressModeClamp;
texDesc_integral.filterMode = hipFilterModePoint;
texDesc_integral.readMode = hipReadModeElementType;
texDesc_integral.normalizedCoords = 0;
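// hipAddressModeClamp replicates edge texels for out-of-range reads of the
// input image; the integral-image texture configured further down uses
// hipAddressModeBorder (reads as zero outside) instead.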
//set texture object
cuda_mat_in.setTextureObjectInterface(texDesc_integral);
SURF surf;
//Create DoH filter
DoHFilter doh_filter_gpu(9);
DoHFilter_cpu doh_filter_cpu(9);
//Blob response map
//===============================
//Stride one test
//CPU result
Mat response_map_stride1_cpu = Mat::zeros(rows,cols,CV_32F);
//GPU result using global memory on Host
Mat response_map_stride1_gpu = Mat::zeros(rows,cols,CV_32F);
//GPU result using texture memory on Host
Mat response_map_stride1_gpu_tex = Mat::zeros(rows,cols,CV_32F);
//GPU result using global memory on Device
CudaMat cuda_response_map_stride1(rows,cols,CV_32F);
cuda_response_map_stride1.allocate();
//GPU result using texture memory on Device
CudaMat cuda_response_map_stride1_tex(rows, cols, CV_32F);
cuda_response_map_stride1_tex.allocate();
//===============================
//Stride two test
Mat response_map_stride2_cpu = Mat::zeros(rows/2,cols/2,CV_32F);
Mat response_map_stride2_gpu = Mat::zeros(rows/2,cols/2,CV_32F);
Mat response_map_stride2_gpu_tex = Mat::zeros(rows/2,cols/2,CV_32F);
CudaMat cuda_response_map_stride2(rows/2,cols/2,CV_32F);
cuda_response_map_stride2.allocate();
CudaMat cuda_response_map_stride2_tex(rows/2, cols/2, CV_32F);
cuda_response_map_stride2_tex.allocate();
//===============================
//Stride four test
Mat response_map_stride4_cpu = Mat::zeros(rows/4,cols/4,CV_32F);
Mat response_map_stride4_gpu = Mat::zeros(rows/4,cols/4,CV_32F);
Mat response_map_stride4_gpu_tex = Mat::zeros(rows/4,cols/4,CV_32F);
CudaMat cuda_response_map_stride4(rows/4,cols/4,CV_32F);
cuda_response_map_stride4.allocate();
CudaMat cuda_response_map_stride4_tex(rows/4, cols/4, CV_32F);
cuda_response_map_stride4_tex.allocate();
//===============================
//Stride eight test
Mat response_map_stride8_cpu = Mat::zeros(rows/8,cols/8,CV_32F);
Mat response_map_stride8_gpu = Mat::zeros(rows/8,cols/8,CV_32F);
Mat response_map_stride8_gpu_tex = Mat::zeros(rows/8,cols/8,CV_32F);
CudaMat cuda_response_map_stride8(rows/8,cols/8,CV_32F);
cuda_response_map_stride8.allocate();
CudaMat cuda_response_map_stride8_tex(rows/8, cols/8, CV_32F);
cuda_response_map_stride8_tex.allocate();
// Specify texture object parameters
hipTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.addressMode[0] = hipAddressModeBorder;
texDesc.addressMode[1] = hipAddressModeBorder;
texDesc.filterMode = hipFilterModePoint;
texDesc.readMode = hipReadModeElementType;
texDesc.normalizedCoords = 0;
//set texture object
cuda_mat_integral.setTextureObjectInterface(texDesc);
//create timer
GpuTimer gpu_timer;
CpuTimer cpu_timer;
//=====================================
//compute integral image
gpu_timer.elapsedTimeStart();
surf.compIntegralImage(cuda_mat_in, cuda_mat_integral);
gpu_timer.elapsedTimeStop();
//=====================================
//copy integral image from global memory to texture memory
gpu_timer.elapsedTimeStart();
//hipMemcpy2DToArray(cuda_array_integral, 0, 0, (void*)cuda_mat_integral.data, cuda_mat_integral.pitch_bytes(), cuda_mat_integral.cols() * sizeof(int), cuda_mat_integral.rows(), hipMemcpyDeviceToDevice);
cuda_mat_integral.copyToArray();
gpu_timer.elapsedTimeStop();
//=====================================
//Compute Blob response Map Using Global Memory
gpu_timer.elapsedTimeStart();
compDoHResponseMap(cuda_mat_integral,cuda_response_map_stride1,doh_filter_gpu,1);
gpu_timer.elapsedTimeStop();
//=====================================
//Compute Blob response Map Using Texture Memory
gpu_timer.elapsedTimeStart();
compDoHResponseMap_texture(cuda_mat_integral,cuda_response_map_stride1_tex,doh_filter_gpu,1);
gpu_timer.elapsedTimeStop();
//=====================================
//Compute Blob response Map Using CPU
cpu_timer.elapsedTimeStart();
doh_filter_cpu(mat_in_cpu,response_map_stride1_cpu);
cpu_timer.elapsedTimeStop();
cout<<"Compute Stride 2"<<endl;
compDoHResponseMap(cuda_mat_integral,cuda_response_map_stride2,doh_filter_gpu,2);
compDoHResponseMap_texture(cuda_mat_integral,cuda_response_map_stride2_tex,doh_filter_gpu,2);
cout<<"Compute Stride 4"<<endl;
compDoHResponseMap(cuda_mat_integral,cuda_response_map_stride4,doh_filter_gpu,4);
compDoHResponseMap_texture(cuda_mat_integral,cuda_response_map_stride4_tex,doh_filter_gpu,4);
cout<<"Compute Stride 8"<<endl;
compDoHResponseMap(cuda_mat_integral,cuda_response_map_stride8,doh_filter_gpu,8);
compDoHResponseMap_texture(cuda_mat_integral,cuda_response_map_stride8_tex,doh_filter_gpu,8);
//copy response map to host
cuda_response_map_stride1.copyToMat(response_map_stride1_gpu);
cuda_response_map_stride1_tex.copyToMat(response_map_stride1_gpu_tex);
cuda_response_map_stride2.copyToMat(response_map_stride2_gpu);
cuda_response_map_stride2_tex.copyToMat(response_map_stride2_gpu_tex);
cuda_response_map_stride4.copyToMat(response_map_stride4_gpu);
cuda_response_map_stride4_tex.copyToMat(response_map_stride4_gpu_tex);
cuda_response_map_stride8.copyToMat(response_map_stride8_gpu);
cuda_response_map_stride8_tex.copyToMat(response_map_stride8_gpu_tex);
//compare
compare(response_map_stride1_cpu,response_map_stride1_gpu);
compare(response_map_stride1_cpu,response_map_stride1_gpu_tex);
compare(response_map_stride2_gpu,response_map_stride2_gpu_tex);
compare(response_map_stride4_gpu,response_map_stride4_gpu_tex);
compare(response_map_stride8_gpu,response_map_stride8_gpu_tex);
#if 1
cv::namedWindow("CPU DoH");
cv::namedWindow("GPU DoH");
cv::namedWindow("GPU DoH stride2");
cv::imshow("CPU DoH",normalize(response_map_stride1_cpu));
cv::imshow("GPU DoH",normalize(response_map_stride1_gpu));
cv::imshow("GPU DoH stride2",normalize(response_map_stride2_gpu));
cv::imshow("GPU DoH stride2",normalize(response_map_stride2_gpu_tex));
cv::imshow("GPU DoH stride4",normalize(response_map_stride4_gpu));
cv::imshow("GPU DoH stride4",normalize(response_map_stride4_gpu_tex));
cv::imshow("GPU DoH stride8",normalize(response_map_stride8_gpu));
cv::imshow("GPU DoH stride8",normalize(response_map_stride8_gpu_tex));
cv::imwrite("./image/doh_map.png",response_map_stride1_gpu);
cv::waitKey(0);
#endif
#if PRINT
cout<<response_map_stride1_cpu<<endl;
cout<<response_map_stride1_gpu<<endl;
cout<<response_map_stride1_gpu_tex<<endl;
cout<<response_map_stride1_gpu-response_map_stride1_gpu_tex<<endl;
//cout<<response_map_stride1_cpu - response_map_stride1_gpu<<endl;
cout<<response_map_stride2_cpu<<endl;
cout<<response_map_stride2_gpu<<endl;
cout<<response_map_stride2_gpu_tex<<endl;
#endif
} | ecc489beac05c2030aae2b9c2b778c8f21771d2e.cu | #include "surf_cuda/common.h"
#include "surf_cuda/cuda_mat.h"
#include "surf_cuda/DoH_filter.cuh"
#include "surf_cuda/cuda_util.cuh"
#include "surf_cuda/surf.h"
using namespace surf_cuda;
//cpu DoH reference filter that uses OpenCV's filter2D functionality
struct DoHFilter_cpu{
float weight{0.9};
int size;
Mat kernel_xx;
Mat kernel_xy;
Mat kernel_yy;
DoHFilter_cpu(int s):size(s){
//create kernels approximating the second derivatives of a Gaussian
kernel_xx = Mat::zeros(s,s,CV_32F);
kernel_xx(cv::Rect((size/3+1)/2,0,2*size/3 -1, size/3)).setTo(1);
kernel_xx(cv::Rect((size/3+1)/2,size/3,2*size/3 -1, size/3)).setTo(-2);
kernel_xx(cv::Rect((size/3+1)/2,2*size/3,2*size/3-1, size/3)).setTo(1);
//kernel_yy is the rotation of kernel_xx
cv::rotate(kernel_xx,kernel_yy,cv::ROTATE_90_CLOCKWISE);
//kernel_xy
kernel_xy = Mat::zeros(s,s,CV_32F);
kernel_xy(cv::Rect(size/6,size/6,size/3,size/3)).setTo(1);
kernel_xy(cv::Rect(size/2+1,size/6,size/3,size/3)).setTo(-1);
kernel_xy(cv::Rect(size/6,size/2+1,size/3,size/3)).setTo(-1);
kernel_xy(cv::Rect(size/2+1,size/2+1,size/3,size/3)).setTo(1);
}
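//response = Dxx*Dyy - (weight*Dxy)^2, the approximated determinant of the
//Hessian, normalised by the filter area squared (size^4)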
void operator()(Mat src, Mat& dst){
Mat filter_map_xx;
Mat filter_map_xy;
Mat filter_map_yy;
//filter
cv::filter2D(src,filter_map_xx,CV_32F,kernel_xx,cv::Point(-1,-1),0,cv::BORDER_CONSTANT);
cv::filter2D(src,filter_map_xy,CV_32F,kernel_xy,cv::Point(-1,-1),0,cv::BORDER_CONSTANT);
cv::filter2D(src,filter_map_yy,CV_32F,kernel_yy,cv::Point(-1,-1),0,cv::BORDER_CONSTANT);
Mat temp1;
cv::multiply(filter_map_xx,filter_map_yy,temp1,1);
Mat temp2;
cv::pow(filter_map_xy*weight,2,temp2);
dst = (temp1 - temp2)/(float)(size*size*size*size);
}
void box_filter_xx(Mat src, Mat& dst){
cv::filter2D(src,dst,CV_32F,kernel_xx,cv::Point(-1,-1),0,cv::BORDER_CONSTANT);
}
void box_filter_xy(Mat src, Mat& dst){
cv::filter2D(src,dst,CV_32F,kernel_xy,cv::Point(-1,-1),0,cv::BORDER_CONSTANT);
}
void box_filter_yy(Mat src, Mat& dst){
cv::filter2D(src,dst,CV_32F,kernel_yy,cv::Point(-1,-1),0,cv::BORDER_CONSTANT);
}
};
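//min-max normalise a response map to [0,1] for display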
Mat normalize(Mat map){
Mat norm = map.clone();
double max_val;
double min_val;
cv::minMaxLoc(norm,&min_val,&max_val);
cout<<min_val<<' '<<max_val<<endl;
norm = (norm-min_val)/(float)(max_val - min_val);
return norm;
}
#define TEST_IMG 1
#define PRINT 0
int main(){
Mat mat_in_cpu = Mat::ones(10,10,CV_32S);
#if TEST_IMG
Mat img = cv::imread("./data/img1.png");
Mat gray_img;
cv::cvtColor(img,gray_img,cv::COLOR_BGR2GRAY);
gray_img.convertTo(mat_in_cpu,CV_32S);
#endif
int rows = mat_in_cpu.rows;
int cols = mat_in_cpu.cols;
//input image
CudaMat cuda_mat_in(rows,cols,CV_32S);
cuda_mat_in.allocate();
cuda_mat_in.allocateArray();
//integral_image
Mat mat_integral_cpu = Mat::zeros(rows,cols,CV_32S);
Mat mat_integral_gpu = Mat::zeros(rows,cols,CV_32S);
CudaMat cuda_mat_integral(rows,cols,CV_32S);
cuda_mat_integral.allocate();
cuda_mat_integral.allocateArray();
//COPY IMAGE TO DEVICE
cuda_mat_in.copyFromMatToArray(mat_in_cpu);
// texture descriptor for reading the input image (clamped at the borders)
cudaTextureDesc texDesc_in;
memset(&texDesc_in, 0, sizeof(texDesc_in));
texDesc_in.addressMode[0] = cudaAddressModeClamp;
texDesc_in.addressMode[1] = cudaAddressModeClamp;
texDesc_in.filterMode = cudaFilterModePoint;
texDesc_in.readMode = cudaReadModeElementType;
texDesc_in.normalizedCoords = 0;
//set texture object for the input image
cuda_mat_in.setTextureObjectInterface(texDesc_in);
SURF surf;
//Create DoH filter
DoHFilter doh_filter_gpu(9);
DoHFilter_cpu doh_filter_cpu(9);
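//9x9 is the smallest box-filter scale in the standard SURF pyramid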
//Blob response map
//===============================
//Stride one test
//CPU result
Mat response_map_stride1_cpu = Mat::zeros(rows,cols,CV_32F);
//GPU result using global memory on Host
Mat response_map_stride1_gpu = Mat::zeros(rows,cols,CV_32F);
//GPU result using texture memory on Host
Mat response_map_stride1_gpu_tex = Mat::zeros(rows,cols,CV_32F);
//GPU result using global memory on Device
CudaMat cuda_response_map_stride1(rows,cols,CV_32F);
cuda_response_map_stride1.allocate();
//GPU result using texture memory on Device
CudaMat cuda_response_map_stride1_tex(rows, cols, CV_32F);
cuda_response_map_stride1_tex.allocate();
//===============================
//Stride two test
Mat response_map_stride2_cpu = Mat::zeros(rows/2,cols/2,CV_32F);
Mat response_map_stride2_gpu = Mat::zeros(rows/2,cols/2,CV_32F);
Mat response_map_stride2_gpu_tex = Mat::zeros(rows/2,cols/2,CV_32F);
CudaMat cuda_response_map_stride2(rows/2,cols/2,CV_32F);
cuda_response_map_stride2.allocate();
CudaMat cuda_response_map_stride2_tex(rows/2, cols/2, CV_32F);
cuda_response_map_stride2_tex.allocate();
//===============================
//Stride four test
Mat response_map_stride4_cpu = Mat::zeros(rows/4,cols/4,CV_32F);
Mat response_map_stride4_gpu = Mat::zeros(rows/4,cols/4,CV_32F);
Mat response_map_stride4_gpu_tex = Mat::zeros(rows/4,cols/4,CV_32F);
CudaMat cuda_response_map_stride4(rows/4,cols/4,CV_32F);
cuda_response_map_stride4.allocate();
CudaMat cuda_response_map_stride4_tex(rows/4, cols/4, CV_32F);
cuda_response_map_stride4_tex.allocate();
//===============================
//Stride eight test
Mat response_map_stride8_cpu = Mat::zeros(rows/8,cols/8,CV_32F);
Mat response_map_stride8_gpu = Mat::zeros(rows/8,cols/8,CV_32F);
Mat response_map_stride8_gpu_tex = Mat::zeros(rows/8,cols/8,CV_32F);
CudaMat cuda_response_map_stride8(rows/8,cols/8,CV_32F);
cuda_response_map_stride8.allocate();
CudaMat cuda_response_map_stride8_tex(rows/8, cols/8, CV_32F);
cuda_response_map_stride8_tex.allocate();
// Specify texture object parameters
cudaTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.addressMode[0] = cudaAddressModeBorder;
texDesc.addressMode[1] = cudaAddressModeBorder;
texDesc.filterMode = cudaFilterModePoint;
texDesc.readMode = cudaReadModeElementType;
texDesc.normalizedCoords = 0;
//set texture object
cuda_mat_integral.setTextureObjectInterface(texDesc);
//create timer
GpuTimer gpu_timer;
CpuTimer cpu_timer;
//=====================================
//compute integral image
gpu_timer.elapsedTimeStart();
surf.compIntegralImage(cuda_mat_in, cuda_mat_integral);
gpu_timer.elapsedTimeStop();
//=====================================
//copy integral image from global memory to texture memory
gpu_timer.elapsedTimeStart();
//cudaMemcpy2DToArray(cuda_array_integral, 0, 0, (void*)cuda_mat_integral.data, cuda_mat_integral.pitch_bytes(), cuda_mat_integral.cols() * sizeof(int), cuda_mat_integral.rows(), cudaMemcpyDeviceToDevice);
cuda_mat_integral.copyToArray();
gpu_timer.elapsedTimeStop();
//=====================================
//Compute Blob response Map Using Global Memory
gpu_timer.elapsedTimeStart();
compDoHResponseMap(cuda_mat_integral,cuda_response_map_stride1,doh_filter_gpu,1);
gpu_timer.elapsedTimeStop();
//=====================================
//Compute Blob response Map Using Texture Memory
gpu_timer.elapsedTimeStart();
compDoHResponseMap_texture(cuda_mat_integral,cuda_response_map_stride1_tex,doh_filter_gpu,1);
gpu_timer.elapsedTimeStop();
//=====================================
//Compute Blob response Map on the CPU (reference implementation)
cpu_timer.elapsedTimeStart();
doh_filter_cpu(mat_in_cpu,response_map_stride1_cpu);
cpu_timer.elapsedTimeStop();
cout<<"Compute Stride 2"<<endl;
compDoHResponseMap(cuda_mat_integral,cuda_response_map_stride2,doh_filter_gpu,2);
compDoHResponseMap_texture(cuda_mat_integral,cuda_response_map_stride2_tex,doh_filter_gpu,2);
cout<<"Compute Stride 4"<<endl;
compDoHResponseMap(cuda_mat_integral,cuda_response_map_stride4,doh_filter_gpu,4);
compDoHResponseMap_texture(cuda_mat_integral,cuda_response_map_stride4_tex,doh_filter_gpu,4);
cout<<"Compute Stride 8"<<endl;
compDoHResponseMap(cuda_mat_integral,cuda_response_map_stride8,doh_filter_gpu,8);
compDoHResponseMap_texture(cuda_mat_integral,cuda_response_map_stride8_tex,doh_filter_gpu,8);
//copy response map to host
cuda_response_map_stride1.copyToMat(response_map_stride1_gpu);
cuda_response_map_stride1_tex.copyToMat(response_map_stride1_gpu_tex);
cuda_response_map_stride2.copyToMat(response_map_stride2_gpu);
cuda_response_map_stride2_tex.copyToMat(response_map_stride2_gpu_tex);
cuda_response_map_stride4.copyToMat(response_map_stride4_gpu);
cuda_response_map_stride4_tex.copyToMat(response_map_stride4_gpu_tex);
cuda_response_map_stride8.copyToMat(response_map_stride8_gpu);
cuda_response_map_stride8_tex.copyToMat(response_map_stride8_gpu_tex);
//compare
compare(response_map_stride1_cpu,response_map_stride1_gpu);
compare(response_map_stride1_cpu,response_map_stride1_gpu_tex);
compare(response_map_stride2_gpu,response_map_stride2_gpu_tex);
compare(response_map_stride4_gpu,response_map_stride4_gpu_tex);
compare(response_map_stride8_gpu,response_map_stride8_gpu_tex);
#if 1
cv::namedWindow("CPU DoH");
cv::namedWindow("GPU DoH");
cv::namedWindow("GPU DoH stride2");
cv::imshow("CPU DoH",normalize(response_map_stride1_cpu));
cv::imshow("GPU DoH",normalize(response_map_stride1_gpu));
cv::imshow("GPU DoH stride2",normalize(response_map_stride2_gpu));
cv::imshow("GPU DoH stride2",normalize(response_map_stride2_gpu_tex));
cv::imshow("GPU DoH stride4",normalize(response_map_stride4_gpu));
cv::imshow("GPU DoH stride4",normalize(response_map_stride4_gpu_tex));
cv::imshow("GPU DoH stride8",normalize(response_map_stride8_gpu));
cv::imshow("GPU DoH stride8",normalize(response_map_stride8_gpu_tex));
cv::imwrite("./image/doh_map.png",response_map_stride1_gpu);
cv::waitKey(0);
#endif
#if PRINT
cout<<response_map_stride1_cpu<<endl;
cout<<response_map_stride1_gpu<<endl;
cout<<response_map_stride1_gpu_tex<<endl;
cout<<response_map_stride1_gpu-response_map_stride1_gpu_tex<<endl;
//cout<<response_map_stride1_cpu - response_map_stride1_gpu<<endl;
cout<<response_map_stride2_cpu<<endl;
cout<<response_map_stride2_gpu<<endl;
cout<<response_map_stride2_gpu_tex<<endl;
#endif
} |
9640f6269485f51227fedd9581e4b840eed84726.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../include/cudapars.h"
#include "../include/paramssteeringtest1.h"
#include "../include/iobparams.h"
/////////////////////////////////////
// standard imports
/////////////////////////////////////
#include <stdio.h>
#include <math.h>
#include "../include/smaugcukernels.h"
/////////////////////////////////////
// kernel function (CUDA device)
/////////////////////////////////////
#include "../include/gradops_cd2.cuh"
#include "../include/dervfields_cd2.cuh"
#include "../include/usersource_cd2.cuh"
__device__ __host__
real fluxe2(real *dw, real *wd, real *w, real *wmod, struct params *p,int *ii, int dir) {
real ddc=0;
real fi, fim1;
real fip2=0, fim2=0;
real ddcx=0,ddcy=0;
real fluxt=0;
#ifdef USE_SAC
fluxt= +wd[fencode3_cd2(p,ii,ptb)]*grad3dn_cd2(wd,wd,p,ii,vel1+dir,dir);
fluxt += +w[fencode3_cd2(p,ii,b1b)]*w[fencode3_cd2(p,ii,b1b+dir)]*grad3dn_cd2(wd,wd,p,ii,vel1,0)+w[fencode3_cd2(p,ii,b2b)]*w[fencode3_cd2(p,ii,b1b+dir)]*grad3dn_cd2(wd,wd,p,ii,vel1+1,1);
#endif
#ifdef USE_SAC_3D
fluxt= +wd[fencode3_cd2(p,ii,ptb)]*grad3dn_cd2(wd,wd,p,ii,vel1+dir,dir);
fluxt += +w[fencode3_cd2(p,ii,b1b)]*w[fencode3_cd2(p,ii,b1b+dir)]*grad3dn_cd2(wd,wd,p,ii,vel1,0)+w[fencode3_cd2(p,ii,b2b)]*w[fencode3_cd2(p,ii,b1b+dir)]*grad3dn_cd2(wd,wd,p,ii,vel1+1,1)+w[fencode3_cd2(p,ii,b3b)]*w[fencode3_cd2(p,ii,b1b+dir)]*grad3dn_cd2(wd,wd,p,ii,vel1+2,2);
#endif
return fluxt;
}
__device__ __host__
int divflux_cd2(real *dw, real *wd, real *w, struct params *p,int *ii,int field,int dir) {
int direction;
int status=0;
real divflux=0;
dw[fencode3_cd2(p,ii,field)]= grad3dn_cd2(wd,wd,p,ii,flux,dir);
#ifdef USE_SAC
//commented out to test against vac
/*if(field==energy)
{
dw[fencode3_cd2(p,ii,field)]+=fluxe2(dw, wd, w, p,ii,dir)-w[fencode3_cd2(p,ii,rho)]*((p->g[dir])*w[fencode3_cd2(p,ii,mom1+dir)] )/(w[fencode3_cd2(p,ii,rho)]+w[fencode3_cd2(p,ii,rhob)]);
}*/
#endif
return ( status);
}
__device__ __host__
int addenergyterms_cd2(real *dw, real *wd, real *w, real *wmod, struct params *p,int *ii,int field,int dir) {
int direction;
int status=0;
real divflux=0;
#if defined USE_SAC || defined USE_SAC_3D
if(field==energy)
{
//computept3_cd2(w,wd,p,ii);
//wmod[fencode3_cd2(p,ii,field)]+=fluxe2(dw, wd, wmod, p,ii,dir);/*+w[fencode3_cd2(p,ii,rho)]*((p->g[dir])*w[fencode3_cd2(p,ii,mom1+dir)] )/(w[fencode3_cd2(p,ii,rho)]+w[fencode3_cd2(p,ii,rhob)]);*/
wmod[fencode3_cd2(p,ii,field)]-= +(p->dt)*wd[fencode3_cd2(p,ii,ptb)]*grad3dn_cd2(wd,wd,p,ii,vel1+dir,dir);
//wmod[fencode3_cd2(p,ii,field)]-= +(p->dt)*wd[fencode3_cd2(p,ii,ptb)]*grad3d_cd2(wd,p,ii,vel1+dir,dir);
for(int idim=0;idim<NDIM;idim++)
wmod[fencode3_cd2(p,ii,field)]+=(p->dt)*wmod[fencode3_cd2(p,ii,b1b+idim)]*wmod[fencode3_cd2(p,ii,b1b+dir)]*grad3dn_cd2(wd,wd,p,ii,vel1+dir,idim);
//wmod[fencode3_cd2(p,ii,field)]+=(p->dt)*wmod[fencode3_cd2(p,ii,b1b+idim)]*wmod[fencode3_cd2(p,ii,b1b+dir)]*grad3d_cd2(wd,p,ii,vel1+idim,idim);
//fluxt= +(((p->gamma)-1)*w[fencode3_cd2(p,ii,energyb)]- 0.5*((p->gamma)-2)*(w[fencode3_cd2(p,ii,b1b)]*w[fencode3_cd2(p,ii,b1b)]+w[fencode3_cd2(p,ii,b2b)]*w[fencode3_cd2(p,ii,b2b)]+w[fencode3_cd2(p,ii,b3b)]*w[fencode3_cd2(p,ii,b3b)]))*grad3d_cd2(wd,p,ii,vel1+dir,dir);
//flux= -(((p->gamma)-1)*w[fencode3_cd2(p,ii,energyb)]- 0.5*((p->gamma)-2)*(w[fencode3_cd2(p,ii,b1b)]*w[fencode3_cd2(p,ii,b1b)]+w[fencode3_cd2(p,ii,b2b)]*w[fencode3_cd2(p,ii,b2b)]+w[fencode3_cd2(p,ii,b3b)]*w[fencode3_cd2(p,ii,b3b)]))*grad3d_cd2(wd,p,ii,vel1+dir,dir);
// fluxt += +w[fencode3_cd2(p,ii,b1b)]*w[fencode3_cd2(p,ii,b1b+dir)]*grad3d_cd2(wd,p,ii,vel1,0)+w[fencode3_cd2(p,ii,b2b)]*w[fencode3_cd2(p,ii,b1b+dir)]*grad3d_cd2(wd,p,ii,vel1+1,1)+w[fencode3_cd2(p,ii,b3b)]*w[fencode3_cd2(p,ii,b1b+dir)]*grad3d_cd2(wd,p,ii,vel1+2,2);
}
#endif
return ( status);
}
__device__ __host__
int addgrav_cd2(real *dw, real *wd, real *w, real *wmod, struct params *p,int *ii) {
//int direction;
int status=0;
int field,dir;
//real divflux=0;
//dw[fencode3_cd2(p,ii,field)]= grad_cd2(wd,p,ii,flux,dir);//+grad_cd2(wd,p,ii,f2,1);
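//momentum gains the source dt*g*rho; energy gains the gravitational work dt*rho*(g.v)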
for(field=rho;field<NVAR;field++)
{
switch(field)
{
case mom1:
case mom2:
#ifdef USE_SAC_3D
case mom3:
#endif
dir=field-mom1;
wmod[fencode3_cd2(p,ii,field)]+=(p->dt)* (p->g[dir])*w[fencode3_cd2(p,ii,rho)];
break;
case energy:
for(dir=0; dir<NDIM; dir++)
wmod[fencode3_cd2(p,ii,field)]+=(p->dt)*w[fencode3_cd2(p,ii,rho)]*((p->g[dir])*w[fencode3_cd2(p,ii,mom1+dir)] )/(w[fencode3_cd2(p,ii,rho)]+w[fencode3_cd2(p,ii,rhob)]);
break;
}
}
return ( status);
}
__device__ __host__
real transportflux_cd2 (real *dw, real *wd, real *w, struct params *p,int *ii,int field, int direction) {
// real fluxt=0;
//transport flux: velocity = momentum / total (perturbed + background) density
//using the precomputed velocity in wd instead would need fewer operations
#if defined USE_SAC || defined USE_SAC_3D
return(w[fencode3_cd2(p,ii,mom1+direction)]*w[fencode3_cd2(p,ii,field)]/(w[fencode3_cd2(p,ii,rho)]+w[fencode3_cd2(p,ii,rhob)]));
// flux= wd[fencode3_cd2(p,ii,vel1+direction)]*w[fencode3_cd2(p,ii,field)];
#else
return(w[fencode3_cd2(p,ii,mom1+direction)]*w[fencode3_cd2(p,ii,field)]/w[fencode3_cd2(p,ii,rho)]);
//flux= w[fencode3_cd2(p,ii,vel1+direction)]*w[fencode3_cd2(p,ii,field)];
#endif
}
__device__ __host__
real fluxb1(real *dw, real *wd, real *w, struct params *p,int *ii,int field, int direction) {
real fluxt=0;
#if defined USE_SAC || defined USE_SAC_3D
fluxt= -(w[fencode3_cd2(p,ii,b1+direction)]+w[fencode3_cd2(p,ii,field+(NDIM+2)+direction)])*w[fencode3_cd2(p,ii,mom1+(field-b1))]/(w[fencode3_cd2(p,ii,rho)]+w[fencode3_cd2(p,ii,rhob)]);
fluxt+= (w[fencode3_cd2(p,ii,field+(NDIM+2))])*w[fencode3_cd2(p,ii,mom1+direction)]/(w[fencode3_cd2(p,ii,rho)]+w[fencode3_cd2(p,ii,rhob)]);
#endif
return fluxt;
}
__device__ __host__
real fluxe1(real *dw, real *wd, real *w, struct params *p,int *ii, int direction) {
real ddc=0;
real fi, fim1;
real fip2=0, fim2=0;
real ddcx=0,ddcy=0;
real fluxt=0;
//computept3_cd2(w,wd,p,ii);
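//total-pressure work v*pt plus magnetic cross terms between the perturbed
//(b1,b2..) and background (b1b,b2b..) fields, all divided by the total density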
#if defined USE_SAC
fluxt = w[fencode3_cd2(p,ii,mom1+direction)]*(wd[fencode3_cd2(p,ii,pressuret)]);
fluxt -= w[fencode3_cd2(p,ii,b1+direction)]*(w[fencode3_cd2(p,ii,b1b)]*w[fencode3_cd2(p,ii,mom1)]+w[fencode3_cd2(p,ii,b2b)]*w[fencode3_cd2(p,ii,mom2)]);
fluxt -= w[fencode3_cd2(p,ii,b1b+direction)]*(w[fencode3_cd2(p,ii,b1)]*w[fencode3_cd2(p,ii,mom1)]+w[fencode3_cd2(p,ii,b2)]*w[fencode3_cd2(p,ii,mom2)]);
fluxt /= (w[fencode3_cd2(p,ii,rho)]+w[fencode3_cd2(p,ii,rhob)]);
fluxt += w[fencode3_cd2(p,ii,mom1+direction)]*w[fencode3_cd2(p,ii,energyb)]/(w[fencode3_cd2(p,ii,rho)]+w[fencode3_cd2(p,ii,rhob)]);
fluxt -=w[fencode3_cd2(p,ii,b1+direction)]*(w[fencode3_cd2(p,ii,b1)]*w[fencode3_cd2(p,ii,mom1)]+w[fencode3_cd2(p,ii,b2)]*w[fencode3_cd2(p,ii,mom2)])/(w[fencode3_cd2(p,ii,rho)]+w[fencode3_cd2(p,ii,rhob)]);
#endif
#ifdef USE_SAC_3D
fluxt = w[fencode3_cd2(p,ii,mom1+direction)]*(wd[fencode3_cd2(p,ii,pressuret)]);
fluxt -= w[fencode3_cd2(p,ii,b1+direction)]*(w[fencode3_cd2(p,ii,b1b)]*w[fencode3_cd2(p,ii,mom1)]+w[fencode3_cd2(p,ii,b2b)]*w[fencode3_cd2(p,ii,mom2)]+w[fencode3_cd2(p,ii,b3b)]*w[fencode3_cd2(p,ii,mom3)]);
fluxt -= w[fencode3_cd2(p,ii,b1b+direction)]*(w[fencode3_cd2(p,ii,b1)]*w[fencode3_cd2(p,ii,mom1)]+w[fencode3_cd2(p,ii,b2)]*w[fencode3_cd2(p,ii,mom2)]+w[fencode3_cd2(p,ii,b3)]*w[fencode3_cd2(p,ii,mom3)]);
fluxt /= (w[fencode3_cd2(p,ii,rho)]+w[fencode3_cd2(p,ii,rhob)]);
fluxt +=w[fencode3_cd2(p,ii,mom1+direction)]*w[fencode3_cd2(p,ii,energyb)]/(w[fencode3_cd2(p,ii,rho)]+w[fencode3_cd2(p,ii,rhob)]);
fluxt -=w[fencode3_cd2(p,ii,b1+direction)]*(w[fencode3_cd2(p,ii,b1)]*w[fencode3_cd2(p,ii,mom1)]+w[fencode3_cd2(p,ii,b2)]*w[fencode3_cd2(p,ii,mom2)]+w[fencode3_cd2(p,ii,b3)]*w[fencode3_cd2(p,ii,mom3)])/(w[fencode3_cd2(p,ii,rho)]+w[fencode3_cd2(p,ii,rhob)]);
#endif
return fluxt;
}
__device__ __host__
int computefluxe(real *dw, real *wd, real *w, struct params *p,int *ii,int direction) {
int field;//, direction;
int status=0;
wd[fencode3_cd2(p,ii,flux)]=0.0;
#if defined USE_SAC || defined USE_SAC_3D
wd[fencode3_cd2(p,ii,flux)]= transportflux_cd2(dw,wd,w,p,ii,energy,direction)+fluxe1(dw,wd,w,p,ii,direction);
#endif
return ( status);
}
__device__ __host__
int computefluxb1 (real *dw, real *wd, real *w, struct params *p,int *ii, int field,int direction) {
int status=0;
wd[fencode3_cd2(p,ii,flux)]=0.0;
if(direction==0)
wd[fencode3_cd2(p,ii,flux)]= 0.0;
else
#if defined USE_SAC || defined USE_SAC_3D
wd[fencode3_cd2(p,ii,flux)]= transportflux_cd2(dw,wd,w,p,ii,field,direction)-(w[fencode3_cd2(p,ii,b1+direction)]+w[fencode3_cd2(p,ii,b1b+direction)])*w[fencode3_cd2(p,ii,mom1)]/(w[fencode3_cd2(p,ii,rho)]+w[fencode3_cd2(p,ii,rhob)])+ (w[fencode3_cd2(p,ii,b1b)])*w[fencode3_cd2(p,ii,mom1+direction)]/(w[fencode3_cd2(p,ii,rho)]+w[fencode3_cd2(p,ii,rhob)]);//+fluxb1(dw,wd,w,p,ii,field,direction);
#endif
return ( status);
}
__device__ __host__
int computefluxb2 (real *dw, real *wd, real *w, struct params *p,int *ii, int field,int direction) {
int status=0;
wd[fencode3_cd2(p,ii,flux)]=0.0;
if(direction==1)
wd[fencode3_cd2(p,ii,flux)]= 0.0;
else
#if defined USE_SAC || defined USE_SAC_3D
wd[fencode3_cd2(p,ii,flux)]= transportflux_cd2(dw,wd,w,p,ii,field,direction)-(w[fencode3_cd2(p,ii,b1+direction)]+w[fencode3_cd2(p,ii,b1b+direction)])*w[fencode3_cd2(p,ii,mom2)]/(w[fencode3_cd2(p,ii,rho)]+w[fencode3_cd2(p,ii,rhob)])+ (w[fencode3_cd2(p,ii,b2b)])*w[fencode3_cd2(p,ii,mom1+direction)]/(w[fencode3_cd2(p,ii,rho)]+w[fencode3_cd2(p,ii,rhob)]);//+fluxb1(dw,wd,w,p,ii,field,direction);
#endif
return ( status);
}
__device__ __host__
int computefluxb3 (real *dw, real *wd, real *w, struct params *p,int *ii, int field,int direction) {
wd[fencode3_cd2(p,ii,flux)]=0.0;
int status=0;
#ifdef USE_SAC_3D
if(direction==2)
wd[fencode3_cd2(p,ii,flux)]= 0.0;
else
wd[fencode3_cd2(p,ii,flux)]= transportflux_cd2(dw,wd,w,p,ii,field,direction)-(w[fencode3_cd2(p,ii,b1+direction)]+w[fencode3_cd2(p,ii,b1b+direction)])*w[fencode3_cd2(p,ii,mom3)]/(w[fencode3_cd2(p,ii,rho)]+w[fencode3_cd2(p,ii,rhob)])+ (w[fencode3_cd2(p,ii,b3b)])*w[fencode3_cd2(p,ii,mom1+direction)]/(w[fencode3_cd2(p,ii,rho)]+w[fencode3_cd2(p,ii,rhob)]);//+fluxb1(dw,wd,w,p,ii,field,direction);
#endif
return ( status);
}
//rho, mom1, mom2, mom3, energy, b1, b2, b3
__device__ __host__
void computeflux_cd2 (real *dw, real *wd, real *w, struct params *p,int *ii, int field,int dir) {
//int status=0;
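//only the energy and induction (b1,b2[,b3]) fields are advanced by this pass;
//all other fields fall through with no flux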
switch(field)
{
case energy:
computefluxe(dw,wd,w,p,ii,dir);
// add the following terms for SAC
// del((b bb+ bb b).v)+ptb del v - bb bb del v
break;
case b1:
computefluxb1(dw,wd,w,p,ii,field,dir);
break;
case b2:
computefluxb2(dw,wd,w,p,ii,field,dir);
break;
#ifdef USE_SAC_3D
case b3:
computefluxb3(dw,wd,w,p,ii,field,dir);
break;
#endif
}
//return ( status);
}
__global__ void centdiff2a_parallel(struct params *p, struct state *s, real *w, real *wmod,
real *dwn1, real *wd, int order, int ordero, real dt,int f,int dir)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j,fid;
// int index;
int ni=p->n[0];
int nj=p->n[1];
int ii[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int nk=p->n[2];
int kp;
real dz=p->dx[2];
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
int ip,jp;
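//recover grid coordinates (ip,jp[,kp]) from the flattened 1D thread index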
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
fid=0;
ii[0]=ip;
ii[1]=jp;
#ifdef USE_SAC_3D
ii[2]=kp;
#endif
#ifdef USE_SAC_3D
if(ii[0]<((p->n[0])-2) && ii[0]>1 && ii[1]>1 && ii[1]<((p->n[1])-2) && ii[2]>1 && ii[2]<((p->n[2])-2))
#else
if(ii[0]<((p->n[0]))-2 && ii[0]>1 && ii[1]>1 && ii[1]<((p->n[1])-2))
#endif
divflux_cd2(dwn1,wd,wmod+order*NVAR*dimp,p,ii,f,dir);
__syncthreads();
}
__global__ void centdiff2b_parallel(struct params *p, struct state *s, real *w, real *wmod,
real *dwn1, real *wd, int order, int ordero, real dt,int f,int dir)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j,fid;
int ni=p->n[0];
int nj=p->n[1];
int ii[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int nk=p->n[2];
int kp;
real dz=p->dx[2];
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
int ip,jp;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
fid=0;
ii[0]=ip;
ii[1]=jp;
#ifdef USE_SAC_3D
ii[2]=kp;
#endif
real del;
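//subtract dt times the flux divergence along dir; the sweep covers the full
//extent in dir but skips two ghost cells on each transverse axis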
switch(dir)
{
case 0:
#ifdef USE_SAC_3D
if(ii[0]<((p->n[0])) && ii[1]>1 && ii[1]<((p->n[1])-2) && ii[2]>1 && ii[2]<((p->n[2])-2))
#else
if(ii[0]<((p->n[0])) && ii[1]>1 && ii[1]<((p->n[1])-2))
#endif
wmod[fencode3_cd2(p,ii,f)+(ordero*NVAR*dimp)]=wmod[fencode3_cd2(p,ii,f)+(ordero*NVAR*dimp)]-dt*dwn1[fencode3_cd2(p,ii,f)];
break;
case 1:
#ifdef USE_SAC_3D
if(ii[0]>1 && ii[0]<((p->n[0])-2) && ii[1]<((p->n[1])) && ii[2]>1 && ii[2]<((p->n[2])-2))
#else
if(ii[0]>1 && ii[0]<((p->n[0])-2) && ii[1]<((p->n[1])) )
#endif
wmod[fencode3_cd2(p,ii,f)+(ordero*NVAR*dimp)]=wmod[fencode3_cd2(p,ii,f)+(ordero*NVAR*dimp)]-dt*dwn1[fencode3_cd2(p,ii,f)];
break;
#ifdef USE_SAC_3D
case 2:
if(ii[0]>1 && ii[0]<((p->n[0])-2) && ii[1]>1 && ii[1]<((p->n[1])-2) && ii[2]<((p->n[2])))
wmod[fencode3_cd2(p,ii,f)+(ordero*NVAR*dimp)]=wmod[fencode3_cd2(p,ii,f)+(ordero*NVAR*dimp)]-dt*dwn1[fencode3_cd2(p,ii,f)];
break;
#endif
}
__syncthreads();
}
__global__ void centdiff2ci_parallel(struct params *p, struct state *s, real *w, real *wmod,
real *dwn1, real *wd, int order, int ordero, real dt,int f,int dir)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j,fid;
int ni=p->n[0];
int nj=p->n[1];
int ii[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int nk=p->n[2];
int kp;
real dz=p->dx[2];
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
int ip,jp;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
fid=0;
//compute pbg used in next source term
ii[0]=ip;
ii[1]=jp;
#ifdef USE_SAC_3D
ii[2]=kp;
#endif
#ifdef USE_SAC_3D
if(ii[0]<p->n[0] && ii[1]<p->n[1] && ii[2]<p->n[2])
#else
if(ii[0]<p->n[0] && ii[1]<p->n[1])
#endif
{
//computevel3_cd2(wmod+(order*NVAR*dimp),wd,p,ii);
computepbg3_cd2(wmod+(ordero*NVAR*dimp),wd,p,ii);
}
__syncthreads();
ii[0]=ip;
ii[1]=jp;
#ifdef USE_SAC_3D
ii[2]=kp;
#endif
#ifdef USE_SAC_3D
if(ii[0]<p->n[0] && ii[1]<p->n[1] && ii[2]<p->n[2])
#else
if(ii[0]<p->n[0] && ii[1]<p->n[1])
#endif
dwn1[fencode3_cd2(p,ii,f)]=0.0;
__syncthreads();
}
__global__ void centdiff2c_parallel(struct params *p, struct state *s, real *w, real *wmod,
real *dwn1, real *wd, int order, int ordero, real dt,int f,int dir)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j,fid;
// int index;
int ni=p->n[0];
int nj=p->n[1];
int ii[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int nk=p->n[2];
int kp;
real dz=p->dx[2];
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
int ip,jp;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
fid=0;
//add the background-pressure and magnetic energy source terms
ii[0]=ip;
ii[1]=jp;
#ifdef USE_SAC_3D
ii[2]=kp;
#endif
//if(i>1 && j >1 && i<(ni-2) && j<(nj-2))
#ifdef USE_SAC_3D
if(ii[0]<((p->n[0])-2) && ii[1]<((p->n[1])-2) && ii[2]<((p->n[2])-2) && ii[0]>1 && ii[1]>1 && ii[2]>1 )
#else
if(ii[0]<(p->n[0])-2 && ii[1]<(p->n[1])-2)
#endif
addenergyterms_cd2(dwn1,wd,w,wmod+ordero*NVAR*dimp,p,ii,f,dir);
/* #if(defined(USE_SAC_3D) && defined(USE_USERSOURCE))
//if(ii[0]<((p->n[0])-2) && ii[1]<((p->n[1])-2) && ii[2]<((p->n[2])-2) && ii[0]>1 && ii[1]>1 && ii[2]>1 )
if(ii[0]<((p->n[0])) && ii[1]<((p->n[1])) && ii[2]<((p->n[2])) )
#endif
#if(defined(USE_SAC) && defined(USE_USERSOURCE))
//if(ii[0]<(p->n[0])-2 && ii[1]<(p->n[1])-2)
if(ii[0]<(p->n[0]) && ii[1]<(p->n[1]))
#endif
#ifdef USE_USERSOURCE
addsourceterms2_cd2(dwn1,wd,wmod+ordero*NVAR*dimp,p,s,ii,f,dir);
#endif*/
/*if( ii[1] <(nj) && ii[0]<(ni) )
if(p->ipe==1 && ii[1]==125 && (p->it)==2)
{
wmod[fencode3_cd2(p,ii,rho)]=0.22113;
w[fencode3_cd2(p,ii,rho)]=0.22113;
}*/
/* if( ii[1] <(nj) && ii[0]<(ni) )
if(p->ipe==3 && ii[1]==3 && (p->it)==2)
{
wmod[fencode3_cd2(p,ii,rho)]=0.22118;
w[fencode3_cd2(p,ii,rho)]=0.22118;
}*/
/*if( ii[1] <(nj) && ii[0]<(ni) )
if(p->ipe==1 && ii[1]==127 && (p->it)==2)
{
wmod[fencode3_cd2(p,ii,rho)]=wmod[fencode_cd2(p,ii[0],ii[1]-4,rho)];
w[fencode3_cd2(p,ii,rho)]= w[fencode_cd2(p,ii[0],ii[1]-4,rho)];
}
if( ii[1] <(nj) && ii[0]<(ni) )
if(p->ipe==3 && ii[1]==0 && (p->it)==2)
{
wmod[fencode3_cd2(p,ii,rho)]=wmod[fencode_cd2(p,ii[0],ii[1]+4,rho)];
w[fencode3_cd2(p,ii,rho)]= w[fencode_cd2(p,ii[0],ii[1]+4,rho)];
}
if( ii[1] <(nj) && ii[0]<(ni) )
if(p->ipe==1 && ii[1]==126 && (p->it)==2)
{
wmod[fencode3_cd2(p,ii,rho)]=wmod[fencode_cd2(p,ii[0],ii[1]-4,rho)];
w[fencode3_cd2(p,ii,rho)]= w[fencode_cd2(p,ii[0],ii[1]-4,rho)];
}
if( ii[1] <(nj) && ii[0]<(ni) )
if(p->ipe==3 && ii[1]==1 && (p->it)==2)
{
wmod[fencode3_cd2(p,ii,rho)]=wmod[fencode_cd2(p,ii[0],ii[1]+4,rho)];
w[fencode3_cd2(p,ii,rho)]= w[fencode_cd2(p,ii[0],ii[1]+4,rho)];
}*/
__syncthreads();
}
__global__ void grav_parallel(struct params *p, struct state *s, real *w, real *wmod,
real *dwn1, real *wd, int order, int ordero, real dt,int f,int dir)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j,fid;
int ni=p->n[0];
int nj=p->n[1];
int ii[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int nk=p->n[2];
int kp;
real dz=p->dx[2];
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
int ip,jp;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
fid=0;
//add the gravitational source terms
ii[0]=ip;
ii[1]=jp;
#ifdef USE_SAC_3D
ii[2]=kp;
#endif
#ifdef USE_SAC_3D
if(ii[0]<((p->n[0])-2) && ii[1]<((p->n[1])-2) && ii[2]<((p->n[2])-2) && ii[0]>1 && ii[1]>1 && ii[2]>1 )
#else
if(ii[0]<(p->n[0])-2 && ii[1]<(p->n[1])-2)
#endif
addgrav_cd2(dwn1,wd,w,wmod+ordero*NVAR*dimp,p,ii);
__syncthreads();
}
__global__ void source_parallel(struct params *p, struct state *s, real *w, real *wmod,
real *dwn1, real *wd, int order, int ordero, real dt)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j,fid;
int f,dir;
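//NOTE: f and dir are never assigned before being passed to addsourceterms2_cd2;
//they are assumed to be unused by that routine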
int ni=p->n[0];
int nj=p->n[1];
int ii[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int nk=p->n[2];
int kp;
real dz=p->dx[2];
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
int ip,jp;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
fid=0;
//add the user-defined source terms
ii[0]=ip;
ii[1]=jp;
#ifdef USE_SAC_3D
ii[2]=kp;
#endif
#if(defined(USE_SAC_3D) && defined(USE_USERSOURCE))
if(ii[0]<((p->n[0])) && ii[1]<((p->n[1])) && ii[2]<((p->n[2])) )
#endif
#if(defined(USE_SAC) && defined(USE_USERSOURCE))
if(ii[0]<(p->n[0]) && ii[1]<(p->n[1]))
#endif
#ifdef USE_USERSOURCE
addsourceterms2_cd2(dwn1,wd,wmod+ordero*NVAR*dimp,p,s,ii,f,dir);
#endif
__syncthreads();
}
__global__ void centdiff2d_parallel(struct params *p, struct state *s, real *w, real *wmod,
real *dwn1, real *wd, int order, int ordero, real dt,int f,int dir)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j,fid;
int ni=p->n[0];
int nj=p->n[1];
int ii[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int nk=p->n[2];
int kp;
real dz=p->dx[2];
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
int ip,jp;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
fid=0;
ii[0]=ip;
ii[1]=jp;
#ifdef USE_SAC_3D
ii[2]=kp;
#endif
#ifdef USE_SAC
if(ii[0]<ni && ii[1]<(nj))
#endif
#ifdef USE_SAC_3D
if(ii[0]<ni && ii[1]<(nj) && ii[2]<(nk))
#endif
wmod[fencode3_cd2(p,ii,f)+(ordero*NVAR*dimp)]=wmod[fencode3_cd2(p,ii,f)+(ordero*NVAR*dimp)]-dt*dwn1[fencode3_cd2(p,ii,f)];
__syncthreads();
}
__global__ void centdiff2_parallel(struct params *p, struct state *s, real *w, real *wmod,
real *dwn1, real *wd, int order, int ordero, real dt,int f,int dir)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j,fid;
int ni=p->n[0];
int nj=p->n[1];
int ii[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int kp;
real dz=p->dx[2];
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
int ip,jp;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
fid=0;
ii[0]=ip;
ii[1]=jp;
#ifdef USE_SAC_3D
ii[2]=kp;
#endif
switch(dir)
{
case 0:
#ifdef USE_SAC_3D
if(ii[0]<p->n[0] && ii[1]>1 && ii[1]<(p->n[1]-2) && ii[2]>1 && ii[2]<(p->n[2]-2))
#else
if(ii[0]<p->n[0] && ii[1]>1 && ii[1]<(p->n[1]-2))
#endif
computeflux_cd2(dwn1,wd,wmod+order*NVAR*dimp,p,ii,f,0);
break;
case 1:
#ifdef USE_SAC_3D
if(ii[1]<p->n[1] && ii[0]>1 && ii[0]<(p->n[0]-2) && ii[2]>1 && ii[2]<(p->n[2]-2))
#else
if(ii[1]<p->n[1] && ii[0]>1 && ii[0]<(p->n[0]-2))
#endif
computeflux_cd2(dwn1,wd,wmod+order*NVAR*dimp,p,ii,f,1);
break;
#ifdef USE_SAC_3D
case 2:
if(ii[2]<p->n[2] && ii[0]>1 && ii[0]<(p->n[0]-2) && ii[1]>1 && ii[1]<(p->n[1]-2))
computeflux_cd2(dwn1,wd,wmod+order*NVAR*dimp,p,ii,f,2);
break;
#endif
}
__syncthreads();
}
__global__ void centdiff2init_parallel(struct params *p, struct state *s, real *w, real *wmod,
real *dwn1, real *wd, int order, int ordero, real dt,int f,int dir)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j,fid;
int ni=p->n[0];
int nj=p->n[1];
int ii[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int kp;
real dz=p->dx[2];
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
int ip,jp;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
fid=0;
ii[0]=ip;
ii[1]=jp;
#ifdef USE_SAC_3D
ii[2]=kp;
#endif
#ifdef USE_SAC_3D
if(ii[0]<p->n[0] && ii[1]<p->n[1] && ii[2]<p->n[2])
#else
if(ii[0]<p->n[0] && ii[1]<p->n[1])
#endif
{
dwn1[fencode3_cd2(p,ii,f)]=0.0;
wd[fencode3_cd2(p,ii,flux)]=0.0;
}
__syncthreads();
}
/////////////////////////////////////
// error checking routine
/////////////////////////////////////
void checkErrors_cd2(const char *label)
{
// we need to synchronise first to catch errors due to
// asynchronous operations that would otherwise
// potentially go unnoticed
hipError_t err;
err = hipDeviceSynchronize();
if (err != hipSuccess)
{
const char *e = hipGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)\n", e, label);
}
err = hipGetLastError();
if (err != hipSuccess)
{
const char *e = hipGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)\n", e, label);
}
}
int cucentdiff2(struct params **p, struct params **d_p, struct state **d_s, real **d_w, real **d_wmod, real **d_dwn1, real **d_wd, int order,int ordero, real dt, int field,int dir)
{
int dimp=(((*p)->n[0]))*(((*p)->n[1]));
#ifdef USE_SAC_3D
dimp=(((*p)->n[0]))*(((*p)->n[1]))*(((*p)->n[2]));
#endif
int numBlocks = (dimp+numThreadsPerBlock-1) / numThreadsPerBlock;
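//kernel pipeline: zero the work arrays, compute fluxes, take the flux divergence,
//apply the -dt*div update, recompute the background pressure, then add energy terms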
// hipMemcpy(*w, *d_w, NVAR*((*p)->n[0])* ((*p)->n[1])*sizeof(real), hipMemcpyDeviceToHost);
hipMemcpy(*d_p, *p, sizeof(struct params), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( centdiff2init_parallel), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, *d_p, *d_s,*d_w, *d_wmod, *d_dwn1, *d_wd, order,ordero,dt,field,dir);
hipDeviceSynchronize();
hipLaunchKernelGGL(( centdiff2_parallel), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, *d_p, *d_s,*d_w, *d_wmod, *d_dwn1, *d_wd, order,ordero,dt,field,dir);
hipDeviceSynchronize();
hipLaunchKernelGGL(( centdiff2a_parallel), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, *d_p,*d_s, *d_w, *d_wmod, *d_dwn1, *d_wd, order,ordero,dt,field,dir);
hipDeviceSynchronize();
hipLaunchKernelGGL(( centdiff2b_parallel), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, *d_p,*d_s, *d_w, *d_wmod, *d_dwn1, *d_wd, order,ordero,dt,field,dir);
hipDeviceSynchronize();
hipLaunchKernelGGL(( centdiff2ci_parallel), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, *d_p,*d_s, *d_w, *d_wmod, *d_dwn1, *d_wd, order,ordero,dt,field,dir);
hipDeviceSynchronize();
hipLaunchKernelGGL(( centdiff2c_parallel), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, *d_p,*d_s, *d_w, *d_wmod, *d_dwn1, *d_wd, order,ordero,dt,field,dir);
hipDeviceSynchronize();
//hipMemcpy(*p, *d_p, sizeof(struct params), hipMemcpyDeviceToHost);
//printf("source params %G %f %f\n",(*p)->test, (*p)->chyp[0] , (*p)->chyp[1]);
//printf("source params %G \n",(*p)->test);
//centdiff2d_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p,*d_s, *d_w, *d_wmod, *d_dwn1, *d_wd, order,ordero,dt,field,dir);
//hipDeviceSynchronize();
// hipMemcpy(*w, *d_w, NVAR*((*p)->n[0])* ((*p)->n[1])*sizeof(real), hipMemcpyDeviceToHost);
//hipMemcpy(*wnew, *d_wnew, NVAR*((*p)->n[0])* ((*p)->n[1])*sizeof(real), hipMemcpyDeviceToHost);
//hipMemcpy(*b, *d_b, (((*p)->n[0])* ((*p)->n[1]))*sizeof(real), hipMemcpyDeviceToHost);
//checkErrors("copy data from device");
return 0;
}
int cugrav(struct params **p, struct params **d_p, struct state **d_s, real **d_w, real **d_wmod, real **d_dwn1, real **d_wd, int order,int ordero, real dt)
{
int dimp=(((*p)->n[0]))*(((*p)->n[1]));
int field=rho;
int dir=0;
#ifdef USE_SAC_3D
dimp=(((*p)->n[0]))*(((*p)->n[1]))*(((*p)->n[2]));
#endif
int numBlocks = (dimp+numThreadsPerBlock-1) / numThreadsPerBlock;
hipMemcpy(*d_p, *p, sizeof(struct params), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( grav_parallel), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, *d_p,*d_s, *d_w, *d_wmod, *d_dwn1, *d_wd, order,ordero,dt,field,dir);
hipDeviceSynchronize();
//hipMemcpy(*p, *d_p, sizeof(struct params), hipMemcpyDeviceToHost);
//printf("source params %G %f %f %G\n",(*p)->test, (*p)->chyp[0] , (*p)->chyp[1] , (*p)->chyp[2]);
//centdiff2d_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p,*d_s, *d_w, *d_wmod, *d_dwn1, *d_wd, order,ordero,dt,field,dir);
//hipDeviceSynchronize();
// hipMemcpy(*w, *d_w, NVAR*((*p)->n[0])* ((*p)->n[1])*sizeof(real), hipMemcpyDeviceToHost);
//hipMemcpy(*wnew, *d_wnew, NVAR*((*p)->n[0])* ((*p)->n[1])*sizeof(real), hipMemcpyDeviceToHost);
//hipMemcpy(*b, *d_b, (((*p)->n[0])* ((*p)->n[1]))*sizeof(real), hipMemcpyDeviceToHost);
//checkErrors("copy data from device");
return 0;
}
int cusource(struct params **p, struct params **d_p, struct state **d_s, real **d_w, real **d_wmod, real **d_dwn1, real **d_wd, int order,int ordero, real dt)
{
int dimp=(((*p)->n[0]))*(((*p)->n[1]));
int field=rho;
int dir=0;
#ifdef USE_SAC_3D
dimp=(((*p)->n[0]))*(((*p)->n[1]))*(((*p)->n[2]));
#endif
int numBlocks = (dimp+numThreadsPerBlock-1) / numThreadsPerBlock;
// hipMemcpy(*w, *d_w, NVAR*((*p)->n[0])* ((*p)->n[1])*sizeof(real), hipMemcpyDeviceToHost);
// if(order==0)
hipMemcpy(*d_p, *p, sizeof(struct params), hipMemcpyHostToDevice);
//centdiff2ci_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p,*d_s, *d_w, *d_wmod, *d_dwn1, *d_wd, order,ordero,dt,field,dir);
//hipDeviceSynchronize();
hipLaunchKernelGGL(( source_parallel), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, *d_p,*d_s, *d_w, *d_wmod, *d_dwn1, *d_wd, order,ordero,dt);
hipDeviceSynchronize();
hipMemcpy(*p, *d_p, sizeof(struct params), hipMemcpyDeviceToHost);
//printf("vx vy e %8.16G %8.16G %8.16G\n", (*p)->chyp[0] , (*p)->chyp[1] ,(*p)->test);
//centdiff2d_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p,*d_s, *d_w, *d_wmod, *d_dwn1, *d_wd, order,ordero,dt,field,dir);
//hipDeviceSynchronize();
// hipMemcpy(*w, *d_w, NVAR*((*p)->n[0])* ((*p)->n[1])*sizeof(real), hipMemcpyDeviceToHost);
//hipMemcpy(*wnew, *d_wnew, NVAR*((*p)->n[0])* ((*p)->n[1])*sizeof(real), hipMemcpyDeviceToHost);
//hipMemcpy(*b, *d_b, (((*p)->n[0])* ((*p)->n[1]))*sizeof(real), hipMemcpyDeviceToHost);
//checkErrors("copy data from device");
return 0;
}
| 9640f6269485f51227fedd9581e4b840eed84726.cu | #include "../include/cudapars.h"
#include "../include/paramssteeringtest1.h"
#include "../include/iobparams.h"
/////////////////////////////////////
// standard imports
/////////////////////////////////////
#include <stdio.h>
#include <math.h>
#include "../include/smaugcukernels.h"
/////////////////////////////////////
// kernel function (CUDA device)
/////////////////////////////////////
#include "../include/gradops_cd2.cuh"
#include "../include/dervfields_cd2.cuh"
#include "../include/usersource_cd2.cuh"
__device__ __host__
real fluxe2(real *dw, real *wd, real *w, real *wmod, struct params *p,int *ii, int dir) {
real ddc=0;
real fi, fim1;
real fip2=0, fim2=0;
real ddcx=0,ddcy=0;
real fluxt=0;
#ifdef USE_SAC
fluxt= +wd[fencode3_cd2(p,ii,ptb)]*grad3dn_cd2(wd,wd,p,ii,vel1+dir,dir);
fluxt += +w[fencode3_cd2(p,ii,b1b)]*w[fencode3_cd2(p,ii,b1b+dir)]*grad3dn_cd2(wd,wd,p,ii,vel1,0)+w[fencode3_cd2(p,ii,b2b)]*w[fencode3_cd2(p,ii,b1b+dir)]*grad3dn_cd2(wd,wd,p,ii,vel1+1,1);
#endif
#ifdef USE_SAC_3D
fluxt= +wd[fencode3_cd2(p,ii,ptb)]*grad3dn_cd2(wd,wd,p,ii,vel1+dir,dir);
fluxt += +w[fencode3_cd2(p,ii,b1b)]*w[fencode3_cd2(p,ii,b1b+dir)]*grad3dn_cd2(wd,wd,p,ii,vel1,0)+w[fencode3_cd2(p,ii,b2b)]*w[fencode3_cd2(p,ii,b1b+dir)]*grad3dn_cd2(wd,wd,p,ii,vel1+1,1)+w[fencode3_cd2(p,ii,b3b)]*w[fencode3_cd2(p,ii,b1b+dir)]*grad3dn_cd2(wd,wd,p,ii,vel1+2,2);
#endif
return fluxt;
}
__device__ __host__
int divflux_cd2(real *dw, real *wd, real *w, struct params *p,int *ii,int field,int dir) {
int direction;
int status=0;
real divflux=0;
dw[fencode3_cd2(p,ii,field)]= grad3dn_cd2(wd,wd,p,ii,flux,dir);
#ifdef USE_SAC
//commented out to test against vac
/*if(field==energy)
{
dw[fencode3_cd2(p,ii,field)]+=fluxe2(dw, wd, w, p,ii,dir)-w[fencode3_cd2(p,ii,rho)]*((p->g[dir])*w[fencode3_cd2(p,ii,mom1+dir)] )/(w[fencode3_cd2(p,ii,rho)]+w[fencode3_cd2(p,ii,rhob)]);
}*/
#endif
return ( status);
}
__device__ __host__
int addenergyterms_cd2(real *dw, real *wd, real *w, real *wmod, struct params *p,int *ii,int field,int dir) {
int direction;
int status=0;
real divflux=0;
#if defined USE_SAC || defined USE_SAC_3D
if(field==energy)
{
//computept3_cd2(w,wd,p,ii);
//wmod[fencode3_cd2(p,ii,field)]+=fluxe2(dw, wd, wmod, p,ii,dir);/*+w[fencode3_cd2(p,ii,rho)]*((p->g[dir])*w[fencode3_cd2(p,ii,mom1+dir)] )/(w[fencode3_cd2(p,ii,rho)]+w[fencode3_cd2(p,ii,rhob)]);*/
wmod[fencode3_cd2(p,ii,field)]-= +(p->dt)*wd[fencode3_cd2(p,ii,ptb)]*grad3dn_cd2(wd,wd,p,ii,vel1+dir,dir);
//wmod[fencode3_cd2(p,ii,field)]-= +(p->dt)*wd[fencode3_cd2(p,ii,ptb)]*grad3d_cd2(wd,p,ii,vel1+dir,dir);
for(int idim=0;idim<NDIM;idim++)
wmod[fencode3_cd2(p,ii,field)]+=(p->dt)*wmod[fencode3_cd2(p,ii,b1b+idim)]*wmod[fencode3_cd2(p,ii,b1b+dir)]*grad3dn_cd2(wd,wd,p,ii,vel1+dir,idim);
//wmod[fencode3_cd2(p,ii,field)]+=(p->dt)*wmod[fencode3_cd2(p,ii,b1b+idim)]*wmod[fencode3_cd2(p,ii,b1b+dir)]*grad3d_cd2(wd,p,ii,vel1+idim,idim);
//fluxt= +(((p->gamma)-1)*w[fencode3_cd2(p,ii,energyb)]- 0.5*((p->gamma)-2)*(w[fencode3_cd2(p,ii,b1b)]*w[fencode3_cd2(p,ii,b1b)]+w[fencode3_cd2(p,ii,b2b)]*w[fencode3_cd2(p,ii,b2b)]+w[fencode3_cd2(p,ii,b3b)]*w[fencode3_cd2(p,ii,b3b)]))*grad3d_cd2(wd,p,ii,vel1+dir,dir);
//flux= -(((p->gamma)-1)*w[fencode3_cd2(p,ii,energyb)]- 0.5*((p->gamma)-2)*(w[fencode3_cd2(p,ii,b1b)]*w[fencode3_cd2(p,ii,b1b)]+w[fencode3_cd2(p,ii,b2b)]*w[fencode3_cd2(p,ii,b2b)]+w[fencode3_cd2(p,ii,b3b)]*w[fencode3_cd2(p,ii,b3b)]))*grad3d_cd2(wd,p,ii,vel1+dir,dir);
// fluxt += +w[fencode3_cd2(p,ii,b1b)]*w[fencode3_cd2(p,ii,b1b+dir)]*grad3d_cd2(wd,p,ii,vel1,0)+w[fencode3_cd2(p,ii,b2b)]*w[fencode3_cd2(p,ii,b1b+dir)]*grad3d_cd2(wd,p,ii,vel1+1,1)+w[fencode3_cd2(p,ii,b3b)]*w[fencode3_cd2(p,ii,b1b+dir)]*grad3d_cd2(wd,p,ii,vel1+2,2);
}
#endif
return ( status);
}
__device__ __host__
int addgrav_cd2(real *dw, real *wd, real *w, real *wmod, struct params *p,int *ii) {
//int direction;
int status=0;
int field,dir;
//real divflux=0;
//dw[fencode3_cd2(p,ii,field)]= grad_cd2(wd,p,ii,flux,dir);//+grad_cd2(wd,p,ii,f2,1);
for(field=rho;field<NVAR;field++)
{
switch(field)
{
case mom1:
case mom2:
#ifdef USE_SAC_3D
case mom3:
#endif
dir=field-mom1;
wmod[fencode3_cd2(p,ii,field)]+=(p->dt)* (p->g[dir])*w[fencode3_cd2(p,ii,rho)];
break;
case energy:
for(dir=0; dir<NDIM; dir++)
wmod[fencode3_cd2(p,ii,field)]+=(p->dt)*w[fencode3_cd2(p,ii,rho)]*((p->g[dir])*w[fencode3_cd2(p,ii,mom1+dir)] )/(w[fencode3_cd2(p,ii,rho)]+w[fencode3_cd2(p,ii,rhob)]);
break;
}
}
return ( status);
}
__device__ __host__
real transportflux_cd2 (real *dw, real *wd, real *w, struct params *p,int *ii,int field, int direction) {
// real fluxt=0;
//transport flux: velocity = momentum / total (perturbed + background) density
//using the precomputed velocity in wd instead would need fewer operations
#if defined USE_SAC || defined USE_SAC_3D
return(w[fencode3_cd2(p,ii,mom1+direction)]*w[fencode3_cd2(p,ii,field)]/(w[fencode3_cd2(p,ii,rho)]+w[fencode3_cd2(p,ii,rhob)]));
// flux= wd[fencode3_cd2(p,ii,vel1+direction)]*w[fencode3_cd2(p,ii,field)];
#else
return(w[fencode3_cd2(p,ii,mom1+direction)]*w[fencode3_cd2(p,ii,field)]/w[fencode3_cd2(p,ii,rho)]);
//flux= w[fencode3_cd2(p,ii,vel1+direction)]*w[fencode3_cd2(p,ii,field)];
#endif
}
__device__ __host__
real fluxb1(real *dw, real *wd, real *w, struct params *p,int *ii,int field, int direction) {
real fluxt=0;
#if defined USE_SAC || defined USE_SAC_3D
fluxt= -(w[fencode3_cd2(p,ii,b1+direction)]+w[fencode3_cd2(p,ii,field+(NDIM+2)+direction)])*w[fencode3_cd2(p,ii,mom1+(field-b1))]/(w[fencode3_cd2(p,ii,rho)]+w[fencode3_cd2(p,ii,rhob)]);
fluxt+= (w[fencode3_cd2(p,ii,field+(NDIM+2))])*w[fencode3_cd2(p,ii,mom1+direction)]/(w[fencode3_cd2(p,ii,rho)]+w[fencode3_cd2(p,ii,rhob)]);
#endif
return fluxt;
}
__device__ __host__
real fluxe1(real *dw, real *wd, real *w, struct params *p,int *ii, int direction) {
real ddc=0;
real fi, fim1;
real fip2=0, fim2=0;
real ddcx=0,ddcy=0;
real fluxt=0;
//computept3_cd2(w,wd,p,ii);
#if defined USE_SAC
fluxt = w[fencode3_cd2(p,ii,mom1+direction)]*(wd[fencode3_cd2(p,ii,pressuret)]);
fluxt -= w[fencode3_cd2(p,ii,b1+direction)]*(w[fencode3_cd2(p,ii,b1b)]*w[fencode3_cd2(p,ii,mom1)]+w[fencode3_cd2(p,ii,b2b)]*w[fencode3_cd2(p,ii,mom2)]);
fluxt -= w[fencode3_cd2(p,ii,b1b+direction)]*(w[fencode3_cd2(p,ii,b1)]*w[fencode3_cd2(p,ii,mom1)]+w[fencode3_cd2(p,ii,b2)]*w[fencode3_cd2(p,ii,mom2)]);
fluxt /= (w[fencode3_cd2(p,ii,rho)]+w[fencode3_cd2(p,ii,rhob)]);
fluxt += w[fencode3_cd2(p,ii,mom1+direction)]*w[fencode3_cd2(p,ii,energyb)]/(w[fencode3_cd2(p,ii,rho)]+w[fencode3_cd2(p,ii,rhob)]);
fluxt -=w[fencode3_cd2(p,ii,b1+direction)]*(w[fencode3_cd2(p,ii,b1)]*w[fencode3_cd2(p,ii,mom1)]+w[fencode3_cd2(p,ii,b2)]*w[fencode3_cd2(p,ii,mom2)])/(w[fencode3_cd2(p,ii,rho)]+w[fencode3_cd2(p,ii,rhob)]);
#endif
#ifdef USE_SAC_3D
fluxt = w[fencode3_cd2(p,ii,mom1+direction)]*(wd[fencode3_cd2(p,ii,pressuret)]);
fluxt -= w[fencode3_cd2(p,ii,b1+direction)]*(w[fencode3_cd2(p,ii,b1b)]*w[fencode3_cd2(p,ii,mom1)]+w[fencode3_cd2(p,ii,b2b)]*w[fencode3_cd2(p,ii,mom2)]+w[fencode3_cd2(p,ii,b3b)]*w[fencode3_cd2(p,ii,mom3)]);
fluxt -= w[fencode3_cd2(p,ii,b1b+direction)]*(w[fencode3_cd2(p,ii,b1)]*w[fencode3_cd2(p,ii,mom1)]+w[fencode3_cd2(p,ii,b2)]*w[fencode3_cd2(p,ii,mom2)]+w[fencode3_cd2(p,ii,b3)]*w[fencode3_cd2(p,ii,mom3)]);
fluxt /= (w[fencode3_cd2(p,ii,rho)]+w[fencode3_cd2(p,ii,rhob)]);
fluxt +=w[fencode3_cd2(p,ii,mom1+direction)]*w[fencode3_cd2(p,ii,energyb)]/(w[fencode3_cd2(p,ii,rho)]+w[fencode3_cd2(p,ii,rhob)]);
fluxt -=w[fencode3_cd2(p,ii,b1+direction)]*(w[fencode3_cd2(p,ii,b1)]*w[fencode3_cd2(p,ii,mom1)]+w[fencode3_cd2(p,ii,b2)]*w[fencode3_cd2(p,ii,mom2)]+w[fencode3_cd2(p,ii,b3)]*w[fencode3_cd2(p,ii,mom3)])/(w[fencode3_cd2(p,ii,rho)]+w[fencode3_cd2(p,ii,rhob)]);
#endif
return fluxt;
}
__device__ __host__
int computefluxe(real *dw, real *wd, real *w, struct params *p,int *ii,int direction) {
int field;//, direction;
int status=0;
wd[fencode3_cd2(p,ii,flux)]=0.0;
#if defined USE_SAC || defined USE_SAC_3D
wd[fencode3_cd2(p,ii,flux)]= transportflux_cd2(dw,wd,w,p,ii,energy,direction)+fluxe1(dw,wd,w,p,ii,direction);
#endif
return ( status);
}
__device__ __host__
int computefluxb1 (real *dw, real *wd, real *w, struct params *p,int *ii, int field,int direction) {
int status=0;
wd[fencode3_cd2(p,ii,flux)]=0.0;
if(direction==0)
wd[fencode3_cd2(p,ii,flux)]= 0.0;
else
#if defined USE_SAC || defined USE_SAC_3D
wd[fencode3_cd2(p,ii,flux)]= transportflux_cd2(dw,wd,w,p,ii,field,direction)-(w[fencode3_cd2(p,ii,b1+direction)]+w[fencode3_cd2(p,ii,b1b+direction)])*w[fencode3_cd2(p,ii,mom1)]/(w[fencode3_cd2(p,ii,rho)]+w[fencode3_cd2(p,ii,rhob)])+ (w[fencode3_cd2(p,ii,b1b)])*w[fencode3_cd2(p,ii,mom1+direction)]/(w[fencode3_cd2(p,ii,rho)]+w[fencode3_cd2(p,ii,rhob)]);//+fluxb1(dw,wd,w,p,ii,field,direction);
#endif
return ( status);
}
__device__ __host__
int computefluxb2 (real *dw, real *wd, real *w, struct params *p,int *ii, int field,int direction) {
int status=0;
wd[fencode3_cd2(p,ii,flux)]=0.0;
if(direction==1)
wd[fencode3_cd2(p,ii,flux)]= 0.0;
else
#if defined USE_SAC || defined USE_SAC_3D
wd[fencode3_cd2(p,ii,flux)]= transportflux_cd2(dw,wd,w,p,ii,field,direction)-(w[fencode3_cd2(p,ii,b1+direction)]+w[fencode3_cd2(p,ii,b1b+direction)])*w[fencode3_cd2(p,ii,mom2)]/(w[fencode3_cd2(p,ii,rho)]+w[fencode3_cd2(p,ii,rhob)])+ (w[fencode3_cd2(p,ii,b2b)])*w[fencode3_cd2(p,ii,mom1+direction)]/(w[fencode3_cd2(p,ii,rho)]+w[fencode3_cd2(p,ii,rhob)]);//+fluxb1(dw,wd,w,p,ii,field,direction);
#endif
return ( status);
}
__device__ __host__
int computefluxb3 (real *dw, real *wd, real *w, struct params *p,int *ii, int field,int direction) {
wd[fencode3_cd2(p,ii,flux)]=0.0;
int status=0;
#ifdef USE_SAC_3D
if(direction==2)
wd[fencode3_cd2(p,ii,flux)]= 0.0;
else
wd[fencode3_cd2(p,ii,flux)]= transportflux_cd2(dw,wd,w,p,ii,field,direction)-(w[fencode3_cd2(p,ii,b1+direction)]+w[fencode3_cd2(p,ii,b1b+direction)])*w[fencode3_cd2(p,ii,mom3)]/(w[fencode3_cd2(p,ii,rho)]+w[fencode3_cd2(p,ii,rhob)])+ (w[fencode3_cd2(p,ii,b3b)])*w[fencode3_cd2(p,ii,mom1+direction)]/(w[fencode3_cd2(p,ii,rho)]+w[fencode3_cd2(p,ii,rhob)]);//+fluxb1(dw,wd,w,p,ii,field,direction);
#endif
return ( status);
}
//rho, mom1, mom2, mom3, energy, b1, b2, b3
__device__ __host__
void computeflux_cd2 (real *dw, real *wd, real *w, struct params *p,int *ii, int field,int dir) {
//int status=0;
switch(field)
{
case energy:
computefluxe(dw,wd,w,p,ii,dir);
// add the following terms for SAC
// del((b bb+ bb b).v)+ptb del v - bb bb del v
break;
case b1:
computefluxb1(dw,wd,w,p,ii,field,dir);
break;
case b2:
computefluxb2(dw,wd,w,p,ii,field,dir);
break;
#ifdef USE_SAC_3D
case b3:
computefluxb3(dw,wd,w,p,ii,field,dir);
break;
#endif
}
//return ( status);
}
__global__ void centdiff2a_parallel(struct params *p, struct state *s, real *w, real *wmod,
real *dwn1, real *wd, int order, int ordero, real dt,int f,int dir)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j,fid;
// int index;
int ni=p->n[0];
int nj=p->n[1];
int ii[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int nk=p->n[2];
int kp;
real dz=p->dx[2];
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
int ip,jp;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
fid=0;
ii[0]=ip;
ii[1]=jp;
#ifdef USE_SAC_3D
ii[2]=kp;
#endif
#ifdef USE_SAC_3D
if(ii[0]<((p->n[0])-2) && ii[0]>1 && ii[1]>1 && ii[1]<((p->n[1])-2) && ii[2]>1 && ii[2]<((p->n[2])-2))
#else
if(ii[0]<((p->n[0]))-2 && ii[0]>1 && ii[1]>1 && ii[1]<((p->n[1])-2))
#endif
divflux_cd2(dwn1,wd,wmod+order*NVAR*dimp,p,ii,f,dir);
__syncthreads();
}
__global__ void centdiff2b_parallel(struct params *p, struct state *s, real *w, real *wmod,
real *dwn1, real *wd, int order, int ordero, real dt,int f,int dir)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j,fid;
int ni=p->n[0];
int nj=p->n[1];
int ii[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int nk=p->n[2];
int kp;
real dz=p->dx[2];
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
int ip,jp;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
fid=0;
ii[0]=ip;
ii[1]=jp;
#ifdef USE_SAC_3D
ii[2]=kp;
#endif
real del;
switch(dir)
{
case 0:
#ifdef USE_SAC_3D
if(ii[0]<((p->n[0])) && ii[1]>1 && ii[1]<((p->n[1])-2) && ii[2]>1 && ii[2]<((p->n[2])-2))
#else
if(ii[0]<((p->n[0])) && ii[1]>1 && ii[1]<((p->n[1])-2))
#endif
wmod[fencode3_cd2(p,ii,f)+(ordero*NVAR*dimp)]=wmod[fencode3_cd2(p,ii,f)+(ordero*NVAR*dimp)]-dt*dwn1[fencode3_cd2(p,ii,f)];
break;
case 1:
#ifdef USE_SAC_3D
if(ii[0]>1 && ii[0]<((p->n[0])-2) && ii[1]<((p->n[1])) && ii[2]>1 && ii[2]<((p->n[2])-2))
#else
if(ii[0]>1 && ii[0]<((p->n[0])-2) && ii[1]<((p->n[1])) )
#endif
wmod[fencode3_cd2(p,ii,f)+(ordero*NVAR*dimp)]=wmod[fencode3_cd2(p,ii,f)+(ordero*NVAR*dimp)]-dt*dwn1[fencode3_cd2(p,ii,f)];
break;
#ifdef USE_SAC_3D
case 2:
if(ii[0]>1 && ii[0]<((p->n[0])-2) && ii[1]>1 && ii[1]<((p->n[1])-2) && ii[2]<((p->n[2])))
wmod[fencode3_cd2(p,ii,f)+(ordero*NVAR*dimp)]=wmod[fencode3_cd2(p,ii,f)+(ordero*NVAR*dimp)]-dt*dwn1[fencode3_cd2(p,ii,f)];
break;
#endif
}
__syncthreads();
}
__global__ void centdiff2ci_parallel(struct params *p, struct state *s, real *w, real *wmod,
real *dwn1, real *wd, int order, int ordero, real dt,int f,int dir)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j,fid;
int ni=p->n[0];
int nj=p->n[1];
int ii[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int nk=p->n[2];
int kp;
real dz=p->dx[2];
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
int ip,jp;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
fid=0;
//compute pbg used in next source term
ii[0]=ip;
ii[1]=jp;
#ifdef USE_SAC_3D
ii[2]=kp;
#endif
#ifdef USE_SAC_3D
if(ii[0]<p->n[0] && ii[1]<p->n[1] && ii[2]<p->n[2])
#else
if(ii[0]<p->n[0] && ii[1]<p->n[1])
#endif
{
//computevel3_cd2(wmod+(order*NVAR*dimp),wd,p,ii);
computepbg3_cd2(wmod+(ordero*NVAR*dimp),wd,p,ii);
}
__syncthreads();
ii[0]=ip;
ii[1]=jp;
#ifdef USE_SAC_3D
ii[2]=kp;
#endif
#ifdef USE_SAC_3D
if(ii[0]<p->n[0] && ii[1]<p->n[1] && ii[2]<p->n[2])
#else
if(ii[0]<p->n[0] && ii[1]<p->n[1])
#endif
dwn1[fencode3_cd2(p,ii,f)]=0.0;
__syncthreads();
}
__global__ void centdiff2c_parallel(struct params *p, struct state *s, real *w, real *wmod,
real *dwn1, real *wd, int order, int ordero, real dt,int f,int dir)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j,fid;
// int index;
int ni=p->n[0];
int nj=p->n[1];
int ii[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int nk=p->n[2];
int kp;
real dz=p->dx[2];
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
int ip,jp;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
fid=0;
//add the background-pressure and magnetic energy source terms
ii[0]=ip;
ii[1]=jp;
#ifdef USE_SAC_3D
ii[2]=kp;
#endif
//if(i>1 && j >1 && i<(ni-2) && j<(nj-2))
#ifdef USE_SAC_3D
if(ii[0]<((p->n[0])-2) && ii[1]<((p->n[1])-2) && ii[2]<((p->n[2])-2) && ii[0]>1 && ii[1]>1 && ii[2]>1 )
#else
if(ii[0]<(p->n[0])-2 && ii[1]<(p->n[1])-2)
#endif
addenergyterms_cd2(dwn1,wd,w,wmod+ordero*NVAR*dimp,p,ii,f,dir);
/* #if(defined(USE_SAC_3D) && defined(USE_USERSOURCE))
//if(ii[0]<((p->n[0])-2) && ii[1]<((p->n[1])-2) && ii[2]<((p->n[2])-2) && ii[0]>1 && ii[1]>1 && ii[2]>1 )
if(ii[0]<((p->n[0])) && ii[1]<((p->n[1])) && ii[2]<((p->n[2])) )
#endif
#if(defined(USE_SAC) && defined(USE_USERSOURCE))
//if(ii[0]<(p->n[0])-2 && ii[1]<(p->n[1])-2)
if(ii[0]<(p->n[0]) && ii[1]<(p->n[1]))
#endif
#ifdef USE_USERSOURCE
addsourceterms2_cd2(dwn1,wd,wmod+ordero*NVAR*dimp,p,s,ii,f,dir);
#endif*/
/*if( ii[1] <(nj) && ii[0]<(ni) )
if(p->ipe==1 && ii[1]==125 && (p->it)==2)
{
wmod[fencode3_cd2(p,ii,rho)]=0.22113;
w[fencode3_cd2(p,ii,rho)]=0.22113;
}*/
/* if( ii[1] <(nj) && ii[0]<(ni) )
if(p->ipe==3 && ii[1]==3 && (p->it)==2)
{
wmod[fencode3_cd2(p,ii,rho)]=0.22118;
w[fencode3_cd2(p,ii,rho)]=0.22118;
}*/
/*if( ii[1] <(nj) && ii[0]<(ni) )
if(p->ipe==1 && ii[1]==127 && (p->it)==2)
{
wmod[fencode3_cd2(p,ii,rho)]=wmod[fencode_cd2(p,ii[0],ii[1]-4,rho)];
w[fencode3_cd2(p,ii,rho)]= w[fencode_cd2(p,ii[0],ii[1]-4,rho)];
}
if( ii[1] <(nj) && ii[0]<(ni) )
if(p->ipe==3 && ii[1]==0 && (p->it)==2)
{
wmod[fencode3_cd2(p,ii,rho)]=wmod[fencode_cd2(p,ii[0],ii[1]+4,rho)];
w[fencode3_cd2(p,ii,rho)]= w[fencode_cd2(p,ii[0],ii[1]+4,rho)];
}
if( ii[1] <(nj) && ii[0]<(ni) )
if(p->ipe==1 && ii[1]==126 && (p->it)==2)
{
wmod[fencode3_cd2(p,ii,rho)]=wmod[fencode_cd2(p,ii[0],ii[1]-4,rho)];
w[fencode3_cd2(p,ii,rho)]= w[fencode_cd2(p,ii[0],ii[1]-4,rho)];
}
if( ii[1] <(nj) && ii[0]<(ni) )
if(p->ipe==3 && ii[1]==1 && (p->it)==2)
{
wmod[fencode3_cd2(p,ii,rho)]=wmod[fencode_cd2(p,ii[0],ii[1]+4,rho)];
w[fencode3_cd2(p,ii,rho)]= w[fencode_cd2(p,ii[0],ii[1]+4,rho)];
}*/
__syncthreads();
}
__global__ void grav_parallel(struct params *p, struct state *s, real *w, real *wmod,
real *dwn1, real *wd, int order, int ordero, real dt,int f,int dir)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j,fid;
int ni=p->n[0];
int nj=p->n[1];
int ii[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int nk=p->n[2];
int kp;
real dz=p->dx[2];
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
int ip,jp;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
fid=0;
//add the gravitational source terms
ii[0]=ip;
ii[1]=jp;
#ifdef USE_SAC_3D
ii[2]=kp;
#endif
#ifdef USE_SAC_3D
if(ii[0]<((p->n[0])-2) && ii[1]<((p->n[1])-2) && ii[2]<((p->n[2])-2) && ii[0]>1 && ii[1]>1 && ii[2]>1 )
#else
if(ii[0]<(p->n[0])-2 && ii[1]<(p->n[1])-2)
#endif
addgrav_cd2(dwn1,wd,w,wmod+ordero*NVAR*dimp,p,ii);
__syncthreads();
}
__global__ void source_parallel(struct params *p, struct state *s, real *w, real *wmod,
real *dwn1, real *wd, int order, int ordero, real dt)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j,fid;
int f,dir;
int ni=p->n[0];
int nj=p->n[1];
int ii[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int nk=p->n[2];
int kp;
real dz=p->dx[2];
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
int ip,jp;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
fid=0;
//compute pbg used in next source term
ii[0]=ip;
ii[1]=jp;
#ifdef USE_SAC_3D
ii[2]=kp;
#endif
#if(defined(USE_SAC_3D) && defined(USE_USERSOURCE))
if(ii[0]<((p->n[0])) && ii[1]<((p->n[1])) && ii[2]<((p->n[2])) )
#endif
#if(defined(USE_SAC) && defined(USE_USERSOURCE))
if(ii[0]<(p->n[0]) && ii[1]<(p->n[1]))
#endif
#ifdef USE_USERSOURCE
addsourceterms2_cd2(dwn1,wd,wmod+ordero*NVAR*dimp,p,s,ii,f,dir);
#endif
__syncthreads();
}
__global__ void centdiff2d_parallel(struct params *p, struct state *s, real *w, real *wmod,
real *dwn1, real *wd, int order, int ordero, real dt,int f,int dir)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j,fid;
int ni=p->n[0];
int nj=p->n[1];
int ii[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int nk=p->n[2];
int kp;
real dz=p->dx[2];
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
int ip,jp;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
fid=0;
ii[0]=ip;
ii[1]=jp;
#ifdef USE_SAC_3D
ii[2]=kp;
#endif
#ifdef USE_SAC
if(ii[0]<ni && ii[1]<(nj))
#endif
#ifdef USE_SAC_3D
if(ii[0]<ni && ii[1]<(nj) && ii[2]<(nk))
#endif
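//forward-Euler update: wmod <- wmod - dt*dwn1 for field f, written into the
//stage buffer selected by ordero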
wmod[fencode3_cd2(p,ii,f)+(ordero*NVAR*dimp)]=wmod[fencode3_cd2(p,ii,f)+(ordero*NVAR*dimp)]-dt*dwn1[fencode3_cd2(p,ii,f)];
__syncthreads();
}
__global__ void centdiff2_parallel(struct params *p, struct state *s, real *w, real *wmod,
real *dwn1, real *wd, int order, int ordero, real dt,int f,int dir)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j,fid;
int ni=p->n[0];
int nj=p->n[1];
int ii[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int kp;
real dz=p->dx[2];
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
int ip,jp;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
fid=0;
ii[0]=ip;
ii[1]=jp;
#ifdef USE_SAC_3D
ii[2]=kp;
#endif
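//dir selects the flux sweep direction; each case keeps two ghost cells on
//either side of the transverse axes out of the update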
switch(dir)
{
case 0:
#ifdef USE_SAC_3D
if(ii[0]<p->n[0] && ii[1]>1 && ii[1]<(p->n[1]-2) && ii[2]>1 && ii[2]<(p->n[2]-2))
#else
if(ii[0]<p->n[0] && ii[1]>1 && ii[1]<(p->n[1]-2))
#endif
computeflux_cd2(dwn1,wd,wmod+order*NVAR*dimp,p,ii,f,0);
break;
case 1:
#ifdef USE_SAC_3D
if(ii[1]<p->n[1] && ii[0]>1 && ii[0]<(p->n[0]-2) && ii[2]>1 && ii[2]<(p->n[2]-2))
#else
if(ii[1]<p->n[1] && ii[0]>1 && ii[0]<(p->n[0]-2))
#endif
computeflux_cd2(dwn1,wd,wmod+order*NVAR*dimp,p,ii,f,1);
break;
#ifdef USE_SAC_3D
case 2:
if(ii[2]<p->n[2] && ii[0]>1 && ii[0]<(p->n[0]-2) && ii[1]>1 && ii[1]<(p->n[1]-2))
computeflux_cd2(dwn1,wd,wmod+order*NVAR*dimp,p,ii,f,2);
break;
#endif
}
__syncthreads();
}
__global__ void centdiff2init_parallel(struct params *p, struct state *s, real *w, real *wmod,
real *dwn1, real *wd, int order, int ordero, real dt,int f,int dir)
{
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j,fid;
int ni=p->n[0];
int nj=p->n[1];
int ii[NDIM];
int dimp=((p->n[0]))*((p->n[1]));
#ifdef USE_SAC_3D
int kp;
real dz=p->dx[2];
dimp=((p->n[0]))*((p->n[1]))*((p->n[2]));
#endif
int ip,jp;
#ifdef USE_SAC_3D
kp=iindex/(nj*ni);
jp=(iindex-(kp*(nj*ni)))/ni;
ip=iindex-(kp*nj*ni)-(jp*ni);
#else
jp=iindex/ni;
ip=iindex-(jp*ni);
#endif
fid=0;
ii[0]=ip;
ii[1]=jp;
#ifdef USE_SAC_3D
ii[2]=kp;
#endif
#ifdef USE_SAC_3D
if(ii[0]<p->n[0] && ii[1]<p->n[1] && ii[2]<p->n[2])
#else
if(ii[0]<p->n[0] && ii[1]<p->n[1])
#endif
{
dwn1[fencode3_cd2(p,ii,f)]=0.0;
wd[fencode3_cd2(p,ii,flux)]=0.0;
}
__syncthreads();
}
/////////////////////////////////////
// error checking routine
/////////////////////////////////////
void checkErrors_cd2(const char *label)
{
// we need to synchronise first to catch errors due to
// asynchronous operations that would otherwise
// potentially go unnoticed
cudaError_t err;
err = cudaThreadSynchronize();
if (err != cudaSuccess)
{
const char *e = cudaGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)", e, label);
}
err = cudaGetLastError();
if (err != cudaSuccess)
{
const char *e = cudaGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)", e, label);
}
}
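//hypothetical usage sketch (these call sites are not in the original file):
//  grav_parallel<<<numBlocks, numThreadsPerBlock>>>(...);
//  checkErrors_cd2("grav_parallel");
//cudaThreadSynchronize() is deprecated in current CUDA;
//cudaDeviceSynchronize() is the drop-in replacement.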
int cucentdiff2(struct params **p, struct params **d_p, struct state **d_s, real **d_w, real **d_wmod, real **d_dwn1, real **d_wd, int order,int ordero, real dt, int field,int dir)
{
int dimp=(((*p)->n[0]))*(((*p)->n[1]));
#ifdef USE_SAC_3D
dimp=(((*p)->n[0]))*(((*p)->n[1]))*(((*p)->n[2]));
#endif
int numBlocks = (dimp+numThreadsPerBlock-1) / numThreadsPerBlock;
// cudaMemcpy(*w, *d_w, NVAR*((*p)->n[0])* ((*p)->n[1])*sizeof(real), cudaMemcpyDeviceToHost);
cudaMemcpy(*d_p, *p, sizeof(struct params), cudaMemcpyHostToDevice);
centdiff2init_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p, *d_s,*d_w, *d_wmod, *d_dwn1, *d_wd, order,ordero,dt,field,dir);
cudaThreadSynchronize();
centdiff2_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p, *d_s,*d_w, *d_wmod, *d_dwn1, *d_wd, order,ordero,dt,field,dir);
cudaThreadSynchronize();
centdiff2a_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p,*d_s, *d_w, *d_wmod, *d_dwn1, *d_wd, order,ordero,dt,field,dir);
cudaThreadSynchronize();
centdiff2b_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p,*d_s, *d_w, *d_wmod, *d_dwn1, *d_wd, order,ordero,dt,field,dir);
cudaThreadSynchronize();
centdiff2ci_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p,*d_s, *d_w, *d_wmod, *d_dwn1, *d_wd, order,ordero,dt,field,dir);
cudaThreadSynchronize();
centdiff2c_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p,*d_s, *d_w, *d_wmod, *d_dwn1, *d_wd, order,ordero,dt,field,dir);
cudaThreadSynchronize();
//cudaMemcpy(*p, *d_p, sizeof(struct params), cudaMemcpyDeviceToHost);
//printf("source params %G %f %f\n",(*p)->test, (*p)->chyp[0] , (*p)->chyp[1]);
//printf("source params %G \n",(*p)->test);
//centdiff2d_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p,*d_s, *d_w, *d_wmod, *d_dwn1, *d_wd, order,ordero,dt,field,dir);
//cudaThreadSynchronize();
// cudaMemcpy(*w, *d_w, NVAR*((*p)->n[0])* ((*p)->n[1])*sizeof(real), cudaMemcpyDeviceToHost);
//cudaMemcpy(*wnew, *d_wnew, NVAR*((*p)->n[0])* ((*p)->n[1])*sizeof(real), cudaMemcpyDeviceToHost);
//cudaMemcpy(*b, *d_b, (((*p)->n[0])* ((*p)->n[1]))*sizeof(real), cudaMemcpyDeviceToHost);
//checkErrors("copy data from device");
return 0;
}
int cugrav(struct params **p, struct params **d_p, struct state **d_s, real **d_w, real **d_wmod, real **d_dwn1, real **d_wd, int order,int ordero, real dt)
{
int dimp=(((*p)->n[0]))*(((*p)->n[1]));
int field=rho;
int dir=0;
#ifdef USE_SAC_3D
dimp=(((*p)->n[0]))*(((*p)->n[1]))*(((*p)->n[2]));
#endif
int numBlocks = (dimp+numThreadsPerBlock-1) / numThreadsPerBlock;
cudaMemcpy(*d_p, *p, sizeof(struct params), cudaMemcpyHostToDevice);
grav_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p,*d_s, *d_w, *d_wmod, *d_dwn1, *d_wd, order,ordero,dt,field,dir);
cudaThreadSynchronize();
//cudaMemcpy(*p, *d_p, sizeof(struct params), cudaMemcpyDeviceToHost);
//printf("source params %G %f %f %G\n",(*p)->test, (*p)->chyp[0] , (*p)->chyp[1] , (*p)->chyp[2]);
//centdiff2d_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p,*d_s, *d_w, *d_wmod, *d_dwn1, *d_wd, order,ordero,dt,field,dir);
//cudaThreadSynchronize();
// cudaMemcpy(*w, *d_w, NVAR*((*p)->n[0])* ((*p)->n[1])*sizeof(real), cudaMemcpyDeviceToHost);
//cudaMemcpy(*wnew, *d_wnew, NVAR*((*p)->n[0])* ((*p)->n[1])*sizeof(real), cudaMemcpyDeviceToHost);
//cudaMemcpy(*b, *d_b, (((*p)->n[0])* ((*p)->n[1]))*sizeof(real), cudaMemcpyDeviceToHost);
//checkErrors("copy data from device");
return 0;
}
int cusource(struct params **p, struct params **d_p, struct state **d_s, real **d_w, real **d_wmod, real **d_dwn1, real **d_wd, int order,int ordero, real dt)
{
int dimp=(((*p)->n[0]))*(((*p)->n[1]));
int field=rho;
int dir=0;
#ifdef USE_SAC_3D
dimp=(((*p)->n[0]))*(((*p)->n[1]))*(((*p)->n[2]));
#endif
int numBlocks = (dimp+numThreadsPerBlock-1) / numThreadsPerBlock;
// cudaMemcpy(*w, *d_w, NVAR*((*p)->n[0])* ((*p)->n[1])*sizeof(real), cudaMemcpyDeviceToHost);
// if(order==0)
cudaMemcpy(*d_p, *p, sizeof(struct params), cudaMemcpyHostToDevice);
//centdiff2ci_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p,*d_s, *d_w, *d_wmod, *d_dwn1, *d_wd, order,ordero,dt,field,dir);
//cudaThreadSynchronize();
source_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p,*d_s, *d_w, *d_wmod, *d_dwn1, *d_wd, order,ordero,dt);
cudaThreadSynchronize();
cudaMemcpy(*p, *d_p, sizeof(struct params), cudaMemcpyDeviceToHost);
//printf("vx vy e %8.16G %8.16G %8.16G\n", (*p)->chyp[0] , (*p)->chyp[1] ,(*p)->test);
//centdiff2d_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p,*d_s, *d_w, *d_wmod, *d_dwn1, *d_wd, order,ordero,dt,field,dir);
//cudaThreadSynchronize();
// cudaMemcpy(*w, *d_w, NVAR*((*p)->n[0])* ((*p)->n[1])*sizeof(real), cudaMemcpyDeviceToHost);
//cudaMemcpy(*wnew, *d_wnew, NVAR*((*p)->n[0])* ((*p)->n[1])*sizeof(real), cudaMemcpyDeviceToHost);
//cudaMemcpy(*b, *d_b, (((*p)->n[0])* ((*p)->n[1]))*sizeof(real), cudaMemcpyDeviceToHost);
//checkErrors("copy data from device");
return 0;
}
|
f92b3d1617ad235b0cdb9bf9639df492940133a5.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <stdio.h>
#include <cassert>
#include <vector>
#include "glog/logging.h"
#include "paddle/fluid/inference/tensorrt/plugin/layer_norm_op_plugin.h"
#include "paddle/phi/kernels/layer_norm_kernel.h"
namespace paddle {
namespace inference {
namespace tensorrt {
namespace plugin {
int LayerNormPlugin::initialize() TRT_NOEXCEPT {
hipMalloc(&bias_gpu_, sizeof(float) * bias_.size());
hipMemcpy(bias_gpu_,
bias_.data(),
bias_.size() * sizeof(float),
hipMemcpyHostToDevice);
hipMalloc(&scale_gpu_, sizeof(float) * scale_.size());
hipMemcpy(scale_gpu_,
scale_.data(),
scale_.size() * sizeof(float),
hipMemcpyHostToDevice);
return 0;
}
void LayerNormPlugin::terminate() TRT_NOEXCEPT {
if (bias_gpu_) {
hipFree(bias_gpu_);
bias_gpu_ = nullptr;
}
if (scale_gpu_) {
hipFree(scale_gpu_);
scale_gpu_ = nullptr;
}
}
nvinfer1::Dims LayerNormPlugin::getOutputDimensions(
int index, const nvinfer1::Dims *inputDims, int nbInputs) TRT_NOEXCEPT {
assert(nbInputs == 1);
assert(index < this->getNbOutputs());
nvinfer1::Dims const &input_dims = inputDims[0];
nvinfer1::Dims output_dims = input_dims;
return output_dims;
}
bool LayerNormPlugin::supportsFormat(
nvinfer1::DataType type, nvinfer1::PluginFormat format) const TRT_NOEXCEPT {
if (with_fp16_) {
return ((type == nvinfer1::DataType::kFLOAT ||
type == nvinfer1::DataType::kHALF) &&
(format == nvinfer1::PluginFormat::kLINEAR));
} else {
return ((type == nvinfer1::DataType::kFLOAT) &&
(format == nvinfer1::PluginFormat::kLINEAR));
}
}
int LayerNormPlugin::enqueue(int batch_size,
const void *const *inputs,
#if IS_TRT_VERSION_LT(8000)
void **outputs,
void *workspace,
#else
void *const *outputs,
void *workspace,
#endif
hipStream_t stream) TRT_NOEXCEPT {
const auto &input_dims = this->getInputDims(0);
int begin_norm_axis = begin_norm_axis_;
float eps = eps_;
PADDLE_ENFORCE_EQ(1,
mean_shape_.size(),
platform::errors::InvalidArgument(
"Size of mean_shape vector should be equal to 1,"
"but got Size of mean_shape vector:%d",
mean_shape_.size()));
PADDLE_ENFORCE_EQ(1,
variance_shape_.size(),
platform::errors::InvalidArgument(
"Size of variance_shape vector should be equal to 1,"
"but got Size of mean_shape vector:%d",
mean_shape_.size()));
int64_t batched_mean_shape = mean_shape_[0] * input_dims.d[0];
int64_t batched_variance_shape = variance_shape_[0] * input_dims.d[0];
std::vector<int> input_shape;
input_shape.push_back(batch_size);
for (int i = 0; i < input_dims.nbDims; i++) {
input_shape.push_back(input_dims.d[i]);
}
const auto input_ddim = phi::make_ddim(input_shape);
auto matrix_dim = phi::flatten_to_2d(input_ddim, begin_norm_axis);
int feature_size = static_cast<int>(matrix_dim[1]);
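// flatten_to_2d folds the shape into [outer, inner] at begin_norm_axis; the
// inner extent is the feature_size that the scale/bias lengths must match.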
PADDLE_ENFORCE_EQ(feature_size,
scale_.size(),
platform::errors::InvalidArgument(
"scale's size should be equal to the feature_size,"
"but got feature_size:%d, scale's size:%d.",
feature_size,
scale_.size()));
PADDLE_ENFORCE_EQ(feature_size,
bias_.size(),
platform::errors::InvalidArgument(
"bias's size should be equal to the feature_size,"
"but got feature_size:%d, bias's size:%d.",
feature_size,
bias_.size()));
int device_id;
hipGetDevice(&device_id);
mean_t.Resize(phi::make_ddim({batched_mean_shape}));
variance_t.Resize(phi::make_ddim({batched_variance_shape}));
float *mean_d = mean_t.mutable_data<float>(platform::CUDAPlace(device_id));
float *variance_d =
variance_t.mutable_data<float>(platform::CUDAPlace(device_id));
auto input_type = getDataType();
if (input_type == nvinfer1::DataType::kFLOAT) {
VLOG(1) << "TRT Plugin DataType selected. LayerNorm-->fp32";
const float *input = reinterpret_cast<const float *>(inputs[0]);
float *output = static_cast<float *>(outputs[0]);
phi::LayerNormDirectCUDAFunctor<float, float> layer_norm;
layer_norm(stream,
input,
input_shape,
bias_gpu_,
scale_gpu_,
output,
mean_d,
variance_d,
begin_norm_axis,
eps);
} else if (input_type == nvinfer1::DataType::kHALF) {
VLOG(1) << "TRT Plugin DataType selected. LayerNorm-->fp16";
const half *input = reinterpret_cast<const half *>(inputs[0]);
half *output = static_cast<half *>(outputs[0]);
phi::LayerNormDirectCUDAFunctor<half, float> layer_norm;
layer_norm(stream,
input,
input_shape,
bias_gpu_,
scale_gpu_,
output,
mean_d,
variance_d,
begin_norm_axis,
eps);
} else {
PADDLE_THROW(platform::errors::Fatal(
"The LayerNorm TRT Plugin's input type should be float or half."));
}
return hipGetLastError() != hipSuccess;
}
int LayerNormPluginDynamic::initialize() TRT_NOEXCEPT {
hipMalloc(&bias_gpu_, sizeof(float) * bias_.size());
hipMemcpy(bias_gpu_,
bias_.data(),
bias_.size() * sizeof(float),
hipMemcpyHostToDevice);
hipMalloc(&scale_gpu_, sizeof(float) * scale_.size());
hipMemcpy(scale_gpu_,
scale_.data(),
scale_.size() * sizeof(float),
hipMemcpyHostToDevice);
return 0;
}
void LayerNormPluginDynamic::terminate() TRT_NOEXCEPT {
if (bias_gpu_) {
hipFree(bias_gpu_);
bias_gpu_ = nullptr;
}
if (scale_gpu_) {
hipFree(scale_gpu_);
scale_gpu_ = nullptr;
}
}
nvinfer1::DimsExprs LayerNormPluginDynamic::getOutputDimensions(
int output_index,
const nvinfer1::DimsExprs *inputDims,
int nb_inputs,
nvinfer1::IExprBuilder &expr_builder) TRT_NOEXCEPT {
return inputDims[0];
}
bool LayerNormPluginDynamic::supportsFormatCombination(
int pos,
const nvinfer1::PluginTensorDesc *in_out,
int nb_inputs,
int nb_outputs) TRT_NOEXCEPT {
PADDLE_ENFORCE_NOT_NULL(
in_out,
platform::errors::InvalidArgument(
"The input of layernorm plugin shoule not be nullptr."));
PADDLE_ENFORCE_LT(
pos,
nb_inputs + nb_outputs,
platform::errors::InvalidArgument("The pos(%d) should be less than the "
"num(%d) of the input and the output.",
pos,
nb_inputs + nb_outputs));
const nvinfer1::PluginTensorDesc &in = in_out[pos];
if (pos == 0) {
if (with_fp16_) {
return ((in.type == nvinfer1::DataType::kFLOAT ||
in.type == nvinfer1::DataType::kHALF) &&
(in.format == nvinfer1::PluginFormat::kLINEAR));
} else {
return (in.type == nvinfer1::DataType::kFLOAT) &&
(in.format == nvinfer1::TensorFormat::kLINEAR);
}
}
const nvinfer1::PluginTensorDesc &prev = in_out[pos - 1];
// output
return in.type == prev.type && in.format == prev.format;
}
void LayerNormPluginDynamic::configurePlugin(
const nvinfer1::DynamicPluginTensorDesc *in,
int nbInputs,
const nvinfer1::DynamicPluginTensorDesc *out,
int nbOutputs) TRT_NOEXCEPT {
const auto &input_dims = in[0].desc.dims;
int statis_num = 1;
for (int i = 0; i < begin_norm_axis_; i++) {
statis_num *= input_dims.d[i];
}
mean_shape_[0] = statis_num;
variance_shape_[0] = statis_num;
}
nvinfer1::DataType LayerNormPluginDynamic::getOutputDataType(
int index,
const nvinfer1::DataType *input_types,
int nb_inputs) const TRT_NOEXCEPT {
PADDLE_ENFORCE_EQ(index,
0,
platform::errors::InvalidArgument(
"The LayerNormPlugin only has one input, so the "
"index value should be 0, but get %d.",
index));
PADDLE_ENFORCE_EQ((input_types[0] == nvinfer1::DataType::kFLOAT ||
input_types[0] == nvinfer1::DataType::kHALF),
true,
platform::errors::InvalidArgument(
"The input type should be half or float"));
return input_types[0];
}
int LayerNormPluginDynamic::enqueue(
const nvinfer1::PluginTensorDesc *input_desc,
const nvinfer1::PluginTensorDesc *output_desc,
const void *const *inputs,
void *const *outputs,
void *workspace,
hipStream_t stream) TRT_NOEXCEPT {
const auto &input_dims = input_desc[0].dims;
int begin_norm_axis = begin_norm_axis_;
float eps = eps_;
std::vector<int> input_shape;
for (int i = 0; i < input_dims.nbDims; i++) {
input_shape.push_back(input_dims.d[i]);
}
// in dynamic shape
// the batch num should be involved in mean/variance shape
PADDLE_ENFORCE_EQ(1,
mean_shape_.size(),
platform::errors::InvalidArgument(
"Size of mean_shape vector should be equal to 1,"
"but got Size of mean_shape vector:%d",
mean_shape_.size()));
PADDLE_ENFORCE_EQ(1,
variance_shape_.size(),
platform::errors::InvalidArgument(
"Size of variance_shape vector should be equal to 1,"
"but got Size of mean_shape vector:%d",
mean_shape_.size()));
PADDLE_ENFORCE_GE(mean_shape_[0],
0,
platform::errors::InvalidArgument(
"The size of mean vector should be positive,"
"but got:%d",
mean_shape_[0]));
PADDLE_ENFORCE_GE(variance_shape_[0],
0,
platform::errors::InvalidArgument(
"The size of mean vector should be positive,"
"but got:%d",
variance_shape_[0]));
const auto input_ddim = phi::make_ddim(input_shape);
auto matrix_dim = phi::flatten_to_2d(input_ddim, begin_norm_axis);
int feature_size = static_cast<int>(matrix_dim[1]);
PADDLE_ENFORCE_EQ(feature_size,
scale_.size(),
platform::errors::InvalidArgument(
"scale's size should be equal to the feature_size,"
"but got feature_size:%d, scale's size:%d.",
feature_size,
scale_.size()));
PADDLE_ENFORCE_EQ(feature_size,
bias_.size(),
platform::errors::InvalidArgument(
"bias's size should be equal to the feature_size,"
"but got feature_size:%d, bias's size:%d.",
feature_size,
bias_.size()));
int device_id;
hipGetDevice(&device_id);
mean_t.Resize(phi::make_ddim(mean_shape_));
variance_t.Resize(phi::make_ddim(variance_shape_));
float *mean_d = mean_t.mutable_data<float>(platform::CUDAPlace(device_id));
float *variance_d =
variance_t.mutable_data<float>(platform::CUDAPlace(device_id));
auto input_type = input_desc[0].type;
if (input_type == nvinfer1::DataType::kFLOAT) {
VLOG(1) << "TRT Plugin DataType selected. LayerNorm-->fp32";
const float *input = reinterpret_cast<const float *>(inputs[0]);
float *output = static_cast<float *>(outputs[0]);
phi::LayerNormDirectCUDAFunctor<float, float> layer_norm;
layer_norm(stream,
input,
input_shape,
bias_gpu_,
scale_gpu_,
output,
mean_d,
variance_d,
begin_norm_axis,
eps);
} else if (input_type == nvinfer1::DataType::kHALF) {
VLOG(1) << "TRT Plugin DataType selected. LayerNorm-->fp16";
const half *input = reinterpret_cast<const half *>(inputs[0]);
half *output = static_cast<half *>(outputs[0]);
phi::LayerNormDirectCUDAFunctor<half, float> layer_norm;
layer_norm(stream,
input,
input_shape,
bias_gpu_,
scale_gpu_,
output,
mean_d,
variance_d,
begin_norm_axis,
eps);
} else {
PADDLE_THROW(platform::errors::Fatal(
"The LayerNorm TRT Plugin's input type should be float or half."));
}
return hipGetLastError() != hipSuccess;
}
} // namespace plugin
} // namespace tensorrt
} // namespace inference
} // namespace paddle
| f92b3d1617ad235b0cdb9bf9639df492940133a5.cu | // Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <stdio.h>
#include <cassert>
#include <vector>
#include "glog/logging.h"
#include "paddle/fluid/inference/tensorrt/plugin/layer_norm_op_plugin.h"
#include "paddle/phi/kernels/layer_norm_kernel.h"
namespace paddle {
namespace inference {
namespace tensorrt {
namespace plugin {
int LayerNormPlugin::initialize() TRT_NOEXCEPT {
cudaMalloc(&bias_gpu_, sizeof(float) * bias_.size());
cudaMemcpy(bias_gpu_,
bias_.data(),
bias_.size() * sizeof(float),
cudaMemcpyHostToDevice);
cudaMalloc(&scale_gpu_, sizeof(float) * scale_.size());
cudaMemcpy(scale_gpu_,
scale_.data(),
scale_.size() * sizeof(float),
cudaMemcpyHostToDevice);
return 0;
}
void LayerNormPlugin::terminate() TRT_NOEXCEPT {
if (bias_gpu_) {
cudaFree(bias_gpu_);
bias_gpu_ = nullptr;
}
if (scale_gpu_) {
cudaFree(scale_gpu_);
scale_gpu_ = nullptr;
}
}
nvinfer1::Dims LayerNormPlugin::getOutputDimensions(
int index, const nvinfer1::Dims *inputDims, int nbInputs) TRT_NOEXCEPT {
assert(nbInputs == 1);
assert(index < this->getNbOutputs());
nvinfer1::Dims const &input_dims = inputDims[0];
nvinfer1::Dims output_dims = input_dims;
return output_dims;
}
bool LayerNormPlugin::supportsFormat(
nvinfer1::DataType type, nvinfer1::PluginFormat format) const TRT_NOEXCEPT {
if (with_fp16_) {
return ((type == nvinfer1::DataType::kFLOAT ||
type == nvinfer1::DataType::kHALF) &&
(format == nvinfer1::PluginFormat::kLINEAR));
} else {
return ((type == nvinfer1::DataType::kFLOAT) &&
(format == nvinfer1::PluginFormat::kLINEAR));
}
}
int LayerNormPlugin::enqueue(int batch_size,
const void *const *inputs,
#if IS_TRT_VERSION_LT(8000)
void **outputs,
void *workspace,
#else
void *const *outputs,
void *workspace,
#endif
cudaStream_t stream) TRT_NOEXCEPT {
const auto &input_dims = this->getInputDims(0);
int begin_norm_axis = begin_norm_axis_;
float eps = eps_;
PADDLE_ENFORCE_EQ(1,
mean_shape_.size(),
platform::errors::InvalidArgument(
"Size of mean_shape vector should be equal to 1,"
"but got Size of mean_shape vector:%d",
mean_shape_.size()));
PADDLE_ENFORCE_EQ(1,
variance_shape_.size(),
platform::errors::InvalidArgument(
"Size of variance_shape vector should be equal to 1,"
"but got Size of mean_shape vector:%d",
mean_shape_.size()));
int64_t batched_mean_shape = mean_shape_[0] * input_dims.d[0];
int64_t batched_variance_shape = variance_shape_[0] * input_dims.d[0];
std::vector<int> input_shape;
input_shape.push_back(batch_size);
for (int i = 0; i < input_dims.nbDims; i++) {
input_shape.push_back(input_dims.d[i]);
}
const auto input_ddim = phi::make_ddim(input_shape);
auto matrix_dim = phi::flatten_to_2d(input_ddim, begin_norm_axis);
int feature_size = static_cast<int>(matrix_dim[1]);
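// flatten_to_2d folds the shape into [outer, inner] at begin_norm_axis; the
// inner extent is the feature_size that the scale/bias lengths must match.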
PADDLE_ENFORCE_EQ(feature_size,
scale_.size(),
platform::errors::InvalidArgument(
"scale's size should be equal to the feature_size,"
"but got feature_size:%d, scale's size:%d.",
feature_size,
scale_.size()));
PADDLE_ENFORCE_EQ(feature_size,
bias_.size(),
platform::errors::InvalidArgument(
"bias's size should be equal to the feature_size,"
"but got feature_size:%d, bias's size:%d.",
feature_size,
bias_.size()));
int device_id;
cudaGetDevice(&device_id);
mean_t.Resize(phi::make_ddim({batched_mean_shape}));
variance_t.Resize(phi::make_ddim({batched_variance_shape}));
float *mean_d = mean_t.mutable_data<float>(platform::CUDAPlace(device_id));
float *variance_d =
variance_t.mutable_data<float>(platform::CUDAPlace(device_id));
auto input_type = getDataType();
if (input_type == nvinfer1::DataType::kFLOAT) {
VLOG(1) << "TRT Plugin DataType selected. LayerNorm-->fp32";
const float *input = reinterpret_cast<const float *>(inputs[0]);
float *output = static_cast<float *>(outputs[0]);
phi::LayerNormDirectCUDAFunctor<float, float> layer_norm;
layer_norm(stream,
input,
input_shape,
bias_gpu_,
scale_gpu_,
output,
mean_d,
variance_d,
begin_norm_axis,
eps);
} else if (input_type == nvinfer1::DataType::kHALF) {
VLOG(1) << "TRT Plugin DataType selected. LayerNorm-->fp16";
const half *input = reinterpret_cast<const half *>(inputs[0]);
half *output = static_cast<half *>(outputs[0]);
phi::LayerNormDirectCUDAFunctor<half, float> layer_norm;
layer_norm(stream,
input,
input_shape,
bias_gpu_,
scale_gpu_,
output,
mean_d,
variance_d,
begin_norm_axis,
eps);
} else {
PADDLE_THROW(platform::errors::Fatal(
"The LayerNorm TRT Plugin's input type should be float or half."));
}
return cudaGetLastError() != cudaSuccess;
}
int LayerNormPluginDynamic::initialize() TRT_NOEXCEPT {
cudaMalloc(&bias_gpu_, sizeof(float) * bias_.size());
cudaMemcpy(bias_gpu_,
bias_.data(),
bias_.size() * sizeof(float),
cudaMemcpyHostToDevice);
cudaMalloc(&scale_gpu_, sizeof(float) * scale_.size());
cudaMemcpy(scale_gpu_,
scale_.data(),
scale_.size() * sizeof(float),
cudaMemcpyHostToDevice);
return 0;
}
void LayerNormPluginDynamic::terminate() TRT_NOEXCEPT {
if (bias_gpu_) {
cudaFree(bias_gpu_);
bias_gpu_ = nullptr;
}
if (scale_gpu_) {
cudaFree(scale_gpu_);
scale_gpu_ = nullptr;
}
}
nvinfer1::DimsExprs LayerNormPluginDynamic::getOutputDimensions(
int output_index,
const nvinfer1::DimsExprs *inputDims,
int nb_inputs,
nvinfer1::IExprBuilder &expr_builder) TRT_NOEXCEPT {
return inputDims[0];
}
bool LayerNormPluginDynamic::supportsFormatCombination(
int pos,
const nvinfer1::PluginTensorDesc *in_out,
int nb_inputs,
int nb_outputs) TRT_NOEXCEPT {
PADDLE_ENFORCE_NOT_NULL(
in_out,
platform::errors::InvalidArgument(
"The input of layernorm plugin shoule not be nullptr."));
PADDLE_ENFORCE_LT(
pos,
nb_inputs + nb_outputs,
platform::errors::InvalidArgument("The pos(%d) should be less than the "
"num(%d) of the input and the output.",
pos,
nb_inputs + nb_outputs));
const nvinfer1::PluginTensorDesc &in = in_out[pos];
if (pos == 0) {
if (with_fp16_) {
return ((in.type == nvinfer1::DataType::kFLOAT ||
in.type == nvinfer1::DataType::kHALF) &&
(in.format == nvinfer1::PluginFormat::kLINEAR));
} else {
return (in.type == nvinfer1::DataType::kFLOAT) &&
(in.format == nvinfer1::TensorFormat::kLINEAR);
}
}
const nvinfer1::PluginTensorDesc &prev = in_out[pos - 1];
// output
return in.type == prev.type && in.format == prev.format;
}
void LayerNormPluginDynamic::configurePlugin(
const nvinfer1::DynamicPluginTensorDesc *in,
int nbInputs,
const nvinfer1::DynamicPluginTensorDesc *out,
int nbOutputs) TRT_NOEXCEPT {
const auto &input_dims = in[0].desc.dims;
int statis_num = 1;
for (int i = 0; i < begin_norm_axis_; i++) {
statis_num *= input_dims.d[i];
}
mean_shape_[0] = statis_num;
variance_shape_[0] = statis_num;
}
nvinfer1::DataType LayerNormPluginDynamic::getOutputDataType(
int index,
const nvinfer1::DataType *input_types,
int nb_inputs) const TRT_NOEXCEPT {
PADDLE_ENFORCE_EQ(index,
0,
platform::errors::InvalidArgument(
"The LayerNormPlugin only has one input, so the "
"index value should be 0, but get %d.",
index));
PADDLE_ENFORCE_EQ((input_types[0] == nvinfer1::DataType::kFLOAT ||
input_types[0] == nvinfer1::DataType::kHALF),
true,
platform::errors::InvalidArgument(
"The input type should be half or float"));
return input_types[0];
}
int LayerNormPluginDynamic::enqueue(
const nvinfer1::PluginTensorDesc *input_desc,
const nvinfer1::PluginTensorDesc *output_desc,
const void *const *inputs,
void *const *outputs,
void *workspace,
cudaStream_t stream) TRT_NOEXCEPT {
const auto &input_dims = input_desc[0].dims;
int begin_norm_axis = begin_norm_axis_;
float eps = eps_;
std::vector<int> input_shape;
for (int i = 0; i < input_dims.nbDims; i++) {
input_shape.push_back(input_dims.d[i]);
}
// in dynamic shape
// the batch num should be involved in mean/variance shape
PADDLE_ENFORCE_EQ(1,
mean_shape_.size(),
platform::errors::InvalidArgument(
"Size of mean_shape vector should be equal to 1,"
"but got Size of mean_shape vector:%d",
mean_shape_.size()));
PADDLE_ENFORCE_EQ(1,
variance_shape_.size(),
platform::errors::InvalidArgument(
"Size of variance_shape vector should be equal to 1,"
"but got Size of mean_shape vector:%d",
mean_shape_.size()));
PADDLE_ENFORCE_GE(mean_shape_[0],
0,
platform::errors::InvalidArgument(
"The size of mean vector should be positive,"
"but got:%d",
mean_shape_[0]));
PADDLE_ENFORCE_GE(variance_shape_[0],
0,
platform::errors::InvalidArgument(
"The size of mean vector should be positive,"
"but got:%d",
variance_shape_[0]));
const auto input_ddim = phi::make_ddim(input_shape);
auto matrix_dim = phi::flatten_to_2d(input_ddim, begin_norm_axis);
int feature_size = static_cast<int>(matrix_dim[1]);
PADDLE_ENFORCE_EQ(feature_size,
scale_.size(),
platform::errors::InvalidArgument(
"scale's size should be equal to the feature_size,"
"but got feature_size:%d, scale's size:%d.",
feature_size,
scale_.size()));
PADDLE_ENFORCE_EQ(feature_size,
bias_.size(),
platform::errors::InvalidArgument(
"bias's size should be equal to the feature_size,"
"but got feature_size:%d, bias's size:%d.",
feature_size,
bias_.size()));
int device_id;
cudaGetDevice(&device_id);
mean_t.Resize(phi::make_ddim(mean_shape_));
variance_t.Resize(phi::make_ddim(variance_shape_));
float *mean_d = mean_t.mutable_data<float>(platform::CUDAPlace(device_id));
float *variance_d =
variance_t.mutable_data<float>(platform::CUDAPlace(device_id));
auto input_type = input_desc[0].type;
if (input_type == nvinfer1::DataType::kFLOAT) {
VLOG(1) << "TRT Plugin DataType selected. LayerNorm-->fp32";
const float *input = reinterpret_cast<const float *>(inputs[0]);
float *output = static_cast<float *>(outputs[0]);
phi::LayerNormDirectCUDAFunctor<float, float> layer_norm;
layer_norm(stream,
input,
input_shape,
bias_gpu_,
scale_gpu_,
output,
mean_d,
variance_d,
begin_norm_axis,
eps);
} else if (input_type == nvinfer1::DataType::kHALF) {
VLOG(1) << "TRT Plugin DataType selected. LayerNorm-->fp16";
const half *input = reinterpret_cast<const half *>(inputs[0]);
half *output = static_cast<half *>(outputs[0]);
phi::LayerNormDirectCUDAFunctor<half, float> layer_norm;
layer_norm(stream,
input,
input_shape,
bias_gpu_,
scale_gpu_,
output,
mean_d,
variance_d,
begin_norm_axis,
eps);
} else {
PADDLE_THROW(platform::errors::Fatal(
"The LayerNorm TRT Plugin's input type should be float or half."));
}
return cudaGetLastError() != cudaSuccess;
}
} // namespace plugin
} // namespace tensorrt
} // namespace inference
} // namespace paddle
|
47243f8b01ea72c97e2822f0e00e9ba8904cef71.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void THCudaTensor_kernel_renorm(float *data, const float value, const ptrdiff_t size, const float maxnorm)
{
__shared__ float buffer[32];
long tx = threadIdx.x;
long bx = blockIdx.x;
long step = blockDim.x;
float *row = data + size*bx;
buffer[tx] = 0;
// get norm of axis
for (ptrdiff_t i=tx; i<size; i+=step)
{
buffer[tx] += pow(fabs(row[i]), value);
}
// add (reduce)
for (unsigned int stride = blockDim.x >> 1; stride > 0; stride >>= 1)
{
__syncthreads();
if (tx < stride)
buffer[tx] += buffer[tx+stride];
}
// clip norms
__syncthreads();
float norm = pow(buffer[0], 1/value);
if (norm > maxnorm)
{
norm = maxnorm / (norm + 1e-7);
// renormalize
for (ptrdiff_t i=tx; i<size; i+=step)
{
row[i] *= norm;
}
}
} | 47243f8b01ea72c97e2822f0e00e9ba8904cef71.cu | #include "includes.h"
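// Assumed launch shape (the caller is not part of this file): one block per
// row, with blockDim.x a power of two and at most 32, since `buffer` holds
// only 32 partial sums, e.g. (n_rows/row_size are illustrative names):
//   THCudaTensor_kernel_renorm<<<n_rows, 32>>>(data, value, row_size, maxnorm);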
__global__ void THCudaTensor_kernel_renorm(float *data, const float value, const ptrdiff_t size, const float maxnorm)
{
__shared__ float buffer[32];
long tx = threadIdx.x;
long bx = blockIdx.x;
long step = blockDim.x;
float *row = data + size*bx;
buffer[tx] = 0;
// get norm of axis
for (ptrdiff_t i=tx; i<size; i+=step)
{
buffer[tx] += pow(fabs(row[i]), value);
}
// add (reduce)
for (unsigned int stride = blockDim.x >> 1; stride > 0; stride >>= 1)
{
__syncthreads();
if (tx < stride)
buffer[tx] += buffer[tx+stride];
}
// clip norms
__syncthreads();
float norm = pow(buffer[0], 1/value);
if (norm > maxnorm)
{
norm = maxnorm / (norm + 1e-7);
// renormalize
for (ptrdiff_t i=tx; i<size; i+=step)
{
row[i] *= norm;
}
}
} |
08a2d15b584fadcca037de88a79b4392c1388a11.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <gtest/gtest.h>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc.hpp>
#include <string>
#include <vector>
#include "dali/kernels/imgproc/warp_gpu.h"
#include "dali/kernels/imgproc/warp/affine.h"
#include "dali/test/tensor_test_utils.h"
#include "dali/test/dump_diff.h"
#include "dali/test/mat2tensor.h"
#include "dali/test/test_tensors.h"
#include "dali/kernels/scratch.h"
#include "dali/kernels/alloc.h"
#include "dali/test/dali_test_config.h"
#include "dali/core/geom/transform.h"
namespace dali {
namespace kernels {
class WarpPrivateTest {
public:
template <typename Mapping, int ndim, typename Out, typename In, typename Border>
static kernels::warp::WarpSetup<ndim, Out, In> &
GetSetup(kernels::WarpGPU<Mapping, ndim, Out, In, Border> &kernel) {
return kernel.setup;
}
};
TEST(WarpGPU, check_kernel) {
check_kernel<WarpGPU<AffineMapping2D, 2, float, uint8_t, float>>();
SUCCEED();
}
void WarpGPU_Affine_Transpose(bool force_variable) {
AffineMapping2D mapping_cpu = mat2x3{{
{ 0, 1, 0 },
{ 1, 0, 0 }
}};
cv::Mat cv_img = cv::imread(testing::dali_extra_path() + "/db/imgproc/alley.png");
auto cpu_img = view_as_tensor<uint8_t>(cv_img);
auto gpu_img = copy<AllocType::GPU>(cpu_img);
auto img_tensor = gpu_img.first;
TensorListView<StorageGPU, uint8_t, 3> in_list;
in_list.resize(1, 3);
in_list.shape.set_tensor_shape(0, img_tensor.shape);
in_list.data[0] = img_tensor.data;
WarpGPU<AffineMapping2D, 2, uint8_t, uint8_t, BorderClamp> warp;
ScratchpadAllocator scratch_alloc;
auto mapping_gpu = memory::alloc_unique<AffineMapping2D>(AllocType::GPU, 1);
TensorShape<2> out_shape = { img_tensor.shape[1], img_tensor.shape[0] };
KernelContext ctx = {};
auto out_shapes_hw = make_span<1>(&out_shape);
auto mappings = make_tensor_gpu<1>(mapping_gpu.get(), { 1 });
copy(mappings, make_tensor_cpu<1>(&mapping_cpu, { 1 }));
auto interp = DALI_INTERP_NN;
KernelRequirements req;
if (force_variable) {
auto &setup = WarpPrivateTest::GetSetup(warp);
setup.SetBlockDim(dim3(32, 8, 1));
auto out_shapes = setup.GetOutputShape(in_list.shape, out_shapes_hw);
req = setup.Setup(out_shapes, true);
req.scratch_sizes[static_cast<int>(AllocType::GPU)] += sizeof(warp::SampleDesc<2, int, int>);
} else {
req = warp.Setup(ctx, in_list, mappings, out_shapes_hw, {&interp, 1});
}
scratch_alloc.Reserve(req.scratch_sizes);
TestTensorList<uint8_t, 3> out;
out.reshape(req.output_shapes[0].to_static<3>());
auto scratchpad = scratch_alloc.GetScratchpad();
ctx.scratchpad = &scratchpad;
warp.Run(ctx, out.gpu(0), in_list, mappings, out_shapes_hw, {&interp, 1});
auto cpu_out = out.cpu(0)[0];
CUDA_CALL(hipDeviceSynchronize());
ASSERT_EQ(cpu_out.shape[0], img_tensor.shape[1]);
ASSERT_EQ(cpu_out.shape[1], img_tensor.shape[0]);
ASSERT_EQ(cpu_out.shape[2], 3);
int errors = 0;
int printed = 0;
for (int y = 0; y < cpu_out.shape[0]; y++) {
for (int x = 0; x < cpu_out.shape[1]; x++) {
for (int c = 0; c < 3; c++) {
if (*cpu_out(y, x, c) != *cpu_img(x, y, c)) {
if (errors++ < 100) {
printed++;
EXPECT_EQ(*cpu_out(y, x, c), *cpu_img(x, y, c))
<< "@ x = " << x << " y = " << y << " c = " << c;
}
}
}
}
}
if (printed != errors) {
FAIL() << (errors - printed) << " more erors.";
}
}
TEST(WarpGPU, Affine_Transpose_ForceVariable) {
WarpGPU_Affine_Transpose(true);
}
TEST(WarpGPU, Affine_Transpose_Single) {
WarpGPU_Affine_Transpose(false);
}
/**
* @brief Apply correction of pixel centers and convert the mapping to
* OpenCV matrix type.
*/
inline cv::Matx<float, 2, 3> AffineToCV(const AffineMapping2D &mapping) {
vec2 translation = mapping({0.5f, 0.5f}) - vec2(0.5f, 0.5f);
mat2x3 tmp = mapping.transform;
tmp.set_col(2, translation);
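// Evaluating the mapping at (0.5, 0.5) and subtracting the half-pixel offset
// folds the pixel-center convention into the translation column, so OpenCV's
// integer-coordinate warpAffine samples the same locations.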
cv::Matx<float, 2, 3> cv_transform;
for (int i = 0; i < 2; i++)
for (int j = 0; j < 3; j++)
cv_transform(i, j) = tmp(i, j);
return cv_transform;
}
TEST(WarpGPU, Affine_RotateScale_Single) {
cv::Mat cv_img = cv::imread(testing::dali_extra_path() + "/db/imgproc/dots.png");
auto cpu_img = view_as_tensor<uint8_t>(cv_img);
auto gpu_img = copy<AllocType::GPU>(cpu_img);
auto img_tensor = gpu_img.first;
vec2 center(cv_img.cols * 0.5f, cv_img.rows * 0.5f);
int scale = 10;
auto tr = translation(center) * rotation2D(-M_PI/4) *
translation(-center) * scaling(vec2(1.0f/scale, 1.0f/scale));
AffineMapping2D mapping_cpu = sub<2, 3>(tr, 0, 0);
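// The 2x3 mapping is applied output->input (note cv::WARP_INVERSE_MAP in the
// OpenCV reference below), so scaling coordinates by 1/scale magnifies the
// output by `scale`.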
TensorListView<StorageGPU, uint8_t, 3> in_list;
in_list.resize(1, 3);
in_list.shape.set_tensor_shape(0, img_tensor.shape);
in_list.data[0] = img_tensor.data;
WarpGPU<AffineMapping2D, 2, uint8_t, uint8_t, uint8_t> warp;
ScratchpadAllocator scratch_alloc;
auto mapping_gpu = memory::alloc_unique<AffineMapping2D>(AllocType::GPU, 1);
TensorShape<2> out_shape = { img_tensor.shape[0] * scale, img_tensor.shape[1] * scale };
KernelContext ctx = {};
auto out_shapes_hw = make_span<1>(&out_shape);
auto mappings = make_tensor_gpu<1>(mapping_gpu.get(), { 1 });
copy(mappings, make_tensor_cpu<1>(&mapping_cpu, { 1 }));
auto interp = DALI_INTERP_LINEAR;
auto &setup = WarpPrivateTest::GetSetup(warp);
auto out_shapes = setup.GetOutputShape(in_list.shape, out_shapes_hw);
setup.SetBlockDim(dim3(32, 24, 1)); // force non-square block
KernelRequirements req = setup.Setup(out_shapes, true);
scratch_alloc.Reserve(req.scratch_sizes);
TestTensorList<uint8_t, 3> out;
out.reshape(req.output_shapes[0].to_static<3>());
auto scratchpad = scratch_alloc.GetScratchpad();
ctx.scratchpad = &scratchpad;
warp.Run(ctx, out.gpu(0), in_list, mappings, out_shapes_hw, {&interp, 1}, 255);
auto cpu_out = out.cpu(0)[0];
CUDA_CALL(hipDeviceSynchronize());
ASSERT_EQ(cpu_out.shape[0], out_shapes_hw[0][0]);
ASSERT_EQ(cpu_out.shape[1], out_shapes_hw[0][1]);
ASSERT_EQ(cpu_out.shape[2], 3);
cv::Mat cv_out(cpu_out.shape[0], cpu_out.shape[1], CV_8UC3, cpu_out.data);
cv::Matx<float, 2, 3> cv_transform = AffineToCV(mapping_cpu);
cv::Mat cv_ref;
cv::warpAffine(cv_img, cv_ref,
cv_transform, cv::Size(out_shape[1], out_shape[0]),
cv::INTER_LINEAR|cv::WARP_INVERSE_MAP,
cv::BORDER_CONSTANT, cv::Scalar(255, 255, 255, 255));
auto ref_img = view_as_tensor<uint8_t>(cv_ref);
Check(cpu_out, ref_img, EqualEps(8));
if (HasFailure())
testing::DumpDiff("WarpAffine_RotateScale", cv_out, cv_ref);
}
TEST(WarpGPU, Affine_RotateScale_Uniform) {
cv::Mat cv_img = cv::imread(testing::dali_extra_path() + "/db/imgproc/dots.png");
auto cpu_img = view_as_tensor<uint8_t>(cv_img);
auto gpu_img = copy<AllocType::GPU>(cpu_img);
auto img_tensor = gpu_img.first;
vec2 center(cv_img.cols * 0.5f, cv_img.rows * 0.5f);
const int samples = 10;
std::vector<AffineMapping2D> mapping_cpu(samples);
int scale = 10;
TensorListView<StorageGPU, uint8_t, 3> in_list;
in_list.resize(samples, 3);
for (int i = 0; i < samples; i++) {
in_list.shape.set_tensor_shape(i, img_tensor.shape);
in_list.data[i] = img_tensor.data;
auto tr = translation(center) * rotation2D(-2*M_PI * i / samples) *
translation(-center) * scaling(vec2(1.0f/scale, 1.0f/scale));
mapping_cpu[i] = sub<2, 3>(tr, 0, 0);
}
WarpGPU<AffineMapping2D, 2, uint8_t, uint8_t, uint8_t> warp;
ScratchpadAllocator scratch_alloc;
auto mapping_gpu = memory::alloc_unique<AffineMapping2D>(AllocType::GPU, samples);
TensorShape<2> out_shape = { img_tensor.shape[0] * scale, img_tensor.shape[1] * scale };
KernelContext ctx = {};
std::vector<TensorShape<2>> out_shapes_hw(samples);
for (int i = 0; i < samples; i++)
out_shapes_hw[i] = out_shape;
auto mappings = make_tensor_gpu<1>(mapping_gpu.get(), { samples });
copy(mappings, make_tensor_cpu<1>(mapping_cpu.data(), { samples }));
auto interp = DALI_INTERP_LINEAR;
KernelRequirements req = warp.Setup(
ctx, in_list, mappings, make_span(out_shapes_hw), {&interp, 1}, 255);
scratch_alloc.Reserve(req.scratch_sizes);
TestTensorList<uint8_t, 3> out;
out.reshape(req.output_shapes[0].to_static<3>());
auto scratchpad = scratch_alloc.GetScratchpad();
ctx.scratchpad = &scratchpad;
warp.Run(ctx, out.gpu(0), in_list, mappings, make_span(out_shapes_hw), {&interp, 1}, 255);
CUDA_CALL(hipDeviceSynchronize());
for (int i = 0; i < samples; i++) {
auto cpu_out = out.cpu(0)[i];
ASSERT_EQ(cpu_out.shape[0], out_shapes_hw[i][0]);
ASSERT_EQ(cpu_out.shape[1], out_shapes_hw[i][1]);
ASSERT_EQ(cpu_out.shape[2], 3);
cv::Mat cv_out(cpu_out.shape[0], cpu_out.shape[1], CV_8UC3, cpu_out.data);
cv::Matx<float, 2, 3> cv_transform = AffineToCV(mapping_cpu[i]);
cv::Mat cv_ref;
cv::warpAffine(cv_img, cv_ref,
cv_transform, cv::Size(out_shape[1], out_shape[0]),
cv::INTER_LINEAR|cv::WARP_INVERSE_MAP,
cv::BORDER_CONSTANT, cv::Scalar(255, 255, 255, 255));
auto ref_img = view_as_tensor<uint8_t>(cv_ref);
Check(cpu_out, ref_img, EqualEps(8));
if (HasFailure()) {
auto name = "Warp_Affine_RotateScale_" + std::to_string(i);
testing::DumpDiff(name, cv_out, cv_ref);
}
}
}
} // namespace kernels
} // namespace dali
| 08a2d15b584fadcca037de88a79b4392c1388a11.cu | // Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <gtest/gtest.h>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc.hpp>
#include <string>
#include <vector>
#include "dali/kernels/imgproc/warp_gpu.h"
#include "dali/kernels/imgproc/warp/affine.h"
#include "dali/test/tensor_test_utils.h"
#include "dali/test/dump_diff.h"
#include "dali/test/mat2tensor.h"
#include "dali/test/test_tensors.h"
#include "dali/kernels/scratch.h"
#include "dali/kernels/alloc.h"
#include "dali/test/dali_test_config.h"
#include "dali/core/geom/transform.h"
namespace dali {
namespace kernels {
class WarpPrivateTest {
public:
template <typename Mapping, int ndim, typename Out, typename In, typename Border>
static kernels::warp::WarpSetup<ndim, Out, In> &
GetSetup(kernels::WarpGPU<Mapping, ndim, Out, In, Border> &kernel) {
return kernel.setup;
}
};
TEST(WarpGPU, check_kernel) {
check_kernel<WarpGPU<AffineMapping2D, 2, float, uint8_t, float>>();
SUCCEED();
}
void WarpGPU_Affine_Transpose(bool force_variable) {
AffineMapping2D mapping_cpu = mat2x3{{
{ 0, 1, 0 },
{ 1, 0, 0 }
}};
cv::Mat cv_img = cv::imread(testing::dali_extra_path() + "/db/imgproc/alley.png");
auto cpu_img = view_as_tensor<uint8_t>(cv_img);
auto gpu_img = copy<AllocType::GPU>(cpu_img);
auto img_tensor = gpu_img.first;
TensorListView<StorageGPU, uint8_t, 3> in_list;
in_list.resize(1, 3);
in_list.shape.set_tensor_shape(0, img_tensor.shape);
in_list.data[0] = img_tensor.data;
WarpGPU<AffineMapping2D, 2, uint8_t, uint8_t, BorderClamp> warp;
ScratchpadAllocator scratch_alloc;
auto mapping_gpu = memory::alloc_unique<AffineMapping2D>(AllocType::GPU, 1);
TensorShape<2> out_shape = { img_tensor.shape[1], img_tensor.shape[0] };
KernelContext ctx = {};
auto out_shapes_hw = make_span<1>(&out_shape);
auto mappings = make_tensor_gpu<1>(mapping_gpu.get(), { 1 });
copy(mappings, make_tensor_cpu<1>(&mapping_cpu, { 1 }));
auto interp = DALI_INTERP_NN;
KernelRequirements req;
if (force_variable) {
auto &setup = WarpPrivateTest::GetSetup(warp);
setup.SetBlockDim(dim3(32, 8, 1));
auto out_shapes = setup.GetOutputShape(in_list.shape, out_shapes_hw);
req = setup.Setup(out_shapes, true);
req.scratch_sizes[static_cast<int>(AllocType::GPU)] += sizeof(warp::SampleDesc<2, int, int>);
} else {
req = warp.Setup(ctx, in_list, mappings, out_shapes_hw, {&interp, 1});
}
scratch_alloc.Reserve(req.scratch_sizes);
TestTensorList<uint8_t, 3> out;
out.reshape(req.output_shapes[0].to_static<3>());
auto scratchpad = scratch_alloc.GetScratchpad();
ctx.scratchpad = &scratchpad;
warp.Run(ctx, out.gpu(0), in_list, mappings, out_shapes_hw, {&interp, 1});
auto cpu_out = out.cpu(0)[0];
CUDA_CALL(cudaDeviceSynchronize());
ASSERT_EQ(cpu_out.shape[0], img_tensor.shape[1]);
ASSERT_EQ(cpu_out.shape[1], img_tensor.shape[0]);
ASSERT_EQ(cpu_out.shape[2], 3);
int errors = 0;
int printed = 0;
for (int y = 0; y < cpu_out.shape[0]; y++) {
for (int x = 0; x < cpu_out.shape[1]; x++) {
for (int c = 0; c < 3; c++) {
if (*cpu_out(y, x, c) != *cpu_img(x, y, c)) {
if (errors++ < 100) {
printed++;
EXPECT_EQ(*cpu_out(y, x, c), *cpu_img(x, y, c))
<< "@ x = " << x << " y = " << y << " c = " << c;
}
}
}
}
}
if (printed != errors) {
FAIL() << (errors - printed) << " more erors.";
}
}
TEST(WarpGPU, Affine_Transpose_ForceVariable) {
WarpGPU_Affine_Transpose(true);
}
TEST(WarpGPU, Affine_Transpose_Single) {
WarpGPU_Affine_Transpose(false);
}
/**
* @brief Apply correction of pixel centers and convert the mapping to
* OpenCV matrix type.
*/
inline cv::Matx<float, 2, 3> AffineToCV(const AffineMapping2D &mapping) {
vec2 translation = mapping({0.5f, 0.5f}) - vec2(0.5f, 0.5f);
mat2x3 tmp = mapping.transform;
tmp.set_col(2, translation);
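// Evaluating the mapping at (0.5, 0.5) and subtracting the half-pixel offset
// folds the pixel-center convention into the translation column, so OpenCV's
// integer-coordinate warpAffine samples the same locations.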
cv::Matx<float, 2, 3> cv_transform;
for (int i = 0; i < 2; i++)
for (int j = 0; j < 3; j++)
cv_transform(i, j) = tmp(i, j);
return cv_transform;
}
TEST(WarpGPU, Affine_RotateScale_Single) {
cv::Mat cv_img = cv::imread(testing::dali_extra_path() + "/db/imgproc/dots.png");
auto cpu_img = view_as_tensor<uint8_t>(cv_img);
auto gpu_img = copy<AllocType::GPU>(cpu_img);
auto img_tensor = gpu_img.first;
vec2 center(cv_img.cols * 0.5f, cv_img.rows * 0.5f);
int scale = 10;
auto tr = translation(center) * rotation2D(-M_PI/4) *
translation(-center) * scaling(vec2(1.0f/scale, 1.0f/scale));
AffineMapping2D mapping_cpu = sub<2, 3>(tr, 0, 0);
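// The 2x3 mapping is applied output->input (note cv::WARP_INVERSE_MAP in the
// OpenCV reference below), so scaling coordinates by 1/scale magnifies the
// output by `scale`.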
TensorListView<StorageGPU, uint8_t, 3> in_list;
in_list.resize(1, 3);
in_list.shape.set_tensor_shape(0, img_tensor.shape);
in_list.data[0] = img_tensor.data;
WarpGPU<AffineMapping2D, 2, uint8_t, uint8_t, uint8_t> warp;
ScratchpadAllocator scratch_alloc;
auto mapping_gpu = memory::alloc_unique<AffineMapping2D>(AllocType::GPU, 1);
TensorShape<2> out_shape = { img_tensor.shape[0] * scale, img_tensor.shape[1] * scale };
KernelContext ctx = {};
auto out_shapes_hw = make_span<1>(&out_shape);
auto mappings = make_tensor_gpu<1>(mapping_gpu.get(), { 1 });
copy(mappings, make_tensor_cpu<1>(&mapping_cpu, { 1 }));
auto interp = DALI_INTERP_LINEAR;
auto &setup = WarpPrivateTest::GetSetup(warp);
auto out_shapes = setup.GetOutputShape(in_list.shape, out_shapes_hw);
setup.SetBlockDim(dim3(32, 24, 1)); // force non-square block
KernelRequirements req = setup.Setup(out_shapes, true);
scratch_alloc.Reserve(req.scratch_sizes);
TestTensorList<uint8_t, 3> out;
out.reshape(req.output_shapes[0].to_static<3>());
auto scratchpad = scratch_alloc.GetScratchpad();
ctx.scratchpad = &scratchpad;
warp.Run(ctx, out.gpu(0), in_list, mappings, out_shapes_hw, {&interp, 1}, 255);
auto cpu_out = out.cpu(0)[0];
CUDA_CALL(cudaDeviceSynchronize());
ASSERT_EQ(cpu_out.shape[0], out_shapes_hw[0][0]);
ASSERT_EQ(cpu_out.shape[1], out_shapes_hw[0][1]);
ASSERT_EQ(cpu_out.shape[2], 3);
cv::Mat cv_out(cpu_out.shape[0], cpu_out.shape[1], CV_8UC3, cpu_out.data);
cv::Matx<float, 2, 3> cv_transform = AffineToCV(mapping_cpu);
cv::Mat cv_ref;
cv::warpAffine(cv_img, cv_ref,
cv_transform, cv::Size(out_shape[1], out_shape[0]),
cv::INTER_LINEAR|cv::WARP_INVERSE_MAP,
cv::BORDER_CONSTANT, cv::Scalar(255, 255, 255, 255));
auto ref_img = view_as_tensor<uint8_t>(cv_ref);
Check(cpu_out, ref_img, EqualEps(8));
if (HasFailure())
testing::DumpDiff("WarpAffine_RotateScale", cv_out, cv_ref);
}
TEST(WarpGPU, Affine_RotateScale_Uniform) {
cv::Mat cv_img = cv::imread(testing::dali_extra_path() + "/db/imgproc/dots.png");
auto cpu_img = view_as_tensor<uint8_t>(cv_img);
auto gpu_img = copy<AllocType::GPU>(cpu_img);
auto img_tensor = gpu_img.first;
vec2 center(cv_img.cols * 0.5f, cv_img.rows * 0.5f);
const int samples = 10;
std::vector<AffineMapping2D> mapping_cpu(samples);
int scale = 10;
TensorListView<StorageGPU, uint8_t, 3> in_list;
in_list.resize(samples, 3);
for (int i = 0; i < samples; i++) {
in_list.shape.set_tensor_shape(i, img_tensor.shape);
in_list.data[i] = img_tensor.data;
auto tr = translation(center) * rotation2D(-2*M_PI * i / samples) *
translation(-center) * scaling(vec2(1.0f/scale, 1.0f/scale));
mapping_cpu[i] = sub<2, 3>(tr, 0, 0);
}
WarpGPU<AffineMapping2D, 2, uint8_t, uint8_t, uint8_t> warp;
ScratchpadAllocator scratch_alloc;
auto mapping_gpu = memory::alloc_unique<AffineMapping2D>(AllocType::GPU, samples);
TensorShape<2> out_shape = { img_tensor.shape[0] * scale, img_tensor.shape[1] * scale };
KernelContext ctx = {};
std::vector<TensorShape<2>> out_shapes_hw(samples);
for (int i = 0; i < samples; i++)
out_shapes_hw[i] = out_shape;
auto mappings = make_tensor_gpu<1>(mapping_gpu.get(), { samples });
copy(mappings, make_tensor_cpu<1>(mapping_cpu.data(), { samples }));
auto interp = DALI_INTERP_LINEAR;
KernelRequirements req = warp.Setup(
ctx, in_list, mappings, make_span(out_shapes_hw), {&interp, 1}, 255);
scratch_alloc.Reserve(req.scratch_sizes);
TestTensorList<uint8_t, 3> out;
out.reshape(req.output_shapes[0].to_static<3>());
auto scratchpad = scratch_alloc.GetScratchpad();
ctx.scratchpad = &scratchpad;
warp.Run(ctx, out.gpu(0), in_list, mappings, make_span(out_shapes_hw), {&interp, 1}, 255);
CUDA_CALL(cudaDeviceSynchronize());
for (int i = 0; i < samples; i++) {
auto cpu_out = out.cpu(0)[i];
ASSERT_EQ(cpu_out.shape[0], out_shapes_hw[i][0]);
ASSERT_EQ(cpu_out.shape[1], out_shapes_hw[i][1]);
ASSERT_EQ(cpu_out.shape[2], 3);
cv::Mat cv_out(cpu_out.shape[0], cpu_out.shape[1], CV_8UC3, cpu_out.data);
cv::Matx<float, 2, 3> cv_transform = AffineToCV(mapping_cpu[i]);
cv::Mat cv_ref;
cv::warpAffine(cv_img, cv_ref,
cv_transform, cv::Size(out_shape[1], out_shape[0]),
cv::INTER_LINEAR|cv::WARP_INVERSE_MAP,
cv::BORDER_CONSTANT, cv::Scalar(255, 255, 255, 255));
auto ref_img = view_as_tensor<uint8_t>(cv_ref);
Check(cpu_out, ref_img, EqualEps(8));
if (HasFailure()) {
auto name = "Warp_Affine_RotateScale_" + std::to_string(i);
testing::DumpDiff(name, cv_out, cv_ref);
}
}
}
} // namespace kernels
} // namespace dali
|
fda6b681a9821972ca5b7eaf69e61b33e883692e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<Windows.h>
#include<stdio.h>
#include<stdlib.h>
#include<gl\glew.h>
#include<gl\GL.h>
#include<math.h>
#include<cuda.h>
#include<cuda_gl_interop.h>
#include"vmath.h"
#define WIN_WIDTH 800
#define WIN_HEIGHT 600
#define DIM 4096
#pragma comment(lib,"user32.lib")
#pragma comment(lib,"gdi32.lib")
#pragma comment(lib,"glew32.lib")
#pragma comment(lib,"opengl32.lib")
//using namespace std;
enum InitErrorCodes
{
INIT_VERTEX_SHADER_COMPILATION_FAILED = -9,
INIT_FRAGMENT_SHADER_COMPILATION_FAILED,
INIT_LINK_SHADER_PROGRAM_FAILED,
INIT_FAIL_GLEW_INIT,
INIT_FAIL_BRIDGE_CONTEX_SET,
INIT_FAIL_BRIDGE_CONTEX_CREATION,
INIT_FAIL_SET_PIXEL_FORMAT,
INIT_FAIL_NO_PIXEL_FORMAT,
INIT_FAIL_NO_HDC,
INIT_ALL_OK,
};
enum CUDAInitErrorCodes
{
/* min no -10 */
INIT_CUDA_SETGLDEVICE_FAILED = -21,
INIT_CUDA_REGISTER_BUFFER_FAILED,
CUDA_INIT_GRAPHICS_MAPPED_RES_FAILED,
CUDA_INIT_GRAPHICS_MAPPED_RES_POINTER_FAILED,
CUDA_INIT_GRAPHICS_UNMAPP_RES_FAILED,
INIT_CUDA_CHOOSEDEVICE_FAILED = -10,
CUDA_INIT_ALL_OK=0,
};
enum attributeBindLocations
{
SAM_ATTRIBUTE_POSITION = 0,
SAM_ATTRIBUTE_COLOR,
SAM_ATTRIBUTE_NORNAL,
SAM_ATTRIBUTE_TEXTURE0,
};
LRESULT CALLBACK MainWndProc(HWND hwnd, UINT iMsg, WPARAM wParam, LPARAM lParam);
bool g_bWindowActive = false;
HWND g_hwnd = NULL;
HDC g_hdc = NULL;
HGLRC g_hrc = NULL;
WINDOWPLACEMENT wpPrev;
DWORD dwStyle;
bool g_bFullScreen = false;
FILE *g_pFile = NULL;
// Shaders
//GLuint iVertexShaderObject = 0;
//GLuint iFragmentShaderObject = 0;
GLuint g_ShaderProgramObject = 0;
// All Vertex Buffers
GLuint g_VertexArrayObject = 0;
GLuint g_VertexBufferObject_Position = 0;
GLuint g_VertexBufferObject_TexCoord = 0;
// Uniforms
GLuint g_Uniform_Model_Matrix = 0;
GLuint g_Uniform_View_Matrix = 0;
GLuint g_Uniform_Projection_Matrix = 0;
// sampler
GLuint g_uniform_TextureSampler;
GLuint g_TextureID;
// Projection
vmath::mat4 g_PersPectiveProjectionMatrix;
// CUDA Res
GLuint bufferOBJ;
cudaGraphicsResource *resource=NULL;
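// Typical GL interop flow (presumably set up later in this file): register
// bufferOBJ (a pixel buffer object) with the graphics-interop API, map
// `resource`, fetch a device pointer for kernel/kernel1 below, then unmap
// before OpenGL draws.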
bool gpu_cpu_Switch = false;// default on cpu
float g_fanimate = 0.0f;
bool animation_flag = false;
__global__ void kernel(uchar4 *ptr, float time)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int offset = x + y * blockDim.x * gridDim.x;
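// offset linearizes the 2D thread coordinate into a row-major pixel index of
// the DIM x DIM buffer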
// now calculate the value at the position
float fx = x / (float)DIM - 0.5f;
float fy = y / (float)DIM - 0.5f;
unsigned char green = 128 + 127 * sinf(fabsf(fx * 100 * time) - fabsf(fy * 100* time));
//ptr[(y * DIM) + x].x = 0;
ptr[offset].x = 0;
ptr[offset].y = green;
ptr[offset].z = 0;
ptr[offset].w = 255;
}
__global__ void kernel1(uchar4 *ptr, float time)
{
__shared__ unsigned char color;
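// note: every thread in the block writes this single shared byte below, so
// the final value is whichever write lands last (a racy demo pattern)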
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int offset = x + y * blockDim.x * gridDim.x;
// now calculate the value at the position
float fx = x / (float)DIM - 0.5f;
float fy = y / (float)DIM - 0.5f;
color = y * fabsf(fy * 100 * time);
color += 25 ;
__syncthreads();
//ptr[(y * DIM) + x].x = 0;
ptr[offset].x = color;
ptr[offset].y = color;
ptr[offset].z = color;
ptr[offset].w = 255;
}
int WINAPI WinMain(HINSTANCE hInstance, HINSTANCE hPrevInstance, LPSTR szCmdLine, int iCmdShow)
{
//int UnInitialize(void);
int Initialize(void);
int Update(void);
void Render(void);
// Windowing Elements
WNDCLASSEX wndclass;
MSG msg;
HWND hwnd = NULL;
TCHAR szClassName[] = TEXT("Sam_OGL");
RECT windowRect;
// Game Loop Control
bool bDone = false;
// Initialization Status
int iInitRet = 0;
SecureZeroMemory((void*)&wndclass, sizeof(wndclass));
wndclass.cbSize = sizeof(wndclass);
wndclass.cbClsExtra = 0;
wndclass.cbWndExtra = 0;
wndclass.style = CS_HREDRAW | CS_VREDRAW | CS_OWNDC;
wndclass.lpfnWndProc = MainWndProc;
wndclass.lpszClassName = szClassName;
wndclass.lpszMenuName = NULL;
wndclass.hInstance = hInstance;
wndclass.hbrBackground = (HBRUSH)GetStockObject(GRAY_BRUSH);
wndclass.hIcon = LoadIcon(hInstance, IDI_APPLICATION);
wndclass.hIconSm = LoadIcon(hInstance, IDI_APPLICATION);
wndclass.hCursor = LoadCursor(hInstance, IDC_ARROW);
if (!RegisterClassEx(&wndclass))
{
MessageBox(NULL, TEXT("Issue...!!!"), TEXT("Could Not RegisterClass() "), MB_OK | MB_ICONERROR);
exit(EXIT_FAILURE);
}
if ((fopen_s(&g_pFile, "SamLogFile.txt", "w+")) == 0)
{
fprintf_s(g_pFile, "File Opened Successfully. \n");
}
else
{
MessageBox(NULL, TEXT("Issue...!!!"), TEXT("Could not open File"), MB_OK | MB_ICONERROR);
exit(EXIT_FAILURE);
}
SecureZeroMemory((void*)&windowRect, sizeof(windowRect));
windowRect.left = 0;
windowRect.top = 0;
windowRect.bottom = WIN_HEIGHT;
windowRect.right = WIN_WIDTH;
AdjustWindowRectEx(&windowRect, WS_OVERLAPPEDWINDOW, FALSE, WS_EX_APPWINDOW);
hwnd = CreateWindowEx(WS_EX_APPWINDOW, szClassName,
TEXT("First_OpenGL_Window"),
WS_OVERLAPPEDWINDOW | WS_CLIPCHILDREN | WS_CLIPSIBLINGS | WS_VISIBLE,
CW_USEDEFAULT, CW_USEDEFAULT,
windowRect.right - windowRect.left,
windowRect.bottom - windowRect.top,
NULL, NULL, hInstance, NULL);
if (hwnd == NULL)
{
MessageBox(NULL, TEXT("Issue...!!!"), TEXT("Could Not CreateWindow() "), MB_OK | MB_ICONERROR);
exit(EXIT_FAILURE);
}
g_hwnd = hwnd;
iInitRet = Initialize();
switch (iInitRet)
{
case INIT_ALL_OK:
fprintf_s(g_pFile, "Initialize Complete \n");
break;
case INIT_FAIL_NO_HDC:
fprintf_s(g_pFile, "Failed to Get HDC \n");
DestroyWindow(hwnd);
break;
case INIT_FAIL_NO_PIXEL_FORMAT:
fprintf_s(g_pFile, "Failed to get PixelFormat \n");
DestroyWindow(hwnd);
break;
case INIT_FAIL_SET_PIXEL_FORMAT:
fprintf_s(g_pFile, "Failed to set Pixel Format \n");
DestroyWindow(hwnd);
break;
case INIT_FAIL_BRIDGE_CONTEX_CREATION:
fprintf_s(g_pFile, "Failed to wglCreateContext \n");
DestroyWindow(hwnd);
break;
case INIT_FAIL_BRIDGE_CONTEX_SET:
fprintf_s(g_pFile, "Failed to wglMakeCurrent \n");
DestroyWindow(hwnd);
break;
case INIT_FAIL_GLEW_INIT:
fprintf_s(g_pFile, "Failed to glewInit \n");
DestroyWindow(hwnd);
break;
case INIT_LINK_SHADER_PROGRAM_FAILED:
fprintf_s(g_pFile, "Failed to Link Shader Program Object \n");
DestroyWindow(hwnd);
break;
case INIT_VERTEX_SHADER_COMPILATION_FAILED:
fprintf_s(g_pFile, "Failed to Compile vertex Shader \n");
DestroyWindow(hwnd);
break;
case INIT_FRAGMENT_SHADER_COMPILATION_FAILED:
fprintf_s(g_pFile, "Failed to Compile fragment Shader \n");
DestroyWindow(hwnd);
break;
default:// For Other issues than OpenGL
{
switch (iInitRet)
{
case INIT_CUDA_CHOOSEDEVICE_FAILED:
fprintf_s(g_pFile, "hipChooseDevice Failed \n");
DestroyWindow(hwnd);
break;
/*default:
fprintf_s(g_pFile, "CUDA Failed UnKnown Reasons \n");
DestroyWindow(hwnd);
break;*/
}
// General Failure
fprintf_s(g_pFile, "Failed UnKnown Reasons \n");
DestroyWindow(hwnd);
}
break;
}
ShowWindow(hwnd, SW_SHOWNORMAL);
SetForegroundWindow(hwnd);
SetFocus(hwnd);
while (bDone == false)
{
if (PeekMessage(&msg, NULL, 0, 0, PM_REMOVE))
{
if (msg.message == WM_QUIT)
{
bDone = true;
}
else
{
TranslateMessage(&msg);
DispatchMessage(&msg);
}
}
else
{
if (g_bWindowActive)
{
Update();
}
// Show all Animations
Render();
}
}
//UnInitialize();
return ((int)msg.wParam);
}
LRESULT CALLBACK MainWndProc(HWND hwnd, UINT iMsg, WPARAM wParam, LPARAM lParam)
{
int UnInitialize(void);
void FullScreen(void);
bool Resize(int, int);
switch (iMsg)
{
case WM_CREATE:
PostMessage(hwnd, WM_KEYDOWN, (WPARAM)0x46, (LPARAM)NULL);
break;
case WM_SETFOCUS:
g_bWindowActive = true;
break;
case WM_KILLFOCUS:
g_bWindowActive = false;
break;
case WM_KEYDOWN:
switch (LOWORD(wParam))
{
case VK_ESCAPE:
DestroyWindow(hwnd);
break;
case 0x46: // 'f' or 'F'
//MessageBox(hwnd, TEXT("F is pressed"), TEXT("Status"), MB_OK);
FullScreen();
break;
case 0x48: // 'h' or 'H'
gpu_cpu_Switch = !gpu_cpu_Switch;
break;
default:
break;
}
break;
case WM_SIZE:
Resize(LOWORD(lParam), HIWORD(lParam));
break;
case WM_ERASEBKGND:
return(0);
//break;
case WM_CLOSE:
DestroyWindow(hwnd);
break;
case WM_DESTROY:
UnInitialize();
PostQuitMessage(0);
break;
default:
break;
}
return (DefWindowProc(hwnd, iMsg, wParam, lParam));
}
int Initialize(void)
{
bool Resize(int, int);
// Shader Programs
GLuint iVertexShaderObject = 0;
GLuint iFragmentShaderObject = 0;
// CUDA Init Variables
hipDeviceProp_t prop;
int dev;
hipError_t status;
int iPixelIndex = 0;
PIXELFORMATDESCRIPTOR pfd;
GLenum err = GLEW_OK; // GLEW error code
SecureZeroMemory(&pfd, sizeof(pfd));
pfd.nSize = sizeof(pfd);
pfd.nVersion = 1;
pfd.dwFlags = PFD_SUPPORT_OPENGL | PFD_DRAW_TO_WINDOW | PFD_DOUBLEBUFFER;
pfd.iPixelType = PFD_TYPE_RGBA;
pfd.cColorBits = 32;
pfd.cRedBits = 8;
pfd.cGreenBits = 8;
pfd.cBlueBits = 8;
pfd.cAlphaBits = 8;
g_hdc = GetDC(g_hwnd);
if (g_hdc == NULL)
{
return INIT_FAIL_NO_HDC;
}
iPixelIndex = ChoosePixelFormat(g_hdc, &pfd);
if (iPixelIndex == 0)
{
return INIT_FAIL_NO_PIXEL_FORMAT;
}
if (SetPixelFormat(g_hdc, iPixelIndex, &pfd) == FALSE)
{
return INIT_FAIL_SET_PIXEL_FORMAT;
}
g_hrc = wglCreateContext(g_hdc);
if (g_hrc == NULL)
{
return INIT_FAIL_BRIDGE_CONTEX_CREATION;
}
if (wglMakeCurrent(g_hdc, g_hrc) == FALSE)
{
return INIT_FAIL_BRIDGE_CONTEX_SET;
}
// Enables Feature Required for Programable Pipeline
err = glewInit();
if (err != GLEW_OK)
{
return INIT_FAIL_GLEW_INIT;
}
// GL information Start
fprintf_s(g_pFile, "SHADER_INFO : Vendor is : %s\n", glGetString(GL_VENDOR));
fprintf_s(g_pFile, "SHADER_INFO : Renderer is : %s\n", glGetString(GL_RENDER));
fprintf_s(g_pFile, "SHADER_INFO : OpenGL Version is : %s\n", glGetString(GL_VERSION));
fprintf_s(g_pFile, "SHADER_INFO : GLSL Version is : %s\n", glGetString(GL_SHADING_LANGUAGE_VERSION));
//fprintf_s(g_pFile, "SHADER_INFO : Extention is : %s \n", glGetString(GL_EXTENSIONS));
// GL information End
// Cuda Init Start
SecureZeroMemory((void*)&prop, sizeof(prop));
// Compute Capability 2.0 and beyond
prop.major = 2;
prop.minor = 0;
status = hipChooseDevice(&dev,&prop);
if (status != hipSuccess)
{
return INIT_CUDA_CHOOSEDEVICE_FAILED;
}
/*// Declared Deprecated....!!
status = hipGLSetGLDevice(dev);
if (status != hipSuccess)
{
return INIT_CUDA_SETGLDEVICE_FAILED;
}*/
// Cuda Init Stop
/// Sam : all Shader Code Start
/*Vertex Shader Start*/
iVertexShaderObject = glCreateShader(GL_VERTEX_SHADER);
const GLchar *vertexShaderSourceCode = "#version 450 core" \
"\n" \
"layout (location = 0)in vec4 vPosition;" \
"layout (location = 3)in vec2 vTexture0_Coord;;" \
"layout (location = 0)out vec2 out_Texture0_Coord;" \
"uniform mat4 u_model_matrix,u_view_matrix,u_projection_matrix;" \
"void main(void)" \
"{" \
/*" gl_Position = u_projection_matrix * u_view_matrix * u_model_matrix * vPosition;" \*/
" gl_Position = vPosition;" \
" out_Texture0_Coord = vTexture0_Coord;" \
"}";
glShaderSource(iVertexShaderObject, 1, (const GLchar**)&vertexShaderSourceCode, NULL);
// Compile it
glCompileShader(iVertexShaderObject);
GLint iInfoLogLength = 0;
GLint iShaderCompileStatus = 0;
GLchar *szInfoLog = NULL;
glGetShaderiv(iVertexShaderObject, GL_COMPILE_STATUS, &iShaderCompileStatus);
if (iShaderCompileStatus == GL_FALSE)
{
glGetShaderiv(iVertexShaderObject, GL_INFO_LOG_LENGTH, &iInfoLogLength);
if (iInfoLogLength>0)
{
szInfoLog = (GLchar*)malloc(iInfoLogLength * sizeof(GLchar));
if (szInfoLog != NULL)
{
GLsizei written;
glGetShaderInfoLog(iVertexShaderObject, iInfoLogLength, &written, szInfoLog);
fprintf_s(g_pFile, "ERROR : Vertex Shader Compilation Log : %s \n", szInfoLog);
free(szInfoLog);
szInfoLog = NULL;
return INIT_VERTEX_SHADER_COMPILATION_FAILED;
//DestroyWindow(g_hwnd);
//exit(EXIT_FAILURE);
}
}
}
/*Vertex Shader End*/
/*Fragment Shader Start*/
iFragmentShaderObject = glCreateShader(GL_FRAGMENT_SHADER);
const GLchar *fragmentShaderSourceCode = "#version 450 core" \
"\n" \
"layout (location = 0)in vec2 out_Texture0_Coord;" \
"layout (location = 0)out vec4 FragColor;" \
"uniform sampler2D u_texture0_sampler;" \
"void main(void)" \
"{" \
" FragColor = texture(u_texture0_sampler,out_Texture0_Coord);" \
"}";
glShaderSource(iFragmentShaderObject, 1, (const GLchar**)&fragmentShaderSourceCode, NULL);
glCompileShader(iFragmentShaderObject);
iInfoLogLength = 0;
iShaderCompileStatus = 0;
szInfoLog = NULL;
glGetShaderiv(iFragmentShaderObject, GL_COMPILE_STATUS, &iShaderCompileStatus);
if (iShaderCompileStatus == GL_FALSE)
{
glGetShaderiv(iFragmentShaderObject, GL_INFO_LOG_LENGTH, &iInfoLogLength);
if (iInfoLogLength>0)
{
szInfoLog = (GLchar*)malloc(iInfoLogLength * sizeof(GLchar));
if (szInfoLog != NULL)
{
GLsizei written;
glGetShaderInfoLog(iFragmentShaderObject, iInfoLogLength, &written, szInfoLog);
fprintf(g_pFile, "ERROR: Fragment Shader Compilation Log : %s \n", szInfoLog);
free(szInfoLog);
szInfoLog = NULL;
return INIT_FRAGMENT_SHADER_COMPILATION_FAILED;
//DestroyWindow(g_hwnd);
//exit(EXIT_FAILURE);
}
}
}
/*Fragment Shader End*/
/* Shader Program Start */
g_ShaderProgramObject = glCreateProgram();
glAttachShader(g_ShaderProgramObject, iVertexShaderObject);
glAttachShader(g_ShaderProgramObject, iFragmentShaderObject);
glBindAttribLocation(g_ShaderProgramObject, SAM_ATTRIBUTE_POSITION, "vPosition");
glBindAttribLocation(g_ShaderProgramObject, SAM_ATTRIBUTE_TEXTURE0, "vTexture0_Coord");
glLinkProgram(g_ShaderProgramObject);
GLint iShaderLinkStatus = 0;
iInfoLogLength = 0;
glGetProgramiv(g_ShaderProgramObject, GL_LINK_STATUS, &iShaderLinkStatus);
if (iShaderLinkStatus == GL_FALSE)
{
glGetProgramiv(g_ShaderProgramObject, GL_INFO_LOG_LENGTH, &iInfoLogLength);
if (iInfoLogLength>0)
{
szInfoLog = (GLchar*)malloc(iInfoLogLength * sizeof(GLchar));
if (szInfoLog != NULL)
{
GLsizei written;
glGetProgramInfoLog(g_ShaderProgramObject, iInfoLogLength, &written, szInfoLog);
fprintf_s(g_pFile, "ERROR : Linking Shader Program Objects Failed %s \n", szInfoLog);
free(szInfoLog);
szInfoLog = NULL;
return INIT_LINK_SHADER_PROGRAM_FAILED;
//DestroyWindow(g_hwnd);
//exit(EXIT_FAILURE);
}
}
}
/* Shader Program End */
/*Setup Uniforms Start*/
g_Uniform_Model_Matrix = glGetUniformLocation(g_ShaderProgramObject, "u_model_matrix");
g_Uniform_Projection_Matrix = glGetUniformLocation(g_ShaderProgramObject, "u_projection_matrix");
g_Uniform_View_Matrix = glGetUniformLocation(g_ShaderProgramObject, "u_view_matrix");
g_uniform_TextureSampler = glGetUniformLocation(g_ShaderProgramObject, "u_texture0_sampler"); // Render() uses this sampler location; leaving it commented only worked because the location happened to be 0
/*Setup Uniforms End*/
/* Fill Buffers Start*/
const GLfloat squareVertices[] = {
-1.0f, 1.0f, 0.0f,
-1.0f, -1.0f, 0.0f,
1.0f, -1.0f, 0.0f,
1.0f, 1.0f, 0.0f
};
const GLfloat squareTexCords[] =
{
0.0f, 1.0f,
0.0f, 0.0f,
1.0f,0.0f,
1.0f,1.0f
};
glGenVertexArrays(1, &g_VertexArrayObject);//VAO
glBindVertexArray(g_VertexArrayObject);
glGenBuffers(1, &g_VertexBufferObject_Position);// vbo position
glBindBuffer(GL_ARRAY_BUFFER, g_VertexBufferObject_Position);
glBufferData(GL_ARRAY_BUFFER, sizeof(squareVertices), squareVertices, GL_STATIC_DRAW);
glVertexAttribPointer(SAM_ATTRIBUTE_POSITION, 3, GL_FLOAT, GL_FALSE, 0, NULL);
glEnableVertexAttribArray(SAM_ATTRIBUTE_POSITION);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glGenBuffers(1, &g_VertexBufferObject_TexCoord); // vbo texture
glBindBuffer(GL_ARRAY_BUFFER, g_VertexBufferObject_TexCoord);
glBufferData(GL_ARRAY_BUFFER, sizeof(squareTexCords), squareTexCords, GL_STATIC_DRAW);
glVertexAttribPointer(SAM_ATTRIBUTE_TEXTURE0, 2, GL_FLOAT, GL_FALSE, 0, NULL);
glEnableVertexAttribArray(SAM_ATTRIBUTE_TEXTURE0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindVertexArray(0);
/* Fill Buffers End*/
/// Sam : all Shader Code End
glGenBuffers(1, &bufferOBJ);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, bufferOBJ);
glBufferData(GL_PIXEL_UNPACK_BUFFER, DIM * DIM * 4, NULL, GL_STREAM_DRAW);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
status = hipGraphicsGLRegisterBuffer(&resource, bufferOBJ,hipGraphicsMapFlagsNone);
if (status != hipSuccess)
{
return INIT_CUDA_REGISTER_BUFFER_FAILED;
}
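// CUDA-GL interop lifecycle: the PBO is registered once here; Update() maps
// it each frame to get a device pointer, launches the kernel, then unmaps so
// GL can source the texture upload from it; UnInitialize() unregisters it.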
// Prepare Texture to take from PBO
glGenTextures(1,&g_TextureID);
glBindTexture(GL_TEXTURE_2D, g_TextureID);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, DIM, DIM, 0, GL_BGRA,
GL_UNSIGNED_BYTE, NULL);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glBindTexture(GL_TEXTURE_2D, 0);
glEnable(GL_TEXTURE_2D);
glShadeModel(GL_SMOOTH);
glEnable(GL_DEPTH_TEST);
glDepthFunc(GL_LEQUAL);
glClearDepth(1.0f);
glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_NICEST);
glClearColor(0.125f, 0.125f, 0.125f, 1.0f);
g_PersPectiveProjectionMatrix = vmath::mat4::identity();
Resize(WIN_WIDTH, WIN_HEIGHT);
return INIT_ALL_OK;
}
int Update(void)
{
void updatePixels(GLubyte* dst, int size, float animate);
void updatePixels1(GLubyte* dst, int size, float animate);
if (animation_flag)
{
g_fanimate = g_fanimate + 0.005f;
if ( (g_fanimate >1.0f) )
{
animation_flag = false;
}
}
else
{
g_fanimate = g_fanimate - 0.005f;
if ((g_fanimate <0.0f))
{
animation_flag = true;
}
}
if (gpu_cpu_Switch==true)
{
uchar4 *devPtr = NULL;
size_t size;
hipError_t status;
status = hipGraphicsMapResources(1, &resource, NULL);
if (status != hipSuccess)
{
return CUDA_INIT_GRAPHICS_MAPPED_RES_FAILED;
}
status = hipGraphicsResourceGetMappedPointer((void**)&devPtr, &size, resource);
if (status != hipSuccess)
{
return CUDA_INIT_GRAPHICS_MAPPED_RES_POINTER_FAILED;
}
// Run the kernel
dim3 grids(DIM / 8, DIM / 8);
dim3 threads(8, 8);
hipLaunchKernelGGL(kernel, dim3(grids), dim3(threads), 0, 0, devPtr, g_fanimate);
//kernel1 << < grids, threads >> >(devPtr,g_fanimate);
// Unmap the resource for use
status = hipGraphicsUnmapResources(1, &resource, NULL);
if (status != hipSuccess)
{
return CUDA_INIT_GRAPHICS_UNMAPP_RES_FAILED;
}
}
else
{
// CPU update path
/*glBindBuffer(GL_PIXEL_UNPACK_BUFFER, bufferOBJ);
GLubyte *ptr = (GLubyte*)glMapBuffer(GL_PIXEL_UNPACK_BUFFER,GL_WRITE_ONLY);
if (ptr)
{
updatePixels(ptr, DIM*DIM*4, g_fanimate);
glUnmapBuffer(GL_PIXEL_UNPACK_BUFFER);
}
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);*/
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, bufferOBJ);
GLubyte *ptr = (GLubyte*)glMapBuffer(GL_PIXEL_UNPACK_BUFFER, GL_WRITE_ONLY);
if (ptr)
{
updatePixels1(ptr, DIM*DIM * 4, g_fanimate);
glUnmapBuffer(GL_PIXEL_UNPACK_BUFFER);
}
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
}
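// With a buffer bound to GL_PIXEL_UNPACK_BUFFER, the NULL data pointer in
// glTexSubImage2D() is interpreted as a byte offset into the PBO, so the
// texture is filled from the (GPU-resident) buffer without a host round trip.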
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, bufferOBJ);
glBindTexture(GL_TEXTURE_2D, g_TextureID);
glTexSubImage2D(GL_TEXTURE_2D,0,0,0,DIM,DIM,GL_BGRA,GL_UNSIGNED_BYTE,NULL);
glBindTexture(GL_TEXTURE_2D, 0);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
return CUDA_INIT_ALL_OK;
}
void updatePixels1(GLubyte* dst, int size,float animate)
{
if (!dst)
return;
int *ptr = (int *)dst; // treat each texel as one 32-bit word
for (int i = 0; i < DIM; i++)
{
for (int j = 0; j < DIM; j++)
{
int offset = j + i * DIM;
float fx = i / (float)DIM - 0.5f;
float fy = j / (float)DIM - 0.5f;
unsigned char green = ((unsigned char)128) + ((unsigned char)127) * (sinf(fabsf(fx * 100 * animate) - fabsf(fy * 100 * animate)) );
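// On a little-endian host, storing an int puts (255 - green) in the low
// byte (blue for GL_BGRA) and zeroes the other channels, including alpha.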
*(ptr) = 255 - green;
ptr++;
}
}
}
void updatePixels(GLubyte* dst, int size, float animate)
{
static int color = 0;
if (!dst)
return;
int *ptr = (int *)dst; // treat each texel as one 32-bit word
for (int i = 0; i < DIM; i++)
{
for (int j = 0; j < DIM; j++)
{
*ptr = color;
++ptr;
}
color += 257;
}
++color;
}
void Render(void)
{
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
vmath::mat4 modelMatrix = vmath::mat4::identity();
vmath::mat4 viewMatrix = vmath::mat4::identity();
glUseProgram(g_ShaderProgramObject);
modelMatrix = vmath::translate(0.0f, 0.0f, -3.0f);
glUniformMatrix4fv(g_Uniform_Model_Matrix, 1, GL_FALSE, modelMatrix);
glUniformMatrix4fv(g_Uniform_View_Matrix, 1, GL_FALSE, viewMatrix);
glUniformMatrix4fv(g_Uniform_Projection_Matrix, 1, GL_FALSE, g_PersPectiveProjectionMatrix);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, g_TextureID);
glUniform1i(g_uniform_TextureSampler, 0);
glBindVertexArray(g_VertexArrayObject);
glDrawArrays(GL_TRIANGLE_FAN, 0, 4);
glBindVertexArray(0);
SwapBuffers(g_hdc);
}
void FullScreen(void)
{
MONITORINFO mi = { sizeof(mi) };
dwStyle = GetWindowLong(g_hwnd, GWL_STYLE);
if (g_bFullScreen == false)
{
if (dwStyle & WS_OVERLAPPEDWINDOW)
{
if (GetWindowPlacement(g_hwnd, &wpPrev) && GetMonitorInfo(MonitorFromWindow(g_hwnd, MONITORINFOF_PRIMARY), &mi))
{
SetWindowLong(g_hwnd, GWL_STYLE, dwStyle & ~WS_OVERLAPPEDWINDOW);
SetWindowPos(g_hwnd, HWND_TOP,
mi.rcMonitor.left, mi.rcMonitor.top,
mi.rcMonitor.right - mi.rcMonitor.left,
mi.rcMonitor.bottom - mi.rcMonitor.top, SWP_NOZORDER | SWP_FRAMECHANGED);
}
}
ShowCursor(FALSE);
g_bFullScreen = true;
}
else
{
SetWindowLong(g_hwnd, GWL_STYLE, dwStyle | WS_OVERLAPPEDWINDOW);
SetWindowPlacement(g_hwnd, &wpPrev);
SetWindowPos(g_hwnd, HWND_TOP, 0, 0, 0, 0, SWP_NOOWNERZORDER | SWP_NOZORDER | SWP_FRAMECHANGED | SWP_NOMOVE | SWP_NOSIZE);
ShowCursor(TRUE);
g_bFullScreen = false;
}
}
bool Resize(int iWidth, int iHeight)
{
if (iHeight <= 0)
{
iHeight = 1;
}
glViewport(0, 0, (GLsizei)iWidth, (GLsizei)iHeight);
g_PersPectiveProjectionMatrix = vmath::perspective(45.0f, (float)iWidth / (float)iHeight, 0.1f, 100.0f);
return true;
}
int UnInitialize(void)
{
if (g_bFullScreen == true)
{
SetWindowLong(g_hwnd, GWL_STYLE, dwStyle | WS_OVERLAPPEDWINDOW);
SetWindowPlacement(g_hwnd, &wpPrev);
SetWindowPos(g_hwnd, HWND_TOP, 0, 0, 0, 0, SWP_NOOWNERZORDER | SWP_NOZORDER | SWP_FRAMECHANGED | SWP_NOMOVE | SWP_NOSIZE);
ShowCursor(TRUE);
g_bFullScreen = false;
}
/* Clear CUDA Resources Start */
hipGraphicsUnmapResources(1, &resource, NULL);
hipGraphicsUnregisterResource(resource);
hipDeviceReset();
/* Clear CUDA Resources End */
/* Clear OpenGL Resources Start */
if (bufferOBJ)
{
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, bufferOBJ);
glDeleteBuffers(1, &bufferOBJ);
bufferOBJ = NULL;
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
}
if (g_VertexBufferObject_TexCoord)
{
glDeleteBuffers(1, &g_VertexBufferObject_TexCoord);
g_VertexBufferObject_TexCoord = NULL;
}
if (g_VertexBufferObject_Position)
{
glDeleteBuffers(1, &g_VertexBufferObject_Position);
g_VertexBufferObject_Position = NULL;
}
if (g_VertexArrayObject)
{
glDeleteVertexArrays(1, &g_VertexArrayObject);
g_VertexArrayObject = NULL;
}
glUseProgram(0);
if (g_ShaderProgramObject)
{
GLsizei iShaderCount;
GLsizei iShaderNumber;
glUseProgram(g_ShaderProgramObject);
glGetProgramiv(g_ShaderProgramObject, GL_ATTACHED_SHADERS, &iShaderCount);
GLuint *pShaders = (GLuint*)calloc(iShaderCount, sizeof(GLuint));
if (pShaders)
{
glGetAttachedShaders(g_ShaderProgramObject, iShaderCount, &iShaderCount, pShaders);
for (iShaderNumber = 0; iShaderNumber < iShaderCount; iShaderNumber++)
{
glDetachShader(g_ShaderProgramObject, pShaders[iShaderNumber]);
glDeleteShader(pShaders[iShaderNumber]);
pShaders[iShaderNumber] = 0;
}
free(pShaders);
pShaders = NULL;
}
glUseProgram(0);
glDeleteProgram(g_ShaderProgramObject);
g_ShaderProgramObject = NULL;
}
/* Clear OpenGL Resources End */
if (wglGetCurrentContext() == g_hrc)
{
wglMakeCurrent(NULL, NULL);
}
if (g_hrc)
{
wglDeleteContext(g_hrc);
g_hrc = NULL;
}
if (g_hdc)
{
ReleaseDC(g_hwnd, g_hdc);
g_hdc = NULL;
}
if (g_pFile)
{
fprintf_s(g_pFile, "Closing File \n");
fclose(g_pFile);
g_pFile = NULL;
}
return 0;
}
| fda6b681a9821972ca5b7eaf69e61b33e883692e.cu | #include<Windows.h>
#include<stdio.h>
#include<stdlib.h>
#include<gl\glew.h>
#include<gl\GL.h>
#include<math.h>
#include<cuda.h>
#include<cuda_gl_interop.h>
#include"vmath.h"
#define WIN_WIDTH 800
#define WIN_HEIGHT 600
#define DIM 4096
#pragma comment(lib,"user32.lib")
#pragma comment(lib,"gdi32.lib")
#pragma comment(lib,"glew32.lib")
#pragma comment(lib,"opengl32.lib")
//using namespace std;
enum InitErrorCodes
{
INIT_VERTEX_SHADER_COMPILATION_FAILED = -9,
INIT_FRAGMENT_SHADER_COMPILATION_FAILED,
INIT_LINK_SHADER_PROGRAM_FAILED,
INIT_FAIL_GLEW_INIT,
INIT_FAIL_BRIDGE_CONTEX_SET,
INIT_FAIL_BRIDGE_CONTEX_CREATION,
INIT_FAIL_SET_PIXEL_FORMAT,
INIT_FAIL_NO_PIXEL_FORMAT,
INIT_FAIL_NO_HDC,
INIT_ALL_OK,
};
enum CUDAInitErrorCodes
{
/* min no -10 */
INIT_CUDA_SETGLDEVICE_FAILED = -21,
INIT_CUDA_REGISTER_BUFFER_FAILED,
CUDA_INIT_GRAPHICS_MAPPED_RES_FAILED,
CUDA_INIT_GRAPHICS_MAPPED_RES_POINTER_FAILED,
CUDA_INIT_GRAPHICS_UNMAPP_RES_FAILED,
INIT_CUDA_CHOOSEDEVICE_FAILED = -10,
CUDA_INIT_ALL_OK=0,
};
enum attributeBindLocations
{
SAM_ATTRIBUTE_POSITION = 0,
SAM_ATTRIBUTE_COLOR,
SAM_ATTRIBUTE_NORMAL,
SAM_ATTRIBUTE_TEXTURE0,
};
LRESULT CALLBACK MainWndProc(HWND hwnd, UINT iMsg, WPARAM wParam, LPARAM lParam);
bool g_bWindowActive = false;
HWND g_hwnd = NULL;
HDC g_hdc = NULL;
HGLRC g_hrc = NULL;
WINDOWPLACEMENT wpPrev;
DWORD dwStyle;
bool g_bFullScreen = false;
FILE *g_pFile = NULL;
// Shaders
//GLuint iVertexShaderObject = 0;
//GLuint iFragmentShaderObject = 0;
GLuint g_ShaderProgramObject = 0;
// All Vertex Buffers
GLuint g_VertexArrayObject = 0;
GLuint g_VertexBufferObject_Position = 0;
GLuint g_VertexBufferObject_TexCoord = 0;
// Uniforms
GLuint g_Uniform_Model_Matrix = 0;
GLuint g_Uniform_View_Matrix = 0;
GLuint g_Uniform_Projection_Matrix = 0;
// sampler
GLuint g_uniform_TextureSampler;
GLuint g_TextureID;
// Projection
vmath::mat4 g_PersPectiveProjectionMatrix;
// CUDA Res
GLuint bufferOBJ;
cudaGraphicsResource *resource=NULL;
bool gpu_cpu_Switch = false;// default on cpu
float g_fanimate = 0.0f;
bool animation_flag = false;
__global__ void kernel(uchar4 *ptr, float time)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int offset = x + y * blockDim.x * gridDim.x;
// now calculate the value at the position
float fx = x / (float)DIM - 0.5f;
float fy = y / (float)DIM - 0.5f;
unsigned char green = 128 + 127 * sinf(fabsf(fx * 100 * time) - fabsf(fy * 100* time));
//ptr[(y * DIM) + x].x = 0;
ptr[offset].x = 0;
ptr[offset].y = green;
ptr[offset].z = 0;
ptr[offset].w = 255;
}
__global__ void kernel1(uchar4 *ptr, float time)
{
__shared__ unsigned char color;
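// All threads in the block race on this unsynchronized shared byte; the
// result is an arbitrary flat color per block.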
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int offset = x + y * blockDim.x * gridDim.x;
// now calculate the value at the position
float fx = x / (float)DIM - 0.5f;
float fy = y / (float)DIM - 0.5f;
color = y * fabsf(fy * 100 * time);
color += 25 ;
__syncthreads();
//ptr[(y * DIM) + x].x = 0;
ptr[offset].x = color;
ptr[offset].y = color;
ptr[offset].z = color;
ptr[offset].w = 255;
}
int WINAPI WinMain(HINSTANCE hInstance, HINSTANCE hPrevInstance, LPSTR szCmdLine, int iCmdShow)
{
//int UnInitialize(void);
int Initialize(void);
int Update(void);
void Render(void);
// Windowing Elements
WNDCLASSEX wndclass;
MSG msg;
HWND hwnd = NULL;
TCHAR szClassName[] = TEXT("Sam_OGL");
RECT windowRect;
// Game Loop Control
bool bDone = false;
// Initialization Status
int iInitRet = 0;
SecureZeroMemory((void*)&wndclass, sizeof(wndclass));
wndclass.cbSize = sizeof(wndclass);
wndclass.cbClsExtra = 0;
wndclass.cbWndExtra = 0;
wndclass.style = CS_HREDRAW | CS_VREDRAW | CS_OWNDC;
wndclass.lpfnWndProc = MainWndProc;
wndclass.lpszClassName = szClassName;
wndclass.lpszMenuName = NULL;
wndclass.hInstance = hInstance;
wndclass.hbrBackground = (HBRUSH)GetStockObject(GRAY_BRUSH);
wndclass.hIcon = LoadIcon(NULL, IDI_APPLICATION); // predefined icons/cursors need a NULL HINSTANCE
wndclass.hIconSm = LoadIcon(NULL, IDI_APPLICATION);
wndclass.hCursor = LoadCursor(NULL, IDC_ARROW);
if (!RegisterClassEx(&wndclass))
{
MessageBox(NULL, TEXT("Issue...!!!"), TEXT("Could Not RegisterClass() "), MB_OK | MB_ICONERROR);
exit(EXIT_FAILURE);
}
if ((fopen_s(&g_pFile, "SamLogFile.txt", "w+")) == 0)
{
fprintf_s(g_pFile, "File Opened Successfully. \n");
}
else
{
MessageBox(NULL, TEXT("Issue...!!!"), TEXT("Could not open File"), MB_OK | MB_ICONERROR);
exit(EXIT_FAILURE);
}
SecureZeroMemory((void*)&windowRect, sizeof(windowRect));
windowRect.left = 0;
windowRect.top = 0;
windowRect.bottom = WIN_HEIGHT;
windowRect.right = WIN_WIDTH;
AdjustWindowRectEx(&windowRect, WS_OVERLAPPEDWINDOW, FALSE, WS_EX_APPWINDOW);
hwnd = CreateWindowEx(WS_EX_APPWINDOW, szClassName,
TEXT("First_OpenGL_Window"),
WS_OVERLAPPEDWINDOW | WS_CLIPCHILDREN | WS_CLIPSIBLINGS | WS_VISIBLE,
CW_USEDEFAULT, CW_USEDEFAULT,
windowRect.right - windowRect.left,
windowRect.bottom - windowRect.top,
NULL, NULL, hInstance, NULL);
if (hwnd == NULL)
{
MessageBox(NULL, TEXT("Issue...!!!"), TEXT("Could Not CreateWindow() "), MB_OK | MB_ICONERROR);
exit(EXIT_FAILURE);
}
g_hwnd = hwnd;
iInitRet = Initialize();
switch (iInitRet)
{
case INIT_ALL_OK:
fprintf_s(g_pFile, "Initialize Complete \n");
break;
case INIT_FAIL_NO_HDC:
fprintf_s(g_pFile, "Failed to Get HDC \n");
DestroyWindow(hwnd);
break;
case INIT_FAIL_NO_PIXEL_FORMAT:
fprintf_s(g_pFile, "Failed to get PixelFormat \n");
DestroyWindow(hwnd);
break;
case INIT_FAIL_SET_PIXEL_FORMAT:
fprintf_s(g_pFile, "Failed to set Pixel Format \n");
DestroyWindow(hwnd);
break;
case INIT_FAIL_BRIDGE_CONTEX_CREATION:
fprintf_s(g_pFile, "Failed to wglCreateContext \n");
DestroyWindow(hwnd);
break;
case INIT_FAIL_BRIDGE_CONTEX_SET:
fprintf_s(g_pFile, "Failed to wglMakeCurrent \n");
DestroyWindow(hwnd);
break;
case INIT_FAIL_GLEW_INIT:
fprintf_s(g_pFile, "Failed to glewInit \n");
DestroyWindow(hwnd);
break;
case INIT_LINK_SHADER_PROGRAM_FAILED:
fprintf_s(g_pFile, "Failed to Link Shader Program Object \n");
DestroyWindow(hwnd);
break;
case INIT_VERTEX_SHADER_COMPILATION_FAILED:
fprintf_s(g_pFile, "Failed to Compile vertex Shader \n");
DestroyWindow(hwnd);
break;
case INIT_FRAGMENT_SHADER_COMPILATION_FAILED:
fprintf_s(g_pFile, "Failed to Compile fragment Shader \n");
DestroyWindow(hwnd);
break;
default:// For Other issues than OpenGL
{
switch (iInitRet)
{
case INIT_CUDA_CHOOSEDEVICE_FAILED:
fprintf_s(g_pFile, "cudaChooseDevice Failed \n");
DestroyWindow(hwnd);
break;
/*default:
fprintf_s(g_pFile, "CUDA Failed UnKnown Reasons \n");
DestroyWindow(hwnd);
break;*/
}
// General Failure
fprintf_s(g_pFile, "Failed UnKnown Reasons \n");
DestroyWindow(hwnd);
}
break;
}
ShowWindow(hwnd, SW_SHOWNORMAL);
SetForegroundWindow(hwnd);
SetFocus(hwnd);
while (bDone == false)
{
if (PeekMessage(&msg, NULL, 0, 0, PM_REMOVE))
{
if (msg.message == WM_QUIT)
{
bDone = true;
}
else
{
TranslateMessage(&msg);
DispatchMessage(&msg);
}
}
else
{
if (g_bWindowActive)
{
Update();
}
// Show all Animations
Render();
}
}
//UnInitialize();
return ((int)msg.wParam);
}
LRESULT CALLBACK MainWndProc(HWND hwnd, UINT iMsg, WPARAM wParam, LPARAM lParam)
{
int UnInitialize(void);
void FullScreen(void);
bool Resize(int, int);
switch (iMsg)
{
case WM_CREATE:
PostMessage(hwnd, WM_KEYDOWN, (WPARAM)0x46, (LPARAM)NULL);
break;
case WM_SETFOCUS:
g_bWindowActive = true;
break;
case WM_KILLFOCUS:
g_bWindowActive = false;
break;
case WM_KEYDOWN:
switch (LOWORD(wParam))
{
case VK_ESCAPE:
DestroyWindow(hwnd);
break;
case 0x46: // 'f' or 'F'
//MessageBox(hwnd, TEXT("F is pressed"), TEXT("Status"), MB_OK);
FullScreen();
break;
case 0x48: // 'h' or 'H'
gpu_cpu_Switch = !gpu_cpu_Switch;
break;
default:
break;
}
break;
case WM_SIZE:
Resize(LOWORD(lParam), HIWORD(lParam));
break;
case WM_ERASEBKGND:
return(0);
//break;
case WM_CLOSE:
DestroyWindow(hwnd);
break;
case WM_DESTROY:
UnInitialize();
PostQuitMessage(0);
break;
default:
break;
}
return (DefWindowProc(hwnd, iMsg, wParam, lParam));
}
int Initialize(void)
{
bool Resize(int, int);
// Shader Programs
GLuint iVertexShaderObject = 0;
GLuint iFragmentShaderObject = 0;
// CUDA Init Variables
cudaDeviceProp prop;
int dev;
cudaError status;
int iPixelIndex = 0;
PIXELFORMATDESCRIPTOR pfd;
GLenum err = GLEW_OK; // GLEW error code
SecureZeroMemory(&pfd, sizeof(pfd));
pfd.nSize = sizeof(pfd);
pfd.nVersion = 1;
pfd.dwFlags = PFD_SUPPORT_OPENGL | PFD_DRAW_TO_WINDOW | PFD_DOUBLEBUFFER;
pfd.iPixelType = PFD_TYPE_RGBA;
pfd.cColorBits = 32;
pfd.cRedBits = 8;
pfd.cGreenBits = 8;
pfd.cBlueBits = 8;
pfd.cAlphaBits = 8;
g_hdc = GetDC(g_hwnd);
if (g_hdc == NULL)
{
return INIT_FAIL_NO_HDC;
}
iPixelIndex = ChoosePixelFormat(g_hdc, &pfd);
if (iPixelIndex == 0)
{
return INIT_FAIL_NO_PIXEL_FORMAT;
}
if (SetPixelFormat(g_hdc, iPixelIndex, &pfd) == FALSE)
{
return INIT_FAIL_SET_PIXEL_FORMAT;
}
g_hrc = wglCreateContext(g_hdc);
if (g_hrc == NULL)
{
return INIT_FAIL_BRIDGE_CONTEX_CREATION;
}
if (wglMakeCurrent(g_hdc, g_hrc) == FALSE)
{
return INIT_FAIL_BRIDGE_CONTEX_SET;
}
// Enables Feature Required for Programable Pipeline
err = glewInit();
if (err != GLEW_OK)
{
return INIT_FAIL_GLEW_INIT;
}
// GL information Start
fprintf_s(g_pFile, "SHADER_INFO : Vendor is : %s\n", glGetString(GL_VENDOR));
fprintf_s(g_pFile, "SHADER_INFO : Renderer is : %s\n", glGetString(GL_RENDER));
fprintf_s(g_pFile, "SHADER_INFO : OpenGL Version is : %s\n", glGetString(GL_VERSION));
fprintf_s(g_pFile, "SHADER_INFO : GLSL Version is : %s\n", glGetString(GL_SHADING_LANGUAGE_VERSION));
//fprintf_s(g_pFile, "SHADER_INFO : Extention is : %s \n", glGetString(GL_EXTENSIONS));
// GL information End
// Cuda Init Start
SecureZeroMemory((void*)&prop, sizeof(prop));
// Compute Capability 2.0 and beyond
prop.major = 2;
prop.minor = 0;
status = cudaChooseDevice(&dev,&prop);
if (status != cudaSuccess)
{
return INIT_CUDA_CHOOSEDEVICE_FAILED;
}
/*// Declared Deprecated....!!
status = cudaGLSetGLDevice(dev);
if (status != cudaSuccess)
{
return INIT_CUDA_SETGLDEVICE_FAILED;
}*/
// Cuda Init Stop
/// Sam : all Shader Code Start
/*Vertex Shader Start*/
iVertexShaderObject = glCreateShader(GL_VERTEX_SHADER);
const GLchar *vertexShaderSourceCode = "#version 450 core" \
"\n" \
"layout (location = 0)in vec4 vPosition;" \
"layout (location = 3)in vec2 vTexture0_Coord;;" \
"layout (location = 0)out vec2 out_Texture0_Coord;" \
"uniform mat4 u_model_matrix,u_view_matrix,u_projection_matrix;" \
"void main(void)" \
"{" \
/*" gl_Position = u_projection_matrix * u_view_matrix * u_model_matrix * vPosition;" \*/
" gl_Position = vPosition;" \
" out_Texture0_Coord = vTexture0_Coord;" \
"}";
glShaderSource(iVertexShaderObject, 1, (const GLchar**)&vertexShaderSourceCode, NULL);
// Compile it
glCompileShader(iVertexShaderObject);
GLint iInfoLogLength = 0;
GLint iShaderCompileStatus = 0;
GLchar *szInfoLog = NULL;
glGetShaderiv(iVertexShaderObject, GL_COMPILE_STATUS, &iShaderCompileStatus);
if (iShaderCompileStatus == GL_FALSE)
{
glGetShaderiv(iVertexShaderObject, GL_INFO_LOG_LENGTH, &iInfoLogLength);
if (iInfoLogLength>0)
{
szInfoLog = (GLchar*)malloc(iInfoLogLength * sizeof(GLchar));
if (szInfoLog != NULL)
{
GLsizei written;
glGetShaderInfoLog(iVertexShaderObject, iInfoLogLength, &written, szInfoLog);
fprintf_s(g_pFile, "ERROR : Vertex Shader Compilation Log : %s \n", szInfoLog);
free(szInfoLog);
szInfoLog = NULL;
return INIT_VERTEX_SHADER_COMPILATION_FAILED;
//DestroyWindow(g_hwnd);
//exit(EXIT_FAILURE);
}
}
}
/*Vertex Shader End*/
/*Fragment Shader Start*/
iFragmentShaderObject = glCreateShader(GL_FRAGMENT_SHADER);
const GLchar *fragmentShaderSourceCode = "#version 450 core" \
"\n" \
"layout (location = 0)in vec2 out_Texture0_Coord;" \
"layout (location = 0)out vec4 FragColor;" \
"uniform sampler2D u_texture0_sampler;" \
"void main(void)" \
"{" \
" FragColor = texture(u_texture0_sampler,out_Texture0_Coord);" \
"}";
glShaderSource(iFragmentShaderObject, 1, (const GLchar**)&fragmentShaderSourceCode, NULL);
glCompileShader(iFragmentShaderObject);
iInfoLogLength = 0;
iShaderCompileStatus = 0;
szInfoLog = NULL;
glGetShaderiv(iFragmentShaderObject, GL_COMPILE_STATUS, &iShaderCompileStatus);
if (iShaderCompileStatus == GL_FALSE)
{
glGetShaderiv(iFragmentShaderObject, GL_INFO_LOG_LENGTH, &iInfoLogLength);
if (iInfoLogLength>0)
{
szInfoLog = (GLchar*)malloc(iInfoLogLength * sizeof(GLchar));
if (szInfoLog != NULL)
{
GLsizei written;
glGetShaderInfoLog(iFragmentShaderObject, iInfoLogLength, &written, szInfoLog);
fprintf(g_pFile, "ERROR: Fragment Shader Compilation Log : %s \n", szInfoLog);
free(szInfoLog);
szInfoLog = NULL;
return INIT_FRAGMENT_SHADER_COMPILATION_FAILED;
//DestroyWindow(g_hwnd);
//exit(EXIT_FAILURE);
}
}
}
/*Fragment Shader End*/
/* Shader Program Start */
g_ShaderProgramObject = glCreateProgram();
glAttachShader(g_ShaderProgramObject, iVertexShaderObject);
glAttachShader(g_ShaderProgramObject, iFragmentShaderObject);
glBindAttribLocation(g_ShaderProgramObject, SAM_ATTRIBUTE_POSITION, "vPosition");
glBindAttribLocation(g_ShaderProgramObject, SAM_ATTRIBUTE_TEXTURE0, "vTexture0_Coord");
glLinkProgram(g_ShaderProgramObject);
GLint iShaderLinkStatus = 0;
iInfoLogLength = 0;
glGetProgramiv(g_ShaderProgramObject, GL_LINK_STATUS, &iShaderLinkStatus);
if (iShaderLinkStatus == GL_FALSE)
{
glGetProgramiv(g_ShaderProgramObject, GL_INFO_LOG_LENGTH, &iInfoLogLength);
if (iInfoLogLength>0)
{
szInfoLog = (GLchar*)malloc(iInfoLogLength * sizeof(GLchar));
if (szInfoLog != NULL)
{
GLsizei written;
glGetProgramInfoLog(g_ShaderProgramObject, iInfoLogLength, &written, szInfoLog);
fprintf_s(g_pFile, "ERROR : Linking Shader Program Objects Failed %s \n", szInfoLog);
free(szInfoLog);
szInfoLog = NULL;
return INIT_LINK_SHADER_PROGRAM_FAILED;
//DestroyWindow(g_hwnd);
//exit(EXIT_FAILURE);
}
}
}
/* Shader Program End */
/*Setup Uniforms Start*/
g_Uniform_Model_Matrix = glGetUniformLocation(g_ShaderProgramObject, "u_model_matrix");
g_Uniform_Projection_Matrix = glGetUniformLocation(g_ShaderProgramObject, "u_projection_matrix");
g_Uniform_View_Matrix = glGetUniformLocation(g_ShaderProgramObject, "u_view_matrix");
g_uniform_TextureSampler = glGetUniformLocation(g_ShaderProgramObject, "u_texture0_sampler"); // Render() uses this sampler location; leaving it commented only worked because the location happened to be 0
/*Setup Uniforms End*/
/* Fill Buffers Start*/
const GLfloat squareVertices[] = {
-1.0f, 1.0f, 0.0f,
-1.0f, -1.0f, 0.0f,
1.0f, -1.0f, 0.0f,
1.0f, 1.0f, 0.0f
};
const GLfloat squareTexCords[] =
{
0.0f, 1.0f,
0.0f, 0.0f,
1.0f,0.0f,
1.0f,1.0f
};
glGenVertexArrays(1, &g_VertexArrayObject);//VAO
glBindVertexArray(g_VertexArrayObject);
glGenBuffers(1, &g_VertexBufferObject_Position);// vbo position
glBindBuffer(GL_ARRAY_BUFFER, g_VertexBufferObject_Position);
glBufferData(GL_ARRAY_BUFFER, sizeof(squareVertices), squareVertices, GL_STATIC_DRAW);
glVertexAttribPointer(SAM_ATTRIBUTE_POSITION, 3, GL_FLOAT, GL_FALSE, 0, NULL);
glEnableVertexAttribArray(SAM_ATTRIBUTE_POSITION);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glGenBuffers(1, &g_VertexBufferObject_TexCoord); // vbo texture
glBindBuffer(GL_ARRAY_BUFFER, g_VertexBufferObject_TexCoord);
glBufferData(GL_ARRAY_BUFFER, sizeof(squareTexCords), squareTexCords, GL_STATIC_DRAW);
glVertexAttribPointer(SAM_ATTRIBUTE_TEXTURE0, 2, GL_FLOAT, GL_FALSE, 0, NULL);
glEnableVertexAttribArray(SAM_ATTRIBUTE_TEXTURE0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindVertexArray(0);
/* Fill Buffers End*/
/// Sam : all Shader Code End
glGenBuffers(1, &bufferOBJ);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, bufferOBJ);
glBufferData(GL_PIXEL_UNPACK_BUFFER, DIM * DIM * 4, NULL, GL_STREAM_DRAW);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
status = cudaGraphicsGLRegisterBuffer(&resource, bufferOBJ,cudaGraphicsMapFlagsNone);
if (status != cudaSuccess)
{
return INIT_CUDA_REGISTER_BUFFER_FAILED;
}
// Prepare Texture to take from PBO
glGenTextures(1,&g_TextureID);
glBindTexture(GL_TEXTURE_2D, g_TextureID);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, DIM, DIM, 0, GL_BGRA,
GL_UNSIGNED_BYTE, NULL);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glBindTexture(GL_TEXTURE_2D, 0);
glEnable(GL_TEXTURE_2D);
glShadeModel(GL_SMOOTH);
glEnable(GL_DEPTH_TEST);
glDepthFunc(GL_LEQUAL);
glClearDepth(1.0f);
glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_NICEST);
glClearColor(0.125f, 0.125f, 0.125f, 1.0f);
g_PersPectiveProjectionMatrix = vmath::mat4::identity();
Resize(WIN_WIDTH, WIN_HEIGHT);
return INIT_ALL_OK;
}
int Update(void)
{
void updatePixels(GLubyte* dst, int size, float animate);
void updatePixels1(GLubyte* dst, int size, float animate);
if (animation_flag)
{
g_fanimate = g_fanimate + 0.005f;
if ( (g_fanimate >1.0f) )
{
animation_flag = false;
}
}
else
{
g_fanimate = g_fanimate - 0.005f;
if ((g_fanimate <0.0f))
{
animation_flag = true;
}
}
if (gpu_cpu_Switch==true)
{
uchar4 *devPtr = NULL;
size_t size;
cudaError status;
status = cudaGraphicsMapResources(1, &resource, NULL);
if (status != cudaSuccess)
{
return CUDA_INIT_GRAPHICS_MAPPED_RES_FAILED;
}
status = cudaGraphicsResourceGetMappedPointer((void**)&devPtr, &size, resource);
if (status != cudaSuccess)
{
return CUDA_INIT_GRAPHICS_MAPPED_RES_POINTER_FAILED;
}
// Run the kernel
dim3 grids(DIM / 8, DIM / 8);
dim3 threads(8, 8);
kernel<<< grids, threads >>>(devPtr,g_fanimate);
//kernel1 << < grids, threads >> >(devPtr,g_fanimate);
// Unmap the resource for use
status = cudaGraphicsUnmapResources(1, &resource, NULL);
if (status != cudaSuccess)
{
return CUDA_INIT_GRAPHICS_UNMAPP_RES_FAILED;
}
}
else
{
// CPU update path
/*glBindBuffer(GL_PIXEL_UNPACK_BUFFER, bufferOBJ);
GLubyte *ptr = (GLubyte*)glMapBuffer(GL_PIXEL_UNPACK_BUFFER,GL_WRITE_ONLY);
if (ptr)
{
updatePixels(ptr, DIM*DIM*4, g_fanimate);
glUnmapBuffer(GL_PIXEL_UNPACK_BUFFER);
}
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);*/
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, bufferOBJ);
GLubyte *ptr = (GLubyte*)glMapBuffer(GL_PIXEL_UNPACK_BUFFER, GL_WRITE_ONLY);
if (ptr)
{
updatePixels1(ptr, DIM*DIM * 4, g_fanimate);
glUnmapBuffer(GL_PIXEL_UNPACK_BUFFER);
}
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
}
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, bufferOBJ);
glBindTexture(GL_TEXTURE_2D, g_TextureID);
glTexSubImage2D(GL_TEXTURE_2D,0,0,0,DIM,DIM,GL_BGRA,GL_UNSIGNED_BYTE,NULL);
glBindTexture(GL_TEXTURE_2D, 0);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
return CUDA_INIT_ALL_OK;
}
void updatePixels1(GLubyte* dst, int size,float animate)
{
if (!dst)
return;
int *ptr = (int *)dst; // treat each texel as one 32-bit word
for (int i = 0; i < DIM; i++)
{
for (int j = 0; j < DIM; j++)
{
int offset = j + i * DIM;
float fx = i / (float)DIM - 0.5f;
float fy = j / (float)DIM - 0.5f;
unsigned char green = ((unsigned char)128) + ((unsigned char)127) * (sinf(fabsf(fx * 100 * animate) - fabsf(fy * 100 * animate)) );
*(ptr) = 255 - green;
ptr++;
}
}
}
void updatePixels(GLubyte* dst, int size, float animate)
{
static int color = 0;
if (!dst)
return;
int *ptr = (int *)dst; // treat each texel as one 32-bit word
for (int i = 0; i < DIM; i++)
{
for (int j = 0; j < DIM; j++)
{
*ptr = color;
++ptr;
}
color += 257;
}
++color;
}
void Render(void)
{
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
vmath::mat4 modelMatrix = vmath::mat4::identity();
vmath::mat4 viewMatrix = vmath::mat4::identity();
glUseProgram(g_ShaderProgramObject);
modelMatrix = vmath::translate(0.0f, 0.0f, -3.0f);
glUniformMatrix4fv(g_Uniform_Model_Matrix, 1, GL_FALSE, modelMatrix);
glUniformMatrix4fv(g_Uniform_View_Matrix, 1, GL_FALSE, viewMatrix);
glUniformMatrix4fv(g_Uniform_Projection_Matrix, 1, GL_FALSE, g_PersPectiveProjectionMatrix);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, g_TextureID);
glUniform1i(g_uniform_TextureSampler, 0);
glBindVertexArray(g_VertexArrayObject);
glDrawArrays(GL_TRIANGLE_FAN, 0, 4);
glBindVertexArray(0);
SwapBuffers(g_hdc);
}
void FullScreen(void)
{
MONITORINFO mi = { sizeof(mi) };
dwStyle = GetWindowLong(g_hwnd, GWL_STYLE);
if (g_bFullScreen == false)
{
if (dwStyle & WS_OVERLAPPEDWINDOW)
{
if (GetWindowPlacement(g_hwnd, &wpPrev) && GetMonitorInfo(MonitorFromWindow(g_hwnd, MONITORINFOF_PRIMARY), &mi))
{
SetWindowLong(g_hwnd, GWL_STYLE, dwStyle & ~WS_OVERLAPPEDWINDOW);
SetWindowPos(g_hwnd, HWND_TOP,
mi.rcMonitor.left, mi.rcMonitor.top,
mi.rcMonitor.right - mi.rcMonitor.left,
mi.rcMonitor.bottom - mi.rcMonitor.top, SWP_NOZORDER | SWP_FRAMECHANGED);
}
}
ShowCursor(FALSE);
g_bFullScreen = true;
}
else
{
SetWindowLong(g_hwnd, GWL_STYLE, dwStyle | WS_OVERLAPPEDWINDOW);
SetWindowPlacement(g_hwnd, &wpPrev);
SetWindowPos(g_hwnd, HWND_TOP, 0, 0, 0, 0, SWP_NOOWNERZORDER | SWP_NOZORDER | SWP_FRAMECHANGED | SWP_NOMOVE | SWP_NOSIZE);
ShowCursor(TRUE);
g_bFullScreen = false;
}
}
bool Resize(int iWidth, int iHeight)
{
if (iHeight <= 0)
{
iHeight = 1;
}
glViewport(0, 0, (GLsizei)iWidth, (GLsizei)iHeight);
g_PersPectiveProjectionMatrix = vmath::perspective(45.0f, (float)iWidth / (float)iHeight, 0.1f, 100.0f);
return true;
}
int UnInitialize(void)
{
if (g_bFullScreen == true)
{
SetWindowLong(g_hwnd, GWL_STYLE, dwStyle | WS_OVERLAPPEDWINDOW);
SetWindowPlacement(g_hwnd, &wpPrev);
SetWindowPos(g_hwnd, HWND_TOP, 0, 0, 0, 0, SWP_NOOWNERZORDER | SWP_NOZORDER | SWP_FRAMECHANGED | SWP_NOMOVE | SWP_NOSIZE);
ShowCursor(TRUE);
g_bFullScreen = false;
}
/* Clear CUDA Resources Start */
cudaGraphicsUnmapResources(1, &resource, NULL);
cudaGraphicsUnregisterResource(resource);
cudaDeviceReset();
/* Clear CUDA Resources End */
/* Clear OpenGL Resources Start */
if (bufferOBJ)
{
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, bufferOBJ);
glDeleteBuffers(1, &bufferOBJ);
bufferOBJ = NULL;
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
}
if (g_VertexBufferObject_TexCoord)
{
glDeleteBuffers(1, &g_VertexBufferObject_TexCoord);
g_VertexBufferObject_TexCoord = NULL;
}
if (g_VertexBufferObject_Position)
{
glDeleteBuffers(1, &g_VertexBufferObject_Position);
g_VertexBufferObject_Position = NULL;
}
if (g_VertexArrayObject)
{
glDeleteVertexArrays(1, &g_VertexArrayObject);
g_VertexArrayObject = NULL;
}
glUseProgram(0);
if (g_ShaderProgramObject)
{
GLsizei iShaderCount;
GLsizei iShaderNumber;
glUseProgram(g_ShaderProgramObject);
glGetProgramiv(g_ShaderProgramObject, GL_ATTACHED_SHADERS, &iShaderCount);
GLuint *pShaders = (GLuint*)calloc(iShaderCount, sizeof(GLuint));
if (pShaders)
{
glGetAttachedShaders(g_ShaderProgramObject, iShaderCount, &iShaderCount, pShaders);
for (iShaderNumber = 0; iShaderNumber < iShaderCount; iShaderNumber++)
{
glDetachShader(g_ShaderProgramObject, pShaders[iShaderNumber]);
glDeleteShader(pShaders[iShaderNumber]);
pShaders[iShaderNumber] = 0;
}
free(pShaders);
pShaders = NULL;
}
glUseProgram(0);
glDeleteProgram(g_ShaderProgramObject);
g_ShaderProgramObject = NULL;
}
/* Clear OpenGL Resources End */
if (wglGetCurrentContext() == g_hrc)
{
wglMakeCurrent(NULL, NULL);
}
if (g_hrc)
{
wglDeleteContext(g_hrc);
g_hrc = NULL;
}
if (g_hdc)
{
ReleaseDC(g_hwnd, g_hdc);
g_hdc = NULL;
}
if (g_pFile)
{
fprintf_s(g_pFile, "Closing File \n");
fclose(g_pFile);
g_pFile = NULL;
}
return 0;
}
|
f9b7680344e120b5884706c1640e3074850995f1.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "kernel_calc_gjL.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
if (argc < 2) { fprintf(stderr, "usage: %s <matrix_count>\n", argv[0]); return 1; } // guard added: original read argv[1] unchecked
char* p;
int matrix_len = strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int layer_id = 1;
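// Note: this generated harness passes raw byte counts (XSIZE*YSIZE) to
// hipMalloc rather than element counts; the launch only has to be valid,
// assuming kernel_calc_gjL guards its own accesses.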
int *l = NULL;
hipMalloc(&l, XSIZE*YSIZE);
int *s = NULL;
hipMalloc(&s, XSIZE*YSIZE);
int *sw = NULL;
hipMalloc(&sw, XSIZE*YSIZE);
float *z_arr = NULL;
hipMalloc(&z_arr, XSIZE*YSIZE);
float *a_arr = NULL;
hipMalloc(&a_arr, XSIZE*YSIZE);
float *t_arr = NULL;
hipMalloc(&t_arr, XSIZE*YSIZE);
float *gjl = NULL;
hipMalloc(&gjl, XSIZE*YSIZE);
float *w_arr = NULL;
hipMalloc(&w_arr, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(kernel_calc_gjL, dim3(gridBlock), dim3(threadBlock), 0, 0, layer_id, l, s, sw, z_arr, a_arr, t_arr, gjl, w_arr);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
kernel_calc_gjL), dim3(gridBlock),dim3(threadBlock), 0, 0, layer_id,l,s,sw,z_arr,a_arr,t_arr,gjl,w_arr);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
kernel_calc_gjL), dim3(gridBlock),dim3(threadBlock), 0, 0, layer_id,l,s,sw,z_arr,a_arr,t_arr,gjl,w_arr);
}
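// Caution: there is no device synchronization before taking the end
// timestamp, so this mostly measures kernel launch/enqueue time unless the
// stream back-pressures; a hipDeviceSynchronize() here would time execution.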
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | f9b7680344e120b5884706c1640e3074850995f1.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "kernel_calc_gjL.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
if (argc < 2) { fprintf(stderr, "usage: %s <matrix_count>\n", argv[0]); return 1; } // guard added: original read argv[1] unchecked
char* p;
int matrix_len = strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int layer_id = 1;
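// As in the HIP variant: sizes passed to cudaMalloc are raw byte counts,
// not element counts; kernel_calc_gjL is assumed to guard its own accesses.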
int *l = NULL;
cudaMalloc(&l, XSIZE*YSIZE);
int *s = NULL;
cudaMalloc(&s, XSIZE*YSIZE);
int *sw = NULL;
cudaMalloc(&sw, XSIZE*YSIZE);
float *z_arr = NULL;
cudaMalloc(&z_arr, XSIZE*YSIZE);
float *a_arr = NULL;
cudaMalloc(&a_arr, XSIZE*YSIZE);
float *t_arr = NULL;
cudaMalloc(&t_arr, XSIZE*YSIZE);
float *gjl = NULL;
cudaMalloc(&gjl, XSIZE*YSIZE);
float *w_arr = NULL;
cudaMalloc(&w_arr, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
kernel_calc_gjL<<<gridBlock,threadBlock>>>(layer_id,l,s,sw,z_arr,a_arr,t_arr,gjl,w_arr);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
kernel_calc_gjL<<<gridBlock,threadBlock>>>(layer_id,l,s,sw,z_arr,a_arr,t_arr,gjl,w_arr);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
kernel_calc_gjL<<<gridBlock,threadBlock>>>(layer_id,l,s,sw,z_arr,a_arr,t_arr,gjl,w_arr);
}
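// No cudaDeviceSynchronize() before the end timestamp; as in the HIP
// variant above, this largely times launch overhead.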
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
908807cd89a95b64ea3bd6370054dba88fb081ed.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/******************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
#include <stdio.h>
#include <time.h>
#include "support.h"
#include "kernel.hip"
int main(int argc, char**argv) {
Timer timer;
hipError_t cuda_ret;
time_t t;
// Initialize host variables ----------------------------------------------
printf("\nSetting up the problem...");
//flushes the output buffer
fflush(stdout);
startTime(&timer);
unsigned int n;
if(argc == 1) {
n = 10000;
} else if(argc == 2) {
//converts string argv into int
n = atoi(argv[1]);
} else {
printf("\n Invalid input parameters!"
"\n Usage: ./vecadd # Vector of size 10,000 is used"
"\n Usage: ./vecadd <m> # Vector of size m is used"
"\n");
exit(0);
}
/* Initializes random number generator */
srand((unsigned) time(&t));
/* Initializes First Array w/ n random numbers */
float* A_h = (float*) malloc( sizeof(float)*n );
for (unsigned int i=0; i < n; i++)
{
A_h[i] = (rand()%100)/100.00;
}
/* Initializes second Array w/ n random numbers */
float* B_h = (float*) malloc( sizeof(float)*n );
for (unsigned int i=0; i < n; i++)
{
B_h[i] = (rand()%100)/100.00;
}
/* Initializes final Array w/ n slots */
float* C_h = (float*) malloc( sizeof(float)*n );
//Outputs time taken to intialize host variables
stopTime(&timer);
printf("%f s\n", elapsedTime(timer));
printf(" Vector size = %u\n", n);
// Allocate device variables ----------------------------------------------
printf("Allocating device variables...");
fflush(stdout);
startTime(&timer);
//INSERT CODE HERE
float* A_d;
hipMalloc(&A_d, sizeof(float)*n );
float* B_d;
hipMalloc(&B_d, sizeof(float)*n );
float* C_d;
hipMalloc(&C_d, sizeof(float)*n );
hipDeviceSynchronize();
stopTime(&timer);
printf("%f s\n", elapsedTime(timer));
// Copy host variables to device ------------------------------------------
printf("Copying data from host to device...");
fflush(stdout);
startTime(&timer);
//INSERT CODE HERE
hipMemcpy(A_d, A_h, sizeof(float)*n, hipMemcpyHostToDevice);
hipMemcpy(B_d, B_h, sizeof(float)*n, hipMemcpyHostToDevice);
hipDeviceSynchronize();
stopTime(&timer);
printf("%f s\n", elapsedTime(timer));
// Launch kernel ----------------------------------------------------------
printf("Launching kernel...");
fflush(stdout);
startTime(&timer);
//INSERT CODE HERE
int threadsPerBlock = 256;
int blocksPerGrid = (n + threadsPerBlock - 1)/threadsPerBlock;
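// Round-up division so every element is covered; the kernel (defined in
// kernel.hip, not shown here) is assumed to bounds-check its global index
// against n.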
hipLaunchKernelGGL(vecAddKernel, dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, A_d, B_d, C_d, n);
cuda_ret = hipDeviceSynchronize();
if(cuda_ret != hipSuccess) FATAL("Unable to launch kernel");
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Copy device variables to host ------------------------------------------
printf("Copying data from device to host...");
fflush(stdout);
startTime(&timer);
//INSERT CODE HERE
hipMemcpy(C_h, C_d, sizeof(float)*n, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Verify correctness -----------------------------------------------------
printf("Verifying results...");
fflush(stdout);
verify(A_h, B_h, C_h, n);
// Free memory ------------------------------------------------------------
free(A_h);
free(B_h);
free(C_h);
//INSERT CODE HERE
hipFree(A_d);
hipFree(B_d);
hipFree(C_d);
return 0;
}
| 908807cd89a95b64ea3bd6370054dba88fb081ed.cu | /******************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
#include <stdio.h>
#include <time.h>
#include "support.h"
#include "kernel.cu"
int main(int argc, char**argv) {
Timer timer;
cudaError_t cuda_ret;
time_t t;
// Initialize host variables ----------------------------------------------
printf("\nSetting up the problem...");
//flushes the output buffer
fflush(stdout);
startTime(&timer);
unsigned int n;
if(argc == 1) {
n = 10000;
} else if(argc == 2) {
//converts string argv into int
n = atoi(argv[1]);
} else {
printf("\n Invalid input parameters!"
"\n Usage: ./vecadd # Vector of size 10,000 is used"
"\n Usage: ./vecadd <m> # Vector of size m is used"
"\n");
exit(0);
}
/* Initializes random number generator */
srand((unsigned) time(&t));
/* Initializes First Array w/ n random numbers */
float* A_h = (float*) malloc( sizeof(float)*n );
for (unsigned int i=0; i < n; i++)
{
A_h[i] = (rand()%100)/100.00;
}
/* Initializes second Array w/ n random numbers */
float* B_h = (float*) malloc( sizeof(float)*n );
for (unsigned int i=0; i < n; i++)
{
B_h[i] = (rand()%100)/100.00;
}
/* Initializes final Array w/ n slots */
float* C_h = (float*) malloc( sizeof(float)*n );
//Outputs time taken to intialize host variables
stopTime(&timer);
printf("%f s\n", elapsedTime(timer));
printf(" Vector size = %u\n", n);
// Allocate device variables ----------------------------------------------
printf("Allocating device variables...");
fflush(stdout);
startTime(&timer);
//INSERT CODE HERE
float* A_d;
cudaMalloc(&A_d, sizeof(float)*n );
float* B_d;
cudaMalloc(&B_d, sizeof(float)*n );
float* C_d;
cudaMalloc(&C_d, sizeof(float)*n );
cudaDeviceSynchronize();
stopTime(&timer);
printf("%f s\n", elapsedTime(timer));
// Copy host variables to device ------------------------------------------
printf("Copying data from host to device...");
fflush(stdout);
startTime(&timer);
//INSERT CODE HERE
cudaMemcpy(A_d, A_h, sizeof(float)*n, cudaMemcpyHostToDevice);
cudaMemcpy(B_d, B_h, sizeof(float)*n, cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
stopTime(&timer);
printf("%f s\n", elapsedTime(timer));
// Launch kernel ----------------------------------------------------------
printf("Launching kernel...");
fflush(stdout);
startTime(&timer);
//INSERT CODE HERE
int threadsPerBlock = 256;
int blocksPerGrid = (n + threadsPerBlock - 1)/threadsPerBlock;
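// Round-up grid size; vecAddKernel (defined in kernel.cu, not shown) is
// assumed to bounds-check its global index against n.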
vecAddKernel<<<blocksPerGrid, threadsPerBlock>>>(A_d, B_d, C_d, n);
cuda_ret = cudaDeviceSynchronize();
if(cuda_ret != cudaSuccess) FATAL("Unable to launch kernel");
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Copy device variables to host ------------------------------------------
printf("Copying data from device to host...");
fflush(stdout);
startTime(&timer);
//INSERT CODE HERE
cudaMemcpy(C_h, C_d, sizeof(float)*n, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Verify correctness -----------------------------------------------------
printf("Verifying results...");
fflush(stdout);
verify(A_h, B_h, C_h, n);
// Free memory ------------------------------------------------------------
free(A_h);
free(B_h);
free(C_h);
//INSERT CODE HERE
cudaFree(A_d);
cudaFree(B_d);
cudaFree(C_d);
return 0;
}
|
91f854709838b9270c1a68946a556e90eebb3475.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/TensorUtils.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include "cuda_helpers.h"
template <typename T>
__global__ void RoIPoolForward(
const int nthreads,
const T* input,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const T* rois,
T* output,
int* argmax_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* offset_rois = rois + n * 5;
int roi_batch_ind = offset_rois[0];
int roi_start_w = round(offset_rois[1] * spatial_scale);
int roi_start_h = round(offset_rois[2] * spatial_scale);
int roi_end_w = round(offset_rois[3] * spatial_scale);
int roi_end_h = round(offset_rois[4] * spatial_scale);
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
int hstart = static_cast<int>(floor(static_cast<T>(ph) * bin_size_h));
int wstart = static_cast<int>(floor(static_cast<T>(pw) * bin_size_w));
int hend = static_cast<int>(ceil(static_cast<T>(ph + 1) * bin_size_h));
int wend = static_cast<int>(ceil(static_cast<T>(pw + 1) * bin_size_w));
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart + roi_start_h, 0), height);
hend = min(max(hend + roi_start_h, 0), height);
wstart = min(max(wstart + roi_start_w, 0), width);
wend = min(max(wend + roi_start_w, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Define an empty pooling region to be zero
T maxval = is_empty ? 0 : -FLT_MAX;
// If nothing is pooled, argmax = -1 causes nothing to be backprop'd
int maxidx = -1;
const T* offset_input =
input + (roi_batch_ind * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int input_index = h * width + w;
if (offset_input[input_index] > maxval) {
maxval = offset_input[input_index];
maxidx = input_index;
}
}
}
output[index] = maxval;
argmax_data[index] = maxidx;
}
}
template <typename T>
__global__ void RoIPoolBackward(
const int nthreads,
const T* grad_output,
const int* argmax_data,
const int num_rois,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
T* grad_input,
const T* rois,
const int n_stride,
const int c_stride,
const int h_stride,
const int w_stride) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* offset_rois = rois + n * 5;
int roi_batch_ind = offset_rois[0];
T* grad_input_offset =
grad_input + ((roi_batch_ind * channels + c) * height * width);
int output_offset = n * n_stride + c * c_stride;
const int* argmax_data_offset =
argmax_data + (n * channels + c) * pooled_height * pooled_width;
int argmax = argmax_data_offset[ph * pooled_width + pw];
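// Scatter the gradient back to the input pixel that won the forward max;
// atomicAdd is required because several output cells (even across ROIs)
// can select the same input location.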
if (argmax != -1) {
atomicAdd(
grad_input_offset + argmax,
static_cast<T>(
grad_output[output_offset + ph * h_stride + pw * w_stride]));
}
}
}
std::tuple<at::Tensor, at::Tensor> ROIPool_forward_cuda(
const at::Tensor& input,
const at::Tensor& rois,
const float spatial_scale,
const int pooled_height,
const int pooled_width) {
AT_ASSERTM(input.device().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(rois.device().is_cuda(), "rois must be a CUDA tensor");
at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2};
at::CheckedFrom c = "ROIPool_forward_cuda";
at::checkAllSameGPU(c, {input_t, rois_t});
at::checkAllSameType(c, {input_t, rois_t});
at::hip::HIPGuardMasqueradingAsCUDA device_guard(input.device());
auto num_rois = rois.size(0);
auto channels = input.size(1);
auto height = input.size(2);
auto width = input.size(3);
at::Tensor output = at::zeros(
{num_rois, channels, pooled_height, pooled_width}, input.options());
at::Tensor argmax = at::zeros(
{num_rois, channels, pooled_height, pooled_width},
input.options().dtype(at::kInt));
auto output_size = num_rois * pooled_height * pooled_width * channels;
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
dim3 grid(::min(at::cuda::ATenCeilDiv(output_size, 512L), 4096L));
dim3 block(512);
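// The grid is capped at 4096 blocks; CUDA_1D_KERNEL_LOOP (a grid-stride loop
// defined in cuda_helpers.h) lets each thread cover several output elements
// when output_size exceeds 4096 * 512.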
if (output.numel() == 0) {
AT_CUDA_CHECK(hipGetLastError());
return std::make_tuple(output, argmax);
}
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.type(), "ROIPool_forward", [&] {
hipLaunchKernelGGL(( RoIPoolForward<scalar_t>), dim3(grid), dim3(block), 0, stream,
output_size,
input.contiguous().data<scalar_t>(),
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
rois.contiguous().data<scalar_t>(),
output.data<scalar_t>(),
argmax.data<int>());
});
AT_CUDA_CHECK(hipGetLastError());
return std::make_tuple(output, argmax);
}
at::Tensor ROIPool_backward_cuda(
const at::Tensor& grad,
const at::Tensor& rois,
const at::Tensor& argmax,
const float spatial_scale,
const int pooled_height,
const int pooled_width,
const int batch_size,
const int channels,
const int height,
const int width) {
// Check if input tensors are CUDA tensors
AT_ASSERTM(grad.device().is_cuda(), "grad must be a CUDA tensor");
AT_ASSERTM(rois.device().is_cuda(), "rois must be a CUDA tensor");
AT_ASSERTM(argmax.device().is_cuda(), "argmax must be a CUDA tensor");
at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2},
argmax_t{argmax, "argmax", 3};
at::CheckedFrom c = "ROIPool_backward_cuda";
at::checkAllSameGPU(c, {grad_t, rois_t, argmax_t});
at::checkAllSameType(c, {grad_t, rois_t});
at::hip::HIPGuardMasqueradingAsCUDA device_guard(grad.device());
auto num_rois = rois.size(0);
at::Tensor grad_input =
at::zeros({batch_size, channels, height, width}, grad.options());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
dim3 grid(::min(at::cuda::ATenCeilDiv(grad.numel(), 512L), 4096L));
dim3 block(512);
// handle possibly empty gradients
if (grad.numel() == 0) {
AT_CUDA_CHECK(hipGetLastError());
return grad_input;
}
int n_stride = grad.stride(0);
int c_stride = grad.stride(1);
int h_stride = grad.stride(2);
int w_stride = grad.stride(3);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad.type(), "ROIPool_backward", [&] {
hipLaunchKernelGGL(( RoIPoolBackward<scalar_t>), dim3(grid), dim3(block), 0, stream,
grad.numel(),
grad.contiguous().data<scalar_t>(),
argmax.contiguous().data<int>(),
num_rois,
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
grad_input.data<scalar_t>(),
rois.contiguous().data<scalar_t>(),
n_stride,
c_stride,
h_stride,
w_stride);
});
AT_CUDA_CHECK(hipGetLastError());
return grad_input;
}
| 91f854709838b9270c1a68946a556e90eebb3475.cu | #include <ATen/ATen.h>
#include <ATen/TensorUtils.h>
#include <ATen/cuda/CUDAContext.h>
#include <c10/cuda/CUDAGuard.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include "cuda_helpers.h"
template <typename T>
__global__ void RoIPoolForward(
const int nthreads,
const T* input,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const T* rois,
T* output,
int* argmax_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* offset_rois = rois + n * 5;
int roi_batch_ind = offset_rois[0];
int roi_start_w = round(offset_rois[1] * spatial_scale);
int roi_start_h = round(offset_rois[2] * spatial_scale);
int roi_end_w = round(offset_rois[3] * spatial_scale);
int roi_end_h = round(offset_rois[4] * spatial_scale);
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
int hstart = static_cast<int>(floor(static_cast<T>(ph) * bin_size_h));
int wstart = static_cast<int>(floor(static_cast<T>(pw) * bin_size_w));
int hend = static_cast<int>(ceil(static_cast<T>(ph + 1) * bin_size_h));
int wend = static_cast<int>(ceil(static_cast<T>(pw + 1) * bin_size_w));
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart + roi_start_h, 0), height);
hend = min(max(hend + roi_start_h, 0), height);
wstart = min(max(wstart + roi_start_w, 0), width);
wend = min(max(wend + roi_start_w, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Define an empty pooling region to be zero
T maxval = is_empty ? 0 : -FLT_MAX;
// If nothing is pooled, argmax = -1 causes nothing to be backprop'd
int maxidx = -1;
const T* offset_input =
input + (roi_batch_ind * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int input_index = h * width + w;
if (offset_input[input_index] > maxval) {
maxval = offset_input[input_index];
maxidx = input_index;
}
}
}
output[index] = maxval;
argmax_data[index] = maxidx;
}
}
template <typename T>
__global__ void RoIPoolBackward(
const int nthreads,
const T* grad_output,
const int* argmax_data,
const int num_rois,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
T* grad_input,
const T* rois,
const int n_stride,
const int c_stride,
const int h_stride,
const int w_stride) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
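// i.e. index == ((n * channels + c) * pooled_height + ph) * pooled_width + pw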
const T* offset_rois = rois + n * 5;
int roi_batch_ind = offset_rois[0];
T* grad_input_offset =
grad_input + ((roi_batch_ind * channels + c) * height * width);
int output_offset = n * n_stride + c * c_stride;
const int* argmax_data_offset =
argmax_data + (n * channels + c) * pooled_height * pooled_width;
int argmax = argmax_data_offset[ph * pooled_width + pw];
if (argmax != -1) {
atomicAdd(
grad_input_offset + argmax,
static_cast<T>(
grad_output[output_offset + ph * h_stride + pw * w_stride]));
}
}
}
std::tuple<at::Tensor, at::Tensor> ROIPool_forward_cuda(
const at::Tensor& input,
const at::Tensor& rois,
const float spatial_scale,
const int pooled_height,
const int pooled_width) {
AT_ASSERTM(input.device().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(rois.device().is_cuda(), "rois must be a CUDA tensor");
at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2};
at::CheckedFrom c = "ROIPool_forward_cuda";
at::checkAllSameGPU(c, {input_t, rois_t});
at::checkAllSameType(c, {input_t, rois_t});
at::cuda::CUDAGuard device_guard(input.device());
auto num_rois = rois.size(0);
auto channels = input.size(1);
auto height = input.size(2);
auto width = input.size(3);
at::Tensor output = at::zeros(
{num_rois, channels, pooled_height, pooled_width}, input.options());
at::Tensor argmax = at::zeros(
{num_rois, channels, pooled_height, pooled_width},
input.options().dtype(at::kInt));
auto output_size = num_rois * pooled_height * pooled_width * channels;
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
dim3 grid(std::min(at::cuda::ATenCeilDiv(output_size, 512L), 4096L));
dim3 block(512);
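// The grid is capped at 4096 blocks; CUDA_1D_KERNEL_LOOP (a grid-stride loop
// defined in cuda_helpers.h) lets each thread cover several output elements
// when output_size exceeds 4096 * 512.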
if (output.numel() == 0) {
AT_CUDA_CHECK(cudaGetLastError());
return std::make_tuple(output, argmax);
}
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.type(), "ROIPool_forward", [&] {
RoIPoolForward<scalar_t><<<grid, block, 0, stream>>>(
output_size,
input.contiguous().data<scalar_t>(),
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
rois.contiguous().data<scalar_t>(),
output.data<scalar_t>(),
argmax.data<int>());
});
AT_CUDA_CHECK(cudaGetLastError());
return std::make_tuple(output, argmax);
}
at::Tensor ROIPool_backward_cuda(
const at::Tensor& grad,
const at::Tensor& rois,
const at::Tensor& argmax,
const float spatial_scale,
const int pooled_height,
const int pooled_width,
const int batch_size,
const int channels,
const int height,
const int width) {
// Check if input tensors are CUDA tensors
AT_ASSERTM(grad.device().is_cuda(), "grad must be a CUDA tensor");
AT_ASSERTM(rois.device().is_cuda(), "rois must be a CUDA tensor");
AT_ASSERTM(argmax.device().is_cuda(), "argmax must be a CUDA tensor");
at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2},
argmax_t{argmax, "argmax", 3};
at::CheckedFrom c = "ROIPool_backward_cuda";
at::checkAllSameGPU(c, {grad_t, rois_t, argmax_t});
at::checkAllSameType(c, {grad_t, rois_t});
at::cuda::CUDAGuard device_guard(grad.device());
auto num_rois = rois.size(0);
at::Tensor grad_input =
at::zeros({batch_size, channels, height, width}, grad.options());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
dim3 grid(std::min(at::cuda::ATenCeilDiv(grad.numel(), 512L), 4096L));
dim3 block(512);
// handle possibly empty gradients
if (grad.numel() == 0) {
AT_CUDA_CHECK(cudaGetLastError());
return grad_input;
}
int n_stride = grad.stride(0);
int c_stride = grad.stride(1);
int h_stride = grad.stride(2);
int w_stride = grad.stride(3);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad.type(), "ROIPool_backward", [&] {
RoIPoolBackward<scalar_t><<<grid, block, 0, stream>>>(
grad.numel(),
grad.contiguous().data<scalar_t>(),
argmax.contiguous().data<int>(),
num_rois,
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
grad_input.data<scalar_t>(),
rois.contiguous().data<scalar_t>(),
n_stride,
c_stride,
h_stride,
w_stride);
});
AT_CUDA_CHECK(cudaGetLastError());
return grad_input;
}
|
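The ROIPool pair above shows the one mechanical rewrite hipify applies to every kernel launch in this dump. A minimal side-by-side sketch (someKernel and its arguments are placeholders, not names from either file):

// CUDA triple-chevron launch:
someKernel<<<grid, block, sharedBytes, stream>>>(arg0, arg1);
// hipify rewrite -- same kernel and launch configuration, macro form:
hipLaunchKernelGGL(someKernel, dim3(grid), dim3(block), sharedBytes, stream, arg0, arg1);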
f3d3ffcdc9d62e5168e4e370494c1b288e44fb5f.hip | // !!! This is a file automatically generated by hipify!!!
#include <string>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <iostream>
#include <iomanip>
#include "loadSaveImage.h"
#include <thrust/extrema.h>
#define checkCudaErrors(val) check( (val), #val, __FILE__, __LINE__)
template<typename T>
void check(T err, const char* const func, const char* const file, const int line) {
if (err != hipSuccess) {
std::cerr << "CUDA error at: " << file << ":" << line << std::endl;
std::cerr << hipGetErrorString(err) << " " << func << std::endl;
exit(1);
}
}
//chroma-LogLuminance Space
static float *d_x__;
static float *d_y__;
static float *d_logY__;
//memory for the cdf
static unsigned int *d_cdf__;
static const int numBins = 1024;
size_t numRows__;
size_t numCols__;
/* Copied from Mike's IPython notebook with some minor modifications
* Mainly double precision constants to floats and log10 -> log10f
* Also removed Luminance (Y) channel since it is never used */
__global__ void rgb_to_xyY(
float* d_r,
float* d_g,
float* d_b,
float* d_x,
float* d_y,
float* d_log_Y,
float delta,
int num_pixels_y,
int num_pixels_x )
{
int ny = num_pixels_y;
int nx = num_pixels_x;
int2 image_index_2d = make_int2( ( blockIdx.x * blockDim.x ) + threadIdx.x, ( blockIdx.y * blockDim.y ) + threadIdx.y );
int image_index_1d = ( nx * image_index_2d.y ) + image_index_2d.x;
if ( image_index_2d.x < nx && image_index_2d.y < ny )
{
float r = d_r[ image_index_1d ];
float g = d_g[ image_index_1d ];
float b = d_b[ image_index_1d ];
float X = ( r * 0.4124f ) + ( g * 0.3576f ) + ( b * 0.1805f );
float Y = ( r * 0.2126f ) + ( g * 0.7152f ) + ( b * 0.0722f );
float Z = ( r * 0.0193f ) + ( g * 0.1192f ) + ( b * 0.9505f );
float L = X + Y + Z;
float x = X / L;
float y = Y / L;
float log_Y = log10f( delta + Y );
d_x[ image_index_1d ] = x;
d_y[ image_index_1d ] = y;
d_log_Y[ image_index_1d ] = log_Y;
}
}
/* Copied from Mike's IPython notebook *
Modified just by having threads read the
normalization constant directly from device memory
instead of copying it back */
__global__ void normalize_cdf(
unsigned int* d_input_cdf,
float* d_output_cdf,
int n
)
{
const float normalization_constant = 1.f / d_input_cdf[n - 1];
int global_index_1d = ( blockIdx.x * blockDim.x ) + threadIdx.x;
if ( global_index_1d < n )
{
unsigned int input_value = d_input_cdf[ global_index_1d ];
float output_value = input_value * normalization_constant;
d_output_cdf[ global_index_1d ] = output_value;
}
}
/* Copied from Mike's IPython notebook *
Modified double constants -> float *
Perform tone mapping based upon new *
luminance scaling */
__global__ void tonemap(
float* d_x,
float* d_y,
float* d_log_Y,
float* d_cdf_norm,
float* d_r_new,
float* d_g_new,
float* d_b_new,
float min_log_Y,
float max_log_Y,
float log_Y_range,
int num_bins,
int num_pixels_y,
int num_pixels_x )
{
int ny = num_pixels_y;
int nx = num_pixels_x;
int2 image_index_2d = make_int2( ( blockIdx.x * blockDim.x ) + threadIdx.x, ( blockIdx.y * blockDim.y ) + threadIdx.y );
int image_index_1d = ( nx * image_index_2d.y ) + image_index_2d.x;
if ( image_index_2d.x < nx && image_index_2d.y < ny )
{
float x = d_x[ image_index_1d ];
float y = d_y[ image_index_1d ];
float log_Y = d_log_Y[ image_index_1d ];
int bin_index = min( num_bins - 1, int( (num_bins * ( log_Y - min_log_Y ) ) / log_Y_range ) );
float Y_new = d_cdf_norm[ bin_index ];
float X_new = x * ( Y_new / y );
float Z_new = ( 1 - x - y ) * ( Y_new / y );
float r_new = ( X_new * 3.2406f ) + ( Y_new * -1.5372f ) + ( Z_new * -0.4986f );
float g_new = ( X_new * -0.9689f ) + ( Y_new * 1.8758f ) + ( Z_new * 0.0415f );
float b_new = ( X_new * 0.0557f ) + ( Y_new * -0.2040f ) + ( Z_new * 1.0570f );
d_r_new[ image_index_1d ] = r_new;
d_g_new[ image_index_1d ] = g_new;
d_b_new[ image_index_1d ] = b_new;
}
}
//return types are void since any internal error will be handled by quitting
//no point in returning error codes...
void preProcess(float** d_luminance, unsigned int** d_cdf,
size_t *numRows, size_t *numCols,
unsigned int *numberOfBins,
const std::string &filename) {
//make sure the context initializes ok
checkCudaErrors(hipFree(0));
float *imgPtr; //we will become responsible for this pointer
loadImageHDR(filename, &imgPtr, &numRows__, &numCols__);
*numRows = numRows__;
*numCols = numCols__;
//first thing to do is split incoming BGR float data into separate channels
size_t numPixels = numRows__ * numCols__;
float *red = new float[numPixels];
float *green = new float[numPixels];
float *blue = new float[numPixels];
//Remember image is loaded BGR
for (size_t i = 0; i < numPixels; ++i) {
blue[i] = imgPtr[3 * i + 0];
green[i] = imgPtr[3 * i + 1];
red[i] = imgPtr[3 * i + 2];
}
delete[] imgPtr; //being good citizens, we release the resources
//allocated in loadImageHDR
float *d_red, *d_green, *d_blue; //RGB space
size_t channelSize = sizeof(float) * numPixels;
checkCudaErrors(hipMalloc(&d_red, channelSize));
checkCudaErrors(hipMalloc(&d_green, channelSize));
checkCudaErrors(hipMalloc(&d_blue, channelSize));
checkCudaErrors(hipMalloc(&d_x__, channelSize));
checkCudaErrors(hipMalloc(&d_y__, channelSize));
checkCudaErrors(hipMalloc(&d_logY__, channelSize));
checkCudaErrors(hipMemcpy(d_red, red, channelSize, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_green, green, channelSize, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_blue, blue, channelSize, hipMemcpyHostToDevice));
//convert from RGB space to chrominance/luminance space xyY
const dim3 blockSize(32, 16, 1);
const dim3 gridSize( (numCols__ + blockSize.x - 1) / blockSize.x,
(numRows__ + blockSize.y - 1) / blockSize.y, 1);
hipLaunchKernelGGL(( rgb_to_xyY), dim3(gridSize), dim3(blockSize), 0, 0, d_red, d_green, d_blue,
d_x__, d_y__, d_logY__,
.0001f, numRows__, numCols__);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
*d_luminance = d_logY__;
//allocate memory for the cdf of the histogram
*numberOfBins = numBins;
checkCudaErrors(hipMalloc(&d_cdf__, sizeof(unsigned int) * numBins));
checkCudaErrors(hipMemset(d_cdf__, 0, sizeof(unsigned int) * numBins));
*d_cdf = d_cdf__;
checkCudaErrors(hipFree(d_red));
checkCudaErrors(hipFree(d_green));
checkCudaErrors(hipFree(d_blue));
delete[] red;
delete[] green;
delete[] blue;
}
void postProcess(const std::string& output_file,
size_t numRows, size_t numCols,
float min_log_Y, float max_log_Y) {
const int numPixels = numRows__ * numCols__;
const int numThreads = 192;
float *d_cdf_normalized;
checkCudaErrors(hipMalloc(&d_cdf_normalized, sizeof(float) * numBins));
//first normalize the cdf to a maximum value of 1
//this is how we compress the range of the luminance channel
hipLaunchKernelGGL(( normalize_cdf), dim3((numBins + numThreads - 1) / numThreads),
dim3(numThreads), 0, 0, d_cdf__,
d_cdf_normalized,
numBins);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
//allocate memory for the output RGB channels
float *h_red, *h_green, *h_blue;
float *d_red, *d_green, *d_blue;
h_red = new float[numPixels];
h_green = new float[numPixels];
h_blue = new float[numPixels];
checkCudaErrors(hipMalloc(&d_red, sizeof(float) * numPixels));
checkCudaErrors(hipMalloc(&d_green, sizeof(float) * numPixels));
checkCudaErrors(hipMalloc(&d_blue, sizeof(float) * numPixels));
float log_Y_range = max_log_Y - min_log_Y;
const dim3 blockSize(32, 16, 1);
const dim3 gridSize( (numCols + blockSize.x - 1) / blockSize.x,
(numRows + blockSize.y - 1) / blockSize.y );
//next perform the actual tone-mapping
//we map each luminance value to its new value
//and then transform back to RGB space
hipLaunchKernelGGL(( tonemap), dim3(gridSize), dim3(blockSize), 0, 0, d_x__, d_y__, d_logY__,
d_cdf_normalized,
d_red, d_green, d_blue,
min_log_Y, max_log_Y,
log_Y_range, numBins,
numRows, numCols);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
checkCudaErrors(hipMemcpy(h_red, d_red, sizeof(float) * numPixels, hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(h_green, d_green, sizeof(float) * numPixels, hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(h_blue, d_blue, sizeof(float) * numPixels, hipMemcpyDeviceToHost));
//recombine the image channels
float *imageHDR = new float[numPixels * 3];
for (int i = 0; i < numPixels; ++i) {
imageHDR[3 * i + 0] = h_blue[i];
imageHDR[3 * i + 1] = h_green[i];
imageHDR[3 * i + 2] = h_red[i];
}
saveImageHDR(imageHDR, numRows, numCols, output_file);
delete[] imageHDR;
delete[] h_red;
delete[] h_green;
delete[] h_blue;
//cleanup
checkCudaErrors(hipFree(d_cdf_normalized));
}
void cleanupGlobalMemory(void)
{
checkCudaErrors(hipFree(d_x__));
checkCudaErrors(hipFree(d_y__));
checkCudaErrors(hipFree(d_logY__));
checkCudaErrors(hipFree(d_cdf__));
}
| f3d3ffcdc9d62e5168e4e370494c1b288e44fb5f.cu | #include <string>
#include <cuda.h>
#include <cuda_runtime.h>
#include <iostream>
#include <iomanip>
#include "loadSaveImage.h"
#include <thrust/extrema.h>
#define checkCudaErrors(val) check( (val), #val, __FILE__, __LINE__)
template<typename T>
void check(T err, const char* const func, const char* const file, const int line) {
if (err != cudaSuccess) {
std::cerr << "CUDA error at: " << file << ":" << line << std::endl;
std::cerr << cudaGetErrorString(err) << " " << func << std::endl;
exit(1);
}
}
//chroma-LogLuminance Space
static float *d_x__;
static float *d_y__;
static float *d_logY__;
//memory for the cdf
static unsigned int *d_cdf__;
static const int numBins = 1024;
size_t numRows__;
size_t numCols__;
/* Copied from Mike's IPython notebook with some minor modifications
* Mainly double precision constants to floats and log10 -> log10f
* Also removed Luminance (Y) channel since it is never used */
__global__ void rgb_to_xyY(
float* d_r,
float* d_g,
float* d_b,
float* d_x,
float* d_y,
float* d_log_Y,
float delta,
int num_pixels_y,
int num_pixels_x )
{
int ny = num_pixels_y;
int nx = num_pixels_x;
int2 image_index_2d = make_int2( ( blockIdx.x * blockDim.x ) + threadIdx.x, ( blockIdx.y * blockDim.y ) + threadIdx.y );
int image_index_1d = ( nx * image_index_2d.y ) + image_index_2d.x;
if ( image_index_2d.x < nx && image_index_2d.y < ny )
{
float r = d_r[ image_index_1d ];
float g = d_g[ image_index_1d ];
float b = d_b[ image_index_1d ];
float X = ( r * 0.4124f ) + ( g * 0.3576f ) + ( b * 0.1805f );
float Y = ( r * 0.2126f ) + ( g * 0.7152f ) + ( b * 0.0722f );
float Z = ( r * 0.0193f ) + ( g * 0.1192f ) + ( b * 0.9505f );
float L = X + Y + Z;
float x = X / L;
float y = Y / L;
float log_Y = log10f( delta + Y );
d_x[ image_index_1d ] = x;
d_y[ image_index_1d ] = y;
d_log_Y[ image_index_1d ] = log_Y;
}
}
/* Copied from Mike's IPython notebook *
Modified just by having threads read the
normalization constant directly from device memory
instead of copying it back */
__global__ void normalize_cdf(
unsigned int* d_input_cdf,
float* d_output_cdf,
int n
)
{
const float normalization_constant = 1.f / d_input_cdf[n - 1];
int global_index_1d = ( blockIdx.x * blockDim.x ) + threadIdx.x;
if ( global_index_1d < n )
{
unsigned int input_value = d_input_cdf[ global_index_1d ];
float output_value = input_value * normalization_constant;
d_output_cdf[ global_index_1d ] = output_value;
}
}
/* Copied from Mike's IPython notebook *
Modified double constants -> float *
Perform tone mapping based upon new *
luminance scaling */
__global__ void tonemap(
float* d_x,
float* d_y,
float* d_log_Y,
float* d_cdf_norm,
float* d_r_new,
float* d_g_new,
float* d_b_new,
float min_log_Y,
float max_log_Y,
float log_Y_range,
int num_bins,
int num_pixels_y,
int num_pixels_x )
{
int ny = num_pixels_y;
int nx = num_pixels_x;
int2 image_index_2d = make_int2( ( blockIdx.x * blockDim.x ) + threadIdx.x, ( blockIdx.y * blockDim.y ) + threadIdx.y );
int image_index_1d = ( nx * image_index_2d.y ) + image_index_2d.x;
if ( image_index_2d.x < nx && image_index_2d.y < ny )
{
float x = d_x[ image_index_1d ];
float y = d_y[ image_index_1d ];
float log_Y = d_log_Y[ image_index_1d ];
int bin_index = min( num_bins - 1, int( (num_bins * ( log_Y - min_log_Y ) ) / log_Y_range ) );
float Y_new = d_cdf_norm[ bin_index ];
float X_new = x * ( Y_new / y );
float Z_new = ( 1 - x - y ) * ( Y_new / y );
float r_new = ( X_new * 3.2406f ) + ( Y_new * -1.5372f ) + ( Z_new * -0.4986f );
float g_new = ( X_new * -0.9689f ) + ( Y_new * 1.8758f ) + ( Z_new * 0.0415f );
float b_new = ( X_new * 0.0557f ) + ( Y_new * -0.2040f ) + ( Z_new * 1.0570f );
d_r_new[ image_index_1d ] = r_new;
d_g_new[ image_index_1d ] = g_new;
d_b_new[ image_index_1d ] = b_new;
}
}
//return types are void since any internal error will be handled by quitting
//no point in returning error codes...
void preProcess(float** d_luminance, unsigned int** d_cdf,
size_t *numRows, size_t *numCols,
unsigned int *numberOfBins,
const std::string &filename) {
//make sure the context initializes ok
checkCudaErrors(cudaFree(0));
float *imgPtr; //we will become responsible for this pointer
loadImageHDR(filename, &imgPtr, &numRows__, &numCols__);
*numRows = numRows__;
*numCols = numCols__;
//first thing to do is split incoming BGR float data into separate channels
size_t numPixels = numRows__ * numCols__;
float *red = new float[numPixels];
float *green = new float[numPixels];
float *blue = new float[numPixels];
//Remember image is loaded BGR
for (size_t i = 0; i < numPixels; ++i) {
blue[i] = imgPtr[3 * i + 0];
green[i] = imgPtr[3 * i + 1];
red[i] = imgPtr[3 * i + 2];
}
delete[] imgPtr; //being good citizens, we release the resources
//allocated in loadImageHDR
float *d_red, *d_green, *d_blue; //RGB space
size_t channelSize = sizeof(float) * numPixels;
checkCudaErrors(cudaMalloc(&d_red, channelSize));
checkCudaErrors(cudaMalloc(&d_green, channelSize));
checkCudaErrors(cudaMalloc(&d_blue, channelSize));
checkCudaErrors(cudaMalloc(&d_x__, channelSize));
checkCudaErrors(cudaMalloc(&d_y__, channelSize));
checkCudaErrors(cudaMalloc(&d_logY__, channelSize));
checkCudaErrors(cudaMemcpy(d_red, red, channelSize, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_green, green, channelSize, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_blue, blue, channelSize, cudaMemcpyHostToDevice));
//convert from RGB space to chrominance/luminance space xyY
const dim3 blockSize(32, 16, 1);
const dim3 gridSize( (numCols__ + blockSize.x - 1) / blockSize.x,
(numRows__ + blockSize.y - 1) / blockSize.y, 1);
rgb_to_xyY<<<gridSize, blockSize>>>(d_red, d_green, d_blue,
d_x__, d_y__, d_logY__,
.0001f, numRows__, numCols__);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
*d_luminance = d_logY__;
//allocate memory for the cdf of the histogram
*numberOfBins = numBins;
checkCudaErrors(cudaMalloc(&d_cdf__, sizeof(unsigned int) * numBins));
checkCudaErrors(cudaMemset(d_cdf__, 0, sizeof(unsigned int) * numBins));
*d_cdf = d_cdf__;
checkCudaErrors(cudaFree(d_red));
checkCudaErrors(cudaFree(d_green));
checkCudaErrors(cudaFree(d_blue));
delete[] red;
delete[] green;
delete[] blue;
}
void postProcess(const std::string& output_file,
size_t numRows, size_t numCols,
float min_log_Y, float max_log_Y) {
const int numPixels = numRows__ * numCols__;
const int numThreads = 192;
float *d_cdf_normalized;
checkCudaErrors(cudaMalloc(&d_cdf_normalized, sizeof(float) * numBins));
//first normalize the cdf to a maximum value of 1
//this is how we compress the range of the luminance channel
normalize_cdf<<< (numBins + numThreads - 1) / numThreads,
numThreads>>>(d_cdf__,
d_cdf_normalized,
numBins);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
//allocate memory for the output RGB channels
float *h_red, *h_green, *h_blue;
float *d_red, *d_green, *d_blue;
h_red = new float[numPixels];
h_green = new float[numPixels];
h_blue = new float[numPixels];
checkCudaErrors(cudaMalloc(&d_red, sizeof(float) * numPixels));
checkCudaErrors(cudaMalloc(&d_green, sizeof(float) * numPixels));
checkCudaErrors(cudaMalloc(&d_blue, sizeof(float) * numPixels));
float log_Y_range = max_log_Y - min_log_Y;
const dim3 blockSize(32, 16, 1);
const dim3 gridSize( (numCols + blockSize.x - 1) / blockSize.x,
(numRows + blockSize.y - 1) / blockSize.y );
//next perform the actual tone-mapping
//we map each luminance value to its new value
//and then transform back to RGB space
tonemap<<<gridSize, blockSize>>>(d_x__, d_y__, d_logY__,
d_cdf_normalized,
d_red, d_green, d_blue,
min_log_Y, max_log_Y,
log_Y_range, numBins,
numRows, numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaMemcpy(h_red, d_red, sizeof(float) * numPixels, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(h_green, d_green, sizeof(float) * numPixels, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(h_blue, d_blue, sizeof(float) * numPixels, cudaMemcpyDeviceToHost));
//recombine the image channels
float *imageHDR = new float[numPixels * 3];
for (int i = 0; i < numPixels; ++i) {
imageHDR[3 * i + 0] = h_blue[i];
imageHDR[3 * i + 1] = h_green[i];
imageHDR[3 * i + 2] = h_red[i];
}
saveImageHDR(imageHDR, numRows, numCols, output_file);
delete[] imageHDR;
delete[] h_red;
delete[] h_green;
delete[] h_blue;
//cleanup
checkCudaErrors(cudaFree(d_cdf_normalized));
}
void cleanupGlobalMemory(void)
{
checkCudaErrors(cudaFree(d_x__));
checkCudaErrors(cudaFree(d_y__));
checkCudaErrors(cudaFree(d_logY__));
checkCudaErrors(cudaFree(d_cdf__));
}
|
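Both halves of the tone-mapping pair above share the same check() template; hipify only renames the runtime symbols that flow through it. A sketch of the mapping visible in the two files (d_buf, h_buf and bytes are placeholders):

// HIP column                                                               // CUDA column
checkCudaErrors(hipMalloc(&d_buf, bytes));                                  // cudaMalloc(&d_buf, bytes)
checkCudaErrors(hipMemcpy(d_buf, h_buf, bytes, hipMemcpyHostToDevice));     // cudaMemcpy(..., cudaMemcpyHostToDevice)
checkCudaErrors(hipFree(d_buf));                                            // cudaFree(d_buf)
// and inside check(): hipSuccess <-> cudaSuccess, hipGetErrorString <-> cudaGetErrorString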
091e730d88d2beb3d295102d1980918c7e833a0b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* The MIT License
*
* Copyright (c) 1997-2016 The University of Utah
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <sci_defs/cuda_defs.h>
#ifdef __cplusplus
extern "C" {
#endif
//______________________________________________________________________
//
// @brief A kernel that applies the stencil used in timeAdvance(...)
// @param domainLower a three component vector that gives the lower corner of the work area as (x,y,z)
// @param domainHigh a three component vector that gives the highest non-ghost layer cell of the domain as (x,y,z)
// @param domainSize a three component vector that gives the size of the domain including ghost nodes
// @param ghostLayers the number of layers of ghost cells
// @param phi pointer to the source phi allocated on the device
// @param newphi pointer to the sink phi allocated on the device
// @param residual the residual calculated by this individual kernel
__global__ void poissonGPU1Kernel(uint3 domainLow,
uint3 domainHigh,
uint3 domainSize,
int numGhostCells,
double* phi,
double* newphi,
double* residual)
{
// calculate the thread indices
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
// Get the size of the data block in which the variables reside.
// This is essentially the stride in the index calculations.
int dx = domainSize.x;
int dy = domainSize.y;
// If the threads are within the bounds of the ghost layers
// the algorithm is allowed to stream along the z direction
// applying the stencil to a line of cells. The z direction
// is streamed because it allows access of x and y elements
// that are close to one another which should allow coalesced
// memory accesses.
if(i > 0 && j > 0 && i < domainHigh.x && j < domainHigh.y) {
for (int k = domainLow.z; k < domainHigh.z; k++) {
// For an array of [ A ][ B ][ C ], we can index it thus:
// (a * B * C) + (b * C) + (c * 1)
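// e.g. with B = 4 and C = 5, element (a=2, b=3, c=1) lives at 2*4*5 + 3*5 + 1 = 56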
int idx = INDEX3D(dx,dy,i,j,k);
newphi[idx] = (1. / 6)
* (phi[INDEX3D(dx,dy, (i-1), j, k)]
+ phi[INDEX3D(dx,dy, (i+1), j, k)]
+ phi[INDEX3D(dx,dy, i, (j-1), k)]
+ phi[INDEX3D(dx,dy, i, (j+1), k)]
+ phi[INDEX3D(dx,dy, i, j, (k-1))]
+ phi[INDEX3D(dx,dy, i, j, (k+1))]);
// TODO Finish residual calculation using atomics
}
}
}
void launchPoisson1Kernel(dim3 dimGrid,
dim3 dimBlock,
uint3 domainLow,
uint3 domainHigh,
uint3 domainSize,
int numGhostCells,
double* d_phi,
double* d_newphi,
double* residual)
{
hipLaunchKernelGGL(( poissonGPU1Kernel), dim3(dimGrid), dim3(dimBlock) , 0, 0, domainLow,
domainHigh,
domainSize,
numGhostCells,
d_phi,
d_newphi,
residual);
}
#ifdef __cplusplus
}
#endif
| 091e730d88d2beb3d295102d1980918c7e833a0b.cu | /*
* The MIT License
*
* Copyright (c) 1997-2016 The University of Utah
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <sci_defs/cuda_defs.h>
#ifdef __cplusplus
extern "C" {
#endif
//______________________________________________________________________
//
// @brief A kernel that applies the stencil used in timeAdvance(...)
// @param domainLower a three component vector that gives the lower corner of the work area as (x,y,z)
// @param domainHigh a three component vector that gives the highest non-ghost layer cell of the domain as (x,y,z)
// @param domainSize a three component vector that gives the size of the domain including ghost nodes
// @param ghostLayers the number of layers of ghost cells
// @param phi pointer to the source phi allocated on the device
// @param newphi pointer to the sink phi allocated on the device
// @param residual the residual calculated by this individual kernel
__global__ void poissonGPU1Kernel(uint3 domainLow,
uint3 domainHigh,
uint3 domainSize,
int numGhostCells,
double* phi,
double* newphi,
double* residual)
{
// calculate the thread indices
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
// Get the size of the data block in which the variables reside.
// This is essentially the stride in the index calculations.
int dx = domainSize.x;
int dy = domainSize.y;
// If the threads are within the bounds of the ghost layers
// the algorithm is allowed to stream along the z direction
// applying the stencil to a line of cells. The z direction
// is streamed because it allows access of x and y elements
// that are close to one another which should allow coalesced
// memory accesses.
if(i > 0 && j > 0 && i < domainHigh.x && j < domainHigh.y) {
for (int k = domainLow.z; k < domainHigh.z; k++) {
// For an array of [ A ][ B ][ C ], we can index it thus:
// (a * B * C) + (b * C) + (c * 1)
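// e.g. with B = 4 and C = 5, element (a=2, b=3, c=1) lives at 2*4*5 + 3*5 + 1 = 56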
int idx = INDEX3D(dx,dy,i,j,k);
newphi[idx] = (1. / 6)
* (phi[INDEX3D(dx,dy, (i-1), j, k)]
+ phi[INDEX3D(dx,dy, (i+1), j, k)]
+ phi[INDEX3D(dx,dy, i, (j-1), k)]
+ phi[INDEX3D(dx,dy, i, (j+1), k)]
+ phi[INDEX3D(dx,dy, i, j, (k-1))]
+ phi[INDEX3D(dx,dy, i, j, (k+1))]);
// TODO Finish residual calculation using atomics
}
}
}
void launchPoisson1Kernel(dim3 dimGrid,
dim3 dimBlock,
uint3 domainLow,
uint3 domainHigh,
uint3 domainSize,
int numGhostCells,
double* d_phi,
double* d_newphi,
double* residual)
{
poissonGPU1Kernel<<< dimGrid, dimBlock >>>(domainLow,
domainHigh,
domainSize,
numGhostCells,
d_phi,
d_newphi,
residual);
}
#ifdef __cplusplus
}
#endif
|
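INDEX3D in the Poisson pair above comes from sci_defs/cuda_defs.h, which is not part of the pair. A plausible definition, consistent with the [A][B][C] comment in the kernels and with i (x) as the fastest-varying index so adjacent threads touch adjacent words, is sketched below; this is an assumption for illustration, and the real header may differ:

// assumed macro shape, x fastest-varying -- not taken from the actual header:
#define INDEX3D(dx, dy, i, j, k) ((i) + (j) * (dx) + (k) * (dx) * (dy))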
16de635b97588faa535a0815637a884fb02a66b3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// TrtSequenceOffset kernels are modified from FasterTransformer
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "core/providers/cuda/cuda_common.h"
#include "contrib_ops/cuda/bert/bert_padding.h"
using namespace onnxruntime::cuda;
namespace onnxruntime {
namespace contrib {
namespace cuda {
constexpr int32_t kMAX_THREADS_PER_BLOCK = 256;
// -----------------------------------
// Get indices of non-padding tokens and padding tokens. Here we assume that padding is on the right side of the sequence.
// sequence_token_count is number of non-padding tokens per sequence, and it has shape [batch_size].
// For example, we have 3 sequences with 1, 2, 4 non-padding tokens and positions like the following (* means padding):
// Sequence_0: 0, 1*, 2*, 3*
// Sequence_1: 4, 5, 6*, 7*
// Sequence_2: 8, 9, 10, 11
// token_offset: 0, 4, 5, 8, 9, 10, 11, 1*, 2*, 3*, 6*, 7*
// token_count_buffer has two numbers for non-padding tokens:
// total_token_count: 1 + 2 + 4 = 7
// max_token_count: 4
// cumulated_token_count: 0, 1, 1+2, 1+2+4
__global__ void getTokenOffset(int* token_count_buffer,
int* token_offset,
int* cumulated_token_count,
const int* sequence_token_count,
const int batch_size,
const int sequence_length) {
// Find offset of non-padding tokens, and max sequence length among all batches
// TODO(tianleiwu): Use cub::DevicePartition::Flagged like BuildGlobalIndex in longformer_global_impl.cu
// to build token_offset when sequence length is large.
int total_tokens = 0;
int max_tokens = 0;
int index = 0;
cumulated_token_count[0] = 0;
for (int i = 0; i < batch_size; i++) {
const int count = sequence_token_count[i];
if (count > max_tokens) {
max_tokens = count;
}
cumulated_token_count[i + 1] = cumulated_token_count[i] + count;
for (int j = 0; j < count; j++) {
token_offset[index] = i * sequence_length + j;
index++;
}
total_tokens += count;
}
// Offset of paddings
for (int i = 0; i < batch_size; i++) {
const int count = sequence_token_count[i];
for (int j = 0; j < sequence_length - count; j++) {
token_offset[index] = i * sequence_length + count + j;
index++;
}
}
token_count_buffer[0] = total_tokens;
token_count_buffer[1] = max_tokens;
}
void LaunchGetTokenOffset(int* token_count_buffer,
int* token_offset,
int* cumulated_token_count,
const int* sequence_token_count,
const int batch_size,
const int sequence_length,
hipStream_t stream) {
hipLaunchKernelGGL(( getTokenOffset), dim3(1), dim3(1), 0, stream,
token_count_buffer, token_offset, cumulated_token_count, sequence_token_count, batch_size, sequence_length);
}
// -----------------------------------
// Remove paddings
template <typename T>
__global__ void __launch_bounds__(kMAX_THREADS_PER_BLOCK)
removePadding(T* target, const T* source, const int* token_offset, const int width) {
const int tid = threadIdx.x;
const int token_index = blockIdx.x;
const int source_offset = token_offset[token_index];
const int target_offset = token_index;
for (int i = tid; i < width; i += blockDim.x) {
target[target_offset * width + i] = source[source_offset * width + i];
}
}
template <>
void LaunchRemovePadding(
half* output, const half* input, const int* token_offset, const int token_count, const int hidden_size,
hipStream_t stream) {
// input: [batch_size, sequence_length, hidden_size]
// output: [token_count, hidden_size]
// Make sure memory is aligned to 128 bit
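// (address & 0xF) == 0 means the pointer is a multiple of 16 bytes, which the
// int4 (128-bit) vectorized path below requires.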
ORT_ENFORCE(!(reinterpret_cast<size_t>(input) & 0xF) && !(reinterpret_cast<size_t>(output) & 0xF), "alignment");
if (hidden_size % 8 == 0) {
const int width = hidden_size / 8;
const int4* input2 = reinterpret_cast<const int4*>(input);
int4* output2 = reinterpret_cast<int4*>(output);
hipLaunchKernelGGL(( removePadding<int4>), dim3(token_count), dim3(kMAX_THREADS_PER_BLOCK), 0, stream,
output2, input2, token_offset, width);
} else if (hidden_size % 4 == 0) {
const int width = hidden_size / 4;
const int64_t* input2 = reinterpret_cast<const int64_t*>(input);
int64_t* output2 = reinterpret_cast<int64_t*>(output);
hipLaunchKernelGGL(( removePadding<int64_t>), dim3(token_count), dim3(kMAX_THREADS_PER_BLOCK), 0, stream,
output2, input2, token_offset, width);
} else if (hidden_size % 2 == 0) {
const int width = hidden_size / 2;
const int32_t* input2 = reinterpret_cast<const int32_t*>(input);
int32_t* output2 = reinterpret_cast<int32_t*>(output);
hipLaunchKernelGGL(( removePadding<int32_t>), dim3(token_count), dim3(kMAX_THREADS_PER_BLOCK), 0, stream,
output2, input2, token_offset, width);
} else {
const int width = hidden_size;
const int16_t* input2 = reinterpret_cast<const int16_t*>(input);
int16_t* output2 = reinterpret_cast<int16_t*>(output);
hipLaunchKernelGGL(( removePadding<int16_t>), dim3(token_count), dim3(kMAX_THREADS_PER_BLOCK), 0, stream,
output2, input2, token_offset, width);
}
}
template <>
void LaunchRemovePadding(
float* output, const float* input, const int* token_offset, const int token_count, const int hidden_size,
hipStream_t stream) {
ORT_ENFORCE(!(reinterpret_cast<size_t>(input) & 0xF) && !(reinterpret_cast<size_t>(output) & 0xF), "alignment");
if (hidden_size % 4 == 0) {
const int width = hidden_size / 4;
const int4* input2 = reinterpret_cast<const int4*>(input);
int4* output2 = reinterpret_cast<int4*>(output);
hipLaunchKernelGGL(( removePadding<int4>), dim3(token_count), dim3(kMAX_THREADS_PER_BLOCK), 0, stream, output2, input2, token_offset, width);
} else if (hidden_size % 2 == 0) {
const int width = hidden_size / 2;
const int64_t* input2 = reinterpret_cast<const int64_t*>(input);
int64_t* output2 = reinterpret_cast<int64_t*>(output);
hipLaunchKernelGGL(( removePadding<int64_t>), dim3(token_count), dim3(kMAX_THREADS_PER_BLOCK), 0, stream, output2, input2, token_offset, width);
} else {
const int width = hidden_size;
const int32_t* input2 = reinterpret_cast<const int32_t*>(input);
int32_t* output2 = reinterpret_cast<int32_t*>(output);
hipLaunchKernelGGL(( removePadding<int32_t>), dim3(token_count), dim3(kMAX_THREADS_PER_BLOCK), 0, stream, output2, input2, token_offset, width);
}
}
// -----------------------------------
// Recover padding.
template <typename T>
__global__ void __launch_bounds__(kMAX_THREADS_PER_BLOCK)
restorePadding(T* target, const T* source, const int* token_offset, const int width, const int token_count) {
const int tid = threadIdx.x;
const int token_index = blockIdx.x;
const int target_seq_id = token_offset[token_index];
const int source_seq_id = token_index;
constexpr T padding_zero = 0;
if (token_index < token_count) {
for (int i = tid; i < width; i += blockDim.x) {
target[target_seq_id * width + i] = source[source_seq_id * width + i];
}
} else {
// It is padding: fill with zeros
for (int i = tid; i < width; i += blockDim.x) {
target[target_seq_id * width + i] = padding_zero;
}
}
}
template <>
__global__ void __launch_bounds__(kMAX_THREADS_PER_BLOCK)
restorePadding(int4* target, const int4* source, const int* token_offset, const int width, const int token_count) {
const int tid = threadIdx.x;
const int token_index = blockIdx.x;
const int target_seq_id = token_offset[token_index];
const int source_seq_id = token_index;
int4 padding_zero{0, 0, 0, 0};
if (token_index < token_count) {
for (int i = tid; i < width; i += blockDim.x) {
target[target_seq_id * width + i] = source[source_seq_id * width + i];
}
} else {
// It is padding: fill with zeros
for (int i = tid; i < width; i += blockDim.x) {
target[target_seq_id * width + i] = padding_zero;
}
}
}
template <>
void LaunchRestorePadding(
float* output, const float* input, const int* token_offset, const int token_count, const int hidden_size,
const int batch_size, const int sequence_length,
hipStream_t stream) {
ORT_ENFORCE(!(reinterpret_cast<size_t>(input) & 0xF) && !(reinterpret_cast<size_t>(output) & 0xF), "alignment");
int grid_size = batch_size * sequence_length;
if (hidden_size % 4 == 0) {
const int width = hidden_size / 4;
const int4* input2 = reinterpret_cast<const int4*>(input);
int4* output2 = reinterpret_cast<int4*>(output);
hipLaunchKernelGGL(( restorePadding<int4>), dim3(grid_size), dim3(kMAX_THREADS_PER_BLOCK), 0, stream,
output2, input2, token_offset, width, token_count);
} else if (hidden_size % 2 == 0) {
const int width = hidden_size / 2;
const int64_t* input2 = reinterpret_cast<const int64_t*>(input);
int64_t* output2 = reinterpret_cast<int64_t*>(output);
hipLaunchKernelGGL(( restorePadding<int64_t>), dim3(grid_size), dim3(kMAX_THREADS_PER_BLOCK), 0, stream,
output2, input2, token_offset, width, token_count);
} else {
const int width = hidden_size;
const int32_t* input2 = reinterpret_cast<const int32_t*>(input);
int32_t* output2 = reinterpret_cast<int32_t*>(output);
hipLaunchKernelGGL(( restorePadding<int32_t>), dim3(grid_size), dim3(kMAX_THREADS_PER_BLOCK), 0, stream,
output2, input2, token_offset, width, token_count);
}
}
template <>
void LaunchRestorePadding(
half* output, const half* input, const int* token_offset, const int token_count, const int hidden_size,
const int batch_size, const int sequence_length,
hipStream_t stream) {
// input: [token_count, hidden_size]
// output: [batch_size, sequence_length, hidden_size]
ORT_ENFORCE(!(reinterpret_cast<size_t>(input) & 0xF) && !(reinterpret_cast<size_t>(output) & 0xF), "alignment");
int grid_size = batch_size * sequence_length;
if (hidden_size % 8 == 0) {
const int width = hidden_size / 8;
const int4* input2 = reinterpret_cast<const int4*>(input);
int4* output2 = reinterpret_cast<int4*>(output);
hipLaunchKernelGGL(( restorePadding<int4>), dim3(grid_size), dim3(kMAX_THREADS_PER_BLOCK), 0, stream,
output2, input2, token_offset, width, token_count);
} else if (hidden_size % 4 == 0) {
const int width = hidden_size / 4;
const int64_t* input2 = reinterpret_cast<const int64_t*>(input);
int64_t* output2 = reinterpret_cast<int64_t*>(output);
hipLaunchKernelGGL(( restorePadding<int64_t>), dim3(grid_size), dim3(kMAX_THREADS_PER_BLOCK), 0, stream,
output2, input2, token_offset, width, token_count);
} else if (hidden_size % 2 == 0) {
const int width = hidden_size / 2;
const int32_t* input2 = reinterpret_cast<const int32_t*>(input);
int32_t* output2 = reinterpret_cast<int32_t*>(output);
hipLaunchKernelGGL(( restorePadding<int32_t>), dim3(grid_size), dim3(kMAX_THREADS_PER_BLOCK), 0, stream,
output2, input2, token_offset, width, token_count);
} else {
const int width = hidden_size;
const int16_t* input2 = reinterpret_cast<const int16_t*>(input);
int16_t* output2 = reinterpret_cast<int16_t*>(output);
hipLaunchKernelGGL(( restorePadding<int16_t>), dim3(grid_size), dim3(kMAX_THREADS_PER_BLOCK), 0, stream,
output2, input2, token_offset, width, token_count);
}
}
__global__ void __launch_bounds__(kMAX_THREADS_PER_BLOCK)
getTrtSequenceOffset(int* trt_mha_padding_offset,
const int* sequence_token_count,
const int batch_size) {
extern __shared__ int tmp_offset[];
if (threadIdx.x == 0) {
tmp_offset[0] = 0;
for (int i = 0; i < batch_size; i++) {
tmp_offset[i + 1] = tmp_offset[i] + sequence_token_count[i];
}
}
__syncthreads();
for (int i = threadIdx.x; i < batch_size + 1; i += blockDim.x) {
trt_mha_padding_offset[i] = tmp_offset[i];
}
}
// Get sequence offset for TensorRT fused attention when there is no padding (or padding is removed)
void LaunchTrtSequenceOffset(int* trt_mha_padding_offset,
const int* sequence_token_count,
const int batch_size,
hipStream_t stream) {
hipLaunchKernelGGL(( getTrtSequenceOffset), dim3(1), dim3(kMAX_THREADS_PER_BLOCK), sizeof(int) * (batch_size + 1), stream,
trt_mha_padding_offset, sequence_token_count, batch_size);
}
__global__ void __launch_bounds__(kMAX_THREADS_PER_BLOCK)
getTrtSequenceOffset(int* trt_mha_padding_offset,
const int* sequence_token_count,
const int batch_size,
const int sequence_length) {
extern __shared__ int tmp_offset[];
if (threadIdx.x == 0) {
tmp_offset[0] = 0;
// B for fused attention is 2 * batch_size
for (int i = 0; i < batch_size; i++) {
tmp_offset[i * 2 + 1] = tmp_offset[i * 2] + sequence_token_count[i];
tmp_offset[i * 2 + 2] = sequence_length * (i + 1);
}
}
__syncthreads();
for (int i = threadIdx.x; i < 2 * batch_size + 1; i += blockDim.x) {
trt_mha_padding_offset[i] = tmp_offset[i];
}
}
// When there is no attention mask, the sequence offset is like
// 0, sequence_length, 2 * sequence_length, 3 * sequence_length, .... ,batch_size * sequence_length
__global__ void __launch_bounds__(kMAX_THREADS_PER_BLOCK)
getTrtSequenceOffsetNoMask(int* trt_mha_padding_offset,
const int batch_size,
const int sequence_length) {
extern __shared__ int tmp_offset[];
if (threadIdx.x == 0) {
tmp_offset[0] = 0;
for (int i = 0; i < batch_size; i++) {
tmp_offset[i + 1] = sequence_length * (i + 1);
}
}
__syncthreads();
for (int i = threadIdx.x; i < batch_size + 1; i += blockDim.x) {
trt_mha_padding_offset[i] = tmp_offset[i];
}
}
// Get sequence offset for TensorRT fused attention when we keep the padding
void LaunchTrtSequenceOffset(int* trt_mha_padding_offset,
const int* sequence_token_count,
const int batch_size,
const int sequence_length,
hipStream_t stream) {
if (nullptr == sequence_token_count) {
hipLaunchKernelGGL(( getTrtSequenceOffsetNoMask), dim3(1), dim3(kMAX_THREADS_PER_BLOCK), sizeof(int) * (batch_size + 1), stream,
trt_mha_padding_offset, batch_size, sequence_length);
} else {
hipLaunchKernelGGL(( getTrtSequenceOffset), dim3(1), dim3(kMAX_THREADS_PER_BLOCK), sizeof(int) * (2 * batch_size + 1), stream,
trt_mha_padding_offset, sequence_token_count, batch_size, sequence_length);
}
}
} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
| 16de635b97588faa535a0815637a884fb02a66b3.cu | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// TrtSequenceOffset kernels are modified from FasterTransformer
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "core/providers/cuda/cuda_common.h"
#include "contrib_ops/cuda/bert/bert_padding.h"
using namespace onnxruntime::cuda;
namespace onnxruntime {
namespace contrib {
namespace cuda {
constexpr int32_t kMAX_THREADS_PER_BLOCK = 256;
// -----------------------------------
// Get indices of non-padding tokens and padding tokens. Here we assume that padding is on the right side of the sequence.
// sequence_token_count is number of non-padding tokens per sequence, and it has shape [batch_size].
// For example, we have 3 sequences with 1, 2, 4 non-padding tokens and positions like the following (* means padding):
// Sequence_0: 0, 1*, 2*, 3*
// Sequence_1: 4, 5, 6*, 7*
// Sequence_2: 8, 9, 10, 11
// token_offset: 0, 4, 5, 8, 9, 10, 11, 1*, 2*, 3*, 6*, 7*
// token_count_buffer has two numbers for non-padding tokens:
// total_token_count: 1 + 2 + 4 = 7
// max_token_count: 4
// cumulated_token_count: 0, 1, 1+2, 1+2+4
__global__ void getTokenOffset(int* token_count_buffer,
int* token_offset,
int* cumulated_token_count,
const int* sequence_token_count,
const int batch_size,
const int sequence_length) {
// Find offset of non-padding tokens, and max sequence length among all batches
// TODO(tianleiwu): Use cub::DevicePartition::Flagged like BuildGlobalIndex in longformer_global_impl.cu
// to build token_offset when sequence length is large.
int total_tokens = 0;
int max_tokens = 0;
int index = 0;
cumulated_token_count[0] = 0;
for (int i = 0; i < batch_size; i++) {
const int count = sequence_token_count[i];
if (count > max_tokens) {
max_tokens = count;
}
cumulated_token_count[i + 1] = cumulated_token_count[i] + count;
for (int j = 0; j < count; j++) {
token_offset[index] = i * sequence_length + j;
index++;
}
total_tokens += count;
}
// Offset of paddings
for (int i = 0; i < batch_size; i++) {
const int count = sequence_token_count[i];
for (int j = 0; j < sequence_length - count; j++) {
token_offset[index] = i * sequence_length + count + j;
index++;
}
}
token_count_buffer[0] = total_tokens;
token_count_buffer[1] = max_tokens;
}
void LaunchGetTokenOffset(int* token_count_buffer,
int* token_offset,
int* cumulated_token_count,
const int* sequence_token_count,
const int batch_size,
const int sequence_length,
cudaStream_t stream) {
getTokenOffset<<<1, 1, 0, stream>>>(
token_count_buffer, token_offset, cumulated_token_count, sequence_token_count, batch_size, sequence_length);
}
// -----------------------------------
// Remove paddings
template <typename T>
__global__ void __launch_bounds__(kMAX_THREADS_PER_BLOCK)
removePadding(T* target, const T* source, const int* token_offset, const int width) {
const int tid = threadIdx.x;
const int token_index = blockIdx.x;
const int source_offset = token_offset[token_index];
const int target_offset = token_index;
for (int i = tid; i < width; i += blockDim.x) {
target[target_offset * width + i] = source[source_offset * width + i];
}
}
template <>
void LaunchRemovePadding(
half* output, const half* input, const int* token_offset, const int token_count, const int hidden_size,
cudaStream_t stream) {
// input: [batch_size, sequence_length, hidden_size]
// output: [token_count, hidden_size]
// Make sure memory is aligned to 128 bit
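// (address & 0xF) == 0 means the pointer is a multiple of 16 bytes, which the
// int4 (128-bit) vectorized path below requires.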
ORT_ENFORCE(!(reinterpret_cast<size_t>(input) & 0xF) && !(reinterpret_cast<size_t>(output) & 0xF), "alignment");
if (hidden_size % 8 == 0) {
const int width = hidden_size / 8;
const int4* input2 = reinterpret_cast<const int4*>(input);
int4* output2 = reinterpret_cast<int4*>(output);
removePadding<int4><<<token_count, kMAX_THREADS_PER_BLOCK, 0, stream>>>(
output2, input2, token_offset, width);
} else if (hidden_size % 4 == 0) {
const int width = hidden_size / 4;
const int64_t* input2 = reinterpret_cast<const int64_t*>(input);
int64_t* output2 = reinterpret_cast<int64_t*>(output);
removePadding<int64_t><<<token_count, kMAX_THREADS_PER_BLOCK, 0, stream>>>(
output2, input2, token_offset, width);
} else if (hidden_size % 2 == 0) {
const int width = hidden_size / 2;
const int32_t* input2 = reinterpret_cast<const int32_t*>(input);
int32_t* output2 = reinterpret_cast<int32_t*>(output);
removePadding<int32_t><<<token_count, kMAX_THREADS_PER_BLOCK, 0, stream>>>(
output2, input2, token_offset, width);
} else {
const int width = hidden_size;
const int16_t* input2 = reinterpret_cast<const int16_t*>(input);
int16_t* output2 = reinterpret_cast<int16_t*>(output);
removePadding<int16_t><<<token_count, kMAX_THREADS_PER_BLOCK, 0, stream>>>(
output2, input2, token_offset, width);
}
}
template <>
void LaunchRemovePadding(
float* output, const float* input, const int* token_offset, const int token_count, const int hidden_size,
cudaStream_t stream) {
ORT_ENFORCE(!(reinterpret_cast<size_t>(input) & 0xF) && !(reinterpret_cast<size_t>(output) & 0xF), "alignment");
if (hidden_size % 4 == 0) {
const int width = hidden_size / 4;
const int4* input2 = reinterpret_cast<const int4*>(input);
int4* output2 = reinterpret_cast<int4*>(output);
removePadding<int4><<<token_count, kMAX_THREADS_PER_BLOCK, 0, stream>>>(output2, input2, token_offset, width);
} else if (hidden_size % 2 == 0) {
const int width = hidden_size / 2;
const int64_t* input2 = reinterpret_cast<const int64_t*>(input);
int64_t* output2 = reinterpret_cast<int64_t*>(output);
removePadding<int64_t><<<token_count, kMAX_THREADS_PER_BLOCK, 0, stream>>>(output2, input2, token_offset, width);
} else {
const int width = hidden_size;
const int32_t* input2 = reinterpret_cast<const int32_t*>(input);
int32_t* output2 = reinterpret_cast<int32_t*>(output);
removePadding<int32_t><<<token_count, kMAX_THREADS_PER_BLOCK, 0, stream>>>(output2, input2, token_offset, width);
}
}
// -----------------------------------
// Recover padding.
template <typename T>
__global__ void __launch_bounds__(kMAX_THREADS_PER_BLOCK)
restorePadding(T* target, const T* source, const int* token_offset, const int width, const int token_count) {
const int tid = threadIdx.x;
const int token_index = blockIdx.x;
const int target_seq_id = token_offset[token_index];
const int source_seq_id = token_index;
constexpr T padding_zero = 0;
if (token_index < token_count) {
for (int i = tid; i < width; i += blockDim.x) {
target[target_seq_id * width + i] = source[source_seq_id * width + i];
}
} else {
// It is padding: fill with zeros
for (int i = tid; i < width; i += blockDim.x) {
target[target_seq_id * width + i] = padding_zero;
}
}
}
template <>
__global__ void __launch_bounds__(kMAX_THREADS_PER_BLOCK)
restorePadding(int4* target, const int4* source, const int* token_offset, const int width, const int token_count) {
const int tid = threadIdx.x;
const int token_index = blockIdx.x;
const int target_seq_id = token_offset[token_index];
const int source_seq_id = token_index;
int4 padding_zero{0, 0, 0, 0};
if (token_index < token_count) {
for (int i = tid; i < width; i += blockDim.x) {
target[target_seq_id * width + i] = source[source_seq_id * width + i];
}
} else {
// It is padding: fill with zeros
for (int i = tid; i < width; i += blockDim.x) {
target[target_seq_id * width + i] = padding_zero;
}
}
}
template <>
void LaunchRestorePadding(
float* output, const float* input, const int* token_offset, const int token_count, const int hidden_size,
const int batch_size, const int sequence_length,
cudaStream_t stream) {
ORT_ENFORCE(!(reinterpret_cast<size_t>(input) & 0xF) && !(reinterpret_cast<size_t>(output) & 0xF), "alignment");
int grid_size = batch_size * sequence_length;
if (hidden_size % 4 == 0) {
const int width = hidden_size / 4;
const int4* input2 = reinterpret_cast<const int4*>(input);
int4* output2 = reinterpret_cast<int4*>(output);
restorePadding<int4><<<grid_size, kMAX_THREADS_PER_BLOCK, 0, stream>>>(
output2, input2, token_offset, width, token_count);
} else if (hidden_size % 2 == 0) {
const int width = hidden_size / 2;
const int64_t* input2 = reinterpret_cast<const int64_t*>(input);
int64_t* output2 = reinterpret_cast<int64_t*>(output);
restorePadding<int64_t><<<grid_size, kMAX_THREADS_PER_BLOCK, 0, stream>>>(
output2, input2, token_offset, width, token_count);
} else {
const int width = hidden_size;
const int32_t* input2 = reinterpret_cast<const int32_t*>(input);
int32_t* output2 = reinterpret_cast<int32_t*>(output);
restorePadding<int32_t><<<grid_size, kMAX_THREADS_PER_BLOCK, 0, stream>>>(
output2, input2, token_offset, width, token_count);
}
}
template <>
void LaunchRestorePadding(
half* output, const half* input, const int* token_offset, const int token_count, const int hidden_size,
const int batch_size, const int sequence_length,
cudaStream_t stream) {
// input: [token_count, hidden_size]
// output: [batch_size, sequence_length, hidden_size]
ORT_ENFORCE(!(reinterpret_cast<size_t>(input) & 0xF) && !(reinterpret_cast<size_t>(output) & 0xF), "alignment");
int grid_size = batch_size * sequence_length;
if (hidden_size % 8 == 0) {
const int width = hidden_size / 8;
const int4* input2 = reinterpret_cast<const int4*>(input);
int4* output2 = reinterpret_cast<int4*>(output);
restorePadding<int4><<<grid_size, kMAX_THREADS_PER_BLOCK, 0, stream>>>(
output2, input2, token_offset, width, token_count);
} else if (hidden_size % 4 == 0) {
const int width = hidden_size / 4;
const int64_t* input2 = reinterpret_cast<const int64_t*>(input);
int64_t* output2 = reinterpret_cast<int64_t*>(output);
restorePadding<int64_t><<<grid_size, kMAX_THREADS_PER_BLOCK, 0, stream>>>(
output2, input2, token_offset, width, token_count);
} else if (hidden_size % 2 == 0) {
const int width = hidden_size / 2;
const int32_t* input2 = reinterpret_cast<const int32_t*>(input);
int32_t* output2 = reinterpret_cast<int32_t*>(output);
restorePadding<int32_t><<<grid_size, kMAX_THREADS_PER_BLOCK, 0, stream>>>(
output2, input2, token_offset, width, token_count);
} else {
const int width = hidden_size;
const int16_t* input2 = reinterpret_cast<const int16_t*>(input);
int16_t* output2 = reinterpret_cast<int16_t*>(output);
restorePadding<int16_t><<<grid_size, kMAX_THREADS_PER_BLOCK, 0, stream>>>(
output2, input2, token_offset, width, token_count);
}
}
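// Worked example (hypothetical size): for half input with hidden_size = 768,
// 768 % 8 == 0, so eight 2-byte halves pack into one int4 and each token
// writes width = 96 16-byte elements; odd hidden sizes fall back to
// element-wise int16_t stores.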
__global__ void __launch_bounds__(kMAX_THREADS_PER_BLOCK)
getTrtSequenceOffset(int* trt_mha_padding_offset,
const int* sequence_token_count,
const int batch_size) {
extern __shared__ int tmp_offset[];
if (threadIdx.x == 0) {
tmp_offset[0] = 0;
for (int i = 0; i < batch_size; i++) {
tmp_offset[i + 1] = tmp_offset[i] + sequence_token_count[i];
}
}
__syncthreads();
for (int i = threadIdx.x; i < batch_size + 1; i += blockDim.x) {
trt_mha_padding_offset[i] = tmp_offset[i];
}
}
// Get sequence offset for TensorRT fused attention when there is no padding (or padding is removed)
void LaunchTrtSequenceOffset(int* trt_mha_padding_offset,
const int* sequence_token_count,
const int batch_size,
cudaStream_t stream) {
getTrtSequenceOffset<<<1, kMAX_THREADS_PER_BLOCK, sizeof(int) * (batch_size + 1), stream>>>(
trt_mha_padding_offset, sequence_token_count, batch_size);
}
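// Example with hypothetical counts: for batch_size = 3 and
// sequence_token_count = {5, 7, 4}, the kernel writes the exclusive prefix
// sum {0, 5, 12, 16}, i.e. batch_size + 1 cumulative offsets into the
// packed token stream.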
__global__ void __launch_bounds__(kMAX_THREADS_PER_BLOCK)
getTrtSequenceOffset(int* trt_mha_padding_offset,
const int* sequence_token_count,
const int batch_size,
const int sequence_length) {
extern __shared__ int tmp_offset[];
if (threadIdx.x == 0) {
tmp_offset[0] = 0;
// B for fused attention is 2 * batch_size
for (int i = 0; i < batch_size; i++) {
tmp_offset[i * 2 + 1] = tmp_offset[i * 2] + sequence_token_count[i];
tmp_offset[i * 2 + 2] = sequence_length * (i + 1);
}
}
__syncthreads();
for (int i = threadIdx.x; i < 2 * batch_size + 1; i += blockDim.x) {
trt_mha_padding_offset[i] = tmp_offset[i];
}
}
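// Example with hypothetical counts: for batch_size = 2, sequence_length = 8,
// and sequence_token_count = {5, 7}, the interleaved result is
// {0, 5, 8, 15, 16} (2 * batch_size + 1 entries): each pair marks where the
// valid tokens end and where the padded sequence ends.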
// When there is no attention mask, the sequence offset is like
// 0, sequence_length, 2 * sequence_length, 3 * sequence_length, .... ,batch_size * sequence_length
__global__ void __launch_bounds__(kMAX_THREADS_PER_BLOCK)
getTrtSequenceOffsetNoMask(int* trt_mha_padding_offset,
const int batch_size,
const int sequence_length) {
extern __shared__ int tmp_offset[];
if (threadIdx.x == 0) {
tmp_offset[0] = 0;
for (int i = 0; i < batch_size; i++) {
tmp_offset[i + 1] = sequence_length * (i + 1);
}
}
__syncthreads();
for (int i = threadIdx.x; i < batch_size + 1; i += blockDim.x) {
trt_mha_padding_offset[i] = tmp_offset[i];
}
}
// Get sequence offset for TensorRT fused attention when we keep the padding
void LaunchTrtSequenceOffset(int* trt_mha_padding_offset,
const int* sequence_token_count,
const int batch_size,
const int sequence_length,
cudaStream_t stream) {
if (nullptr == sequence_token_count) {
getTrtSequenceOffsetNoMask<<<1, kMAX_THREADS_PER_BLOCK, sizeof(int) * (batch_size + 1), stream>>>(
trt_mha_padding_offset, batch_size, sequence_length);
} else {
getTrtSequenceOffset<<<1, kMAX_THREADS_PER_BLOCK, sizeof(int) * (2 * batch_size + 1), stream>>>(
trt_mha_padding_offset, sequence_token_count, batch_size, sequence_length);
}
}
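// Both launchers above use a single block whose dynamic shared memory is
// sized to the offset array; thread 0 builds the prefix sum serially before
// all threads copy it out, which is adequate for the small batch sizes
// typical of attention workloads.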
} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
|
image_grayscale.hip | // !!! This is a file automatically generated by hipify!!!
#include "device/image_grayscale.h"
#include <cassert>
#include <device_launch_parameters.h>
#include <hip/hip_runtime.h>
#include "device/cuda_debug.h"
__global__ void compute_grayscale_kernel(
void* color_img_ptr, uint32_t color_img_pitch,
void* grayscale_img_ptr, uint32_t grayscale_image_pitch,
uint32_t image_width, uint32_t image_height) {
const uint32_t image_index_u = blockIdx.x*blockDim.x + threadIdx.x;
const uint32_t image_index_v = blockIdx.y*blockDim.y + threadIdx.y;
if (image_index_u >= image_width or image_index_v >= image_height) { return; }
uint8_t* color_byte_ptr = reinterpret_cast<uint8_t*>(color_img_ptr);
color_byte_ptr += color_img_pitch*image_index_v + sizeof(uchar4)*image_index_u;
uint8_t* grayscale_byte_ptr = reinterpret_cast<uint8_t*>(grayscale_img_ptr);
grayscale_byte_ptr += grayscale_image_pitch*image_index_v + sizeof(uchar1)*image_index_u;
uchar4* color_pixel_ptr = reinterpret_cast<uchar4*>(color_byte_ptr);
uchar1* grayscale_pixel_ptr = reinterpret_cast<uchar1*>(grayscale_byte_ptr);
uchar4 color_pixel = *color_pixel_ptr;
float grayscale_value = 0.3f*static_cast<float>(color_pixel.x) + 0.59f*static_cast<float>(color_pixel.y) + 0.11f*static_cast<float>(color_pixel.z);
uchar1 grayscale_pixel = make_uchar1(__float2int_rn(grayscale_value));
*grayscale_pixel_ptr = grayscale_pixel;
}
void compute_grayscale(const PitchedCUDABuffer& color_image, PitchedCUDABuffer& grayscale_image) {
// sanity check the input
assert(color_image.get_element_size_in_bytes() == 4 * sizeof(uint8_t));
assert(grayscale_image.get_element_size_in_bytes() == sizeof(uint8_t));
assert(color_image.get_elements_per_row() == grayscale_image.get_elements_per_row());
assert(color_image.get_number_of_rows() == grayscale_image.get_number_of_rows());
const uint32_t image_width = color_image.get_elements_per_row();
const uint32_t image_height = color_image.get_number_of_rows();
const dim3 grayscale_block_dim(32, 32, 1);
const dim3 grayscale_grid_dim(image_width/grayscale_block_dim.x + (image_width % grayscale_block_dim.x == 0 ? 0 : 1),
image_height/grayscale_block_dim.y + (image_height % grayscale_block_dim.y == 0 ? 0 : 1),
1);
hipLaunchKernelGGL(( compute_grayscale_kernel), dim3(grayscale_grid_dim), dim3(grayscale_block_dim), 0, 0, color_image.get_dev_ptr(), color_image.get_pitch_in_bytes(),
grayscale_image.get_dev_ptr(), grayscale_image.get_pitch_in_bytes(),
image_width, image_height);
CUDA_SYNC_CHECK();
}
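// Note: the 0.3f/0.59f/0.11f weights approximate the ITU-R BT.601 luma
// coefficients (0.299, 0.587, 0.114), and __float2int_rn rounds to nearest
// before the result is narrowed into the single-channel uchar1 pixel.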
| image_grayscale.cu | #include "device/image_grayscale.h"
#include <cassert>
#include <device_launch_parameters.h>
#include <cuda_runtime.h>
#include "device/cuda_debug.h"
__global__ void compute_grayscale_kernel(
void* color_img_ptr, uint32_t color_img_pitch,
void* grayscale_img_ptr, uint32_t grayscale_image_pitch,
uint32_t image_width, uint32_t image_height) {
const uint32_t image_index_u = blockIdx.x*blockDim.x + threadIdx.x;
const uint32_t image_index_v = blockIdx.y*blockDim.y + threadIdx.y;
if (image_index_u >= image_width or image_index_v >= image_height) { return; }
uint8_t* color_byte_ptr = reinterpret_cast<uint8_t*>(color_img_ptr);
color_byte_ptr += color_img_pitch*image_index_v + sizeof(uchar4)*image_index_u;
uint8_t* grayscale_byte_ptr = reinterpret_cast<uint8_t*>(grayscale_img_ptr);
grayscale_byte_ptr += grayscale_image_pitch*image_index_v + sizeof(uchar1)*image_index_u;
uchar4* color_pixel_ptr = reinterpret_cast<uchar4*>(color_byte_ptr);
uchar1* grayscale_pixel_ptr = reinterpret_cast<uchar1*>(grayscale_byte_ptr);
uchar4 color_pixel = *color_pixel_ptr;
float grayscale_value = 0.3f*static_cast<float>(color_pixel.x) + 0.59f*static_cast<float>(color_pixel.y) + 0.11f*static_cast<float>(color_pixel.z);
uchar1 grayscale_pixel = make_uchar1(__float2int_rn(grayscale_value));
*grayscale_pixel_ptr = grayscale_pixel;
}
void compute_grayscale(const PitchedCUDABuffer& color_image, PitchedCUDABuffer& grayscale_image) {
// sanity check the input
assert(color_image.get_element_size_in_bytes() == 4 * sizeof(uint8_t));
assert(grayscale_image.get_element_size_in_bytes() == sizeof(uint8_t));
assert(color_image.get_elements_per_row() == grayscale_image.get_elements_per_row());
assert(color_image.get_number_of_rows() == grayscale_image.get_number_of_rows());
const uint32_t image_width = color_image.get_elements_per_row();
const uint32_t image_height = color_image.get_number_of_rows();
const dim3 grayscale_block_dim(32, 32, 1);
const dim3 grayscale_grid_dim(image_width/grayscale_block_dim.x + (image_width % grayscale_block_dim.x == 0 ? 0 : 1),
image_height/grayscale_block_dim.y + (image_height % grayscale_block_dim.y == 0 ? 0 : 1),
1);
compute_grayscale_kernel<<<grayscale_grid_dim, grayscale_block_dim>>>(color_image.get_dev_ptr(), color_image.get_pitch_in_bytes(),
grayscale_image.get_dev_ptr(), grayscale_image.get_pitch_in_bytes(),
image_width, image_height);
CUDA_SYNC_CHECK();
}
|
666cdf50ffcfd26563830492b7b737d95e73cbd2.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright 2020 Stanford University
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "circuit.h"
#include "hip/hip_runtime.h"
class GPUAccumulateCharge {
public:
typedef float LHS;
typedef float RHS;
template<bool EXCLUSIVE>
__host__ __device__ __forceinline__
static void apply(LHS &lhs, RHS &rhs)
{
#ifdef __CUDA_ARCH__
float *target = &lhs;
atomicAdd(target,rhs);
#else
assert(false);
#endif
}
template<bool EXCLUSIVE>
__host__ __device__ __forceinline__
static void fold(RHS &rhs1, RHS rhs2)
{
#ifdef __CUDA_ARCH__
float *target = &rhs1;
atomicAdd(target,rhs2);
#else
assert(false);
#endif
}
};
template<typename AT, int SEGMENTS>
struct SegmentAccessors {
public:
__host__ __device__
inline AT& operator[](unsigned index) { return accessors[index]; }
__host__ __device__
inline const AT& operator[](unsigned index) const { return accessors[index]; }
public:
AT accessors[SEGMENTS];
};
__device__ __forceinline__
float find_node_voltage(const AccessorROfloat &pvt,
const AccessorROfloat &shr,
const AccessorROfloat &ghost,
Point<1> ptr, PointerLocation loc)
{
switch (loc)
{
case PRIVATE_PTR:
return pvt[ptr];
case SHARED_PTR:
return shr[ptr];
case GHOST_PTR:
return ghost[ptr];
default:
break; // assert(false);
}
return 0.f;
}
__global__
void calc_new_currents_kernel(Point<1> first,
int num_wires,
float dt,
int steps,
const AccessorROpoint fa_in_ptr,
const AccessorROpoint fa_out_ptr,
const AccessorROloc fa_in_loc,
const AccessorROloc fa_out_loc,
const AccessorROfloat fa_inductance,
const AccessorROfloat fa_resistance,
const AccessorROfloat fa_wire_cap,
const AccessorROfloat fa_pvt_voltage,
const AccessorROfloat fa_shr_voltage,
const AccessorROfloat fa_ghost_voltage,
const SegmentAccessors<AccessorRWfloat,WIRE_SEGMENTS> fa_currents,
const SegmentAccessors<AccessorRWfloat,WIRE_SEGMENTS-1> fa_voltages)
{
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
// We can do this because we know we have SOA layout and wires are dense
if (tid < num_wires)
{
const Point<1> wire_ptr = first + tid;
float recip_dt = 1.f/dt;
float temp_v[WIRE_SEGMENTS+1];
float temp_i[WIRE_SEGMENTS];
float old_i[WIRE_SEGMENTS];
float old_v[WIRE_SEGMENTS-1];
#pragma unroll
for (int i = 0; i < WIRE_SEGMENTS; i++)
{
temp_i[i] = fa_currents[i][wire_ptr];
old_i[i] = temp_i[i];
}
#pragma unroll
for (int i = 0; i < (WIRE_SEGMENTS-1); i++)
{
temp_v[i+1] = fa_voltages[i][wire_ptr];
old_v[i] = temp_v[i+1];
}
Point<1> in_ptr = fa_in_ptr[wire_ptr];
PointerLocation in_loc = fa_in_loc[wire_ptr];
temp_v[0] =
find_node_voltage(fa_pvt_voltage, fa_shr_voltage, fa_ghost_voltage, in_ptr, in_loc);
Point<1> out_ptr = fa_out_ptr[wire_ptr];
PointerLocation out_loc = fa_out_loc[wire_ptr];
temp_v[WIRE_SEGMENTS] =
find_node_voltage(fa_pvt_voltage, fa_shr_voltage, fa_ghost_voltage, out_ptr, out_loc);
// Solve the RLC model iteratively
float inductance = fa_inductance[wire_ptr];
float recip_resistance = 1.f/fa_resistance[wire_ptr];
float recip_capacitance = 1.f/fa_wire_cap[wire_ptr];
for (int j = 0; j < steps; j++)
{
#pragma unroll
for (int i = 0; i < WIRE_SEGMENTS; i++)
{
temp_i[i] = ((temp_v[i] - temp_v[i+1]) -
(inductance * (temp_i[i] - old_i[i]) * recip_dt)) * recip_resistance;
}
#pragma unroll
for (int i = 0; i < (WIRE_SEGMENTS-1); i++)
{
temp_v[i+1] = old_v[i] + dt * (temp_i[i] - temp_i[i+1]) * recip_capacitance;
}
}
// Write out the result
#pragma unroll
for (int i = 0; i < WIRE_SEGMENTS; i++)
fa_currents[i][wire_ptr] = temp_i[i];
#pragma unroll
for (int i = 0; i < (WIRE_SEGMENTS-1); i++)
fa_voltages[i][wire_ptr] = temp_v[i+1];
}
}
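// The update above discretizes the segment equation
// V_i - V_{i+1} = I_i * R + L * dI_i/dt with a backward difference in time,
// giving I_i = ((V_i - V_{i+1}) - L * (I_i - I_i_old) / dt) / R, while the
// interior node voltages integrate the current divergence:
// V = V_old + dt * (I_in - I_out) / C.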
/*static*/
__host__
void CalcNewCurrentsTask::gpu_base_impl(const CircuitPiece &piece,
const std::vector<PhysicalRegion> ®ions)
{
#ifndef DISABLE_MATH
SegmentAccessors<AccessorRWfloat,WIRE_SEGMENTS> fa_currents;
for (int i = 0; i < WIRE_SEGMENTS; i++)
fa_currents[i] = AccessorRWfloat(regions[0], FID_CURRENT+i);
SegmentAccessors<AccessorRWfloat,WIRE_SEGMENTS-1> fa_voltages;
for (int i = 0; i < (WIRE_SEGMENTS-1); i++)
fa_voltages[i] = AccessorRWfloat(regions[0], FID_WIRE_VOLTAGE+i);
const AccessorROpoint fa_in_ptr(regions[1], FID_IN_PTR);
const AccessorROpoint fa_out_ptr(regions[1], FID_OUT_PTR);
const AccessorROloc fa_in_loc(regions[1], FID_IN_LOC);
const AccessorROloc fa_out_loc(regions[1], FID_OUT_LOC);
const AccessorROfloat fa_inductance(regions[1], FID_INDUCTANCE);
const AccessorROfloat fa_resistance(regions[1], FID_RESISTANCE);
const AccessorROfloat fa_wire_cap(regions[1], FID_WIRE_CAP);
const AccessorROfloat fa_pvt_voltage(regions[2], FID_NODE_VOLTAGE);
const AccessorROfloat fa_shr_voltage(regions[3], FID_NODE_VOLTAGE);
const AccessorROfloat fa_ghost_voltage(regions[4], FID_NODE_VOLTAGE);
const int threads_per_block = 256;
const int num_blocks = (piece.num_wires + (threads_per_block-1)) / threads_per_block;
hipLaunchKernelGGL(( calc_new_currents_kernel), dim3(num_blocks),dim3(threads_per_block), 0, 0, piece.first_wire,
piece.num_wires,
piece.dt,
piece.steps,
fa_in_ptr,
fa_out_ptr,
fa_in_loc,
fa_out_loc,
fa_inductance,
fa_resistance,
fa_wire_cap,
fa_pvt_voltage,
fa_shr_voltage,
fa_ghost_voltage,
fa_currents,
fa_voltages);
#endif
}
typedef ReductionAccessor<GPUAccumulateCharge,false/*exclusive*/,1,coord_t,
Realm::AffineAccessor<float,1,coord_t> > AccessorRDfloat;
__device__ __forceinline__
void reduce_local(const AccessorRWfloat &pvt,
const AccessorRDfloat &shr,
const AccessorRDfloat &ghost,
Point<1> ptr, PointerLocation loc, float value)
{
switch (loc)
{
case PRIVATE_PTR:
GPUAccumulateCharge::apply<true/*exclusive*/>(pvt[ptr], value);
break;
case SHARED_PTR:
shr[ptr] <<= value;
break;
case GHOST_PTR:
ghost[ptr] <<= value;
break;
default:
break; // assert(false); // should never make it here
}
}
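// Reduction note: private nodes are owned by this piece, so the charge is
// applied directly through GPUAccumulateCharge (an atomicAdd on the raw
// element); shared and ghost nodes go through `<<=`, the fold operator of
// the Realm ReductionAccessor typedef'd just above, so concurrent updates
// from neighboring pieces remain safe.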
__global__
void distribute_charge_kernel(Point<1> first,
const int num_wires,
float dt,
const AccessorROpoint fa_in_ptr,
const AccessorROpoint fa_out_ptr,
const AccessorROloc fa_in_loc,
const AccessorROloc fa_out_loc,
const AccessorROfloat fa_in_current,
const AccessorROfloat fa_out_current,
const AccessorRWfloat fa_pvt_charge,
const AccessorRDfloat fa_shr_charge,
const AccessorRDfloat fa_ghost_charge)
{
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < num_wires)
{
const Point<1> wire_ptr = first + tid;
float in_dq = -dt * fa_in_current[wire_ptr];
float out_dq = dt * fa_out_current[wire_ptr];
Point<1> in_ptr = fa_in_ptr[wire_ptr];
PointerLocation in_loc = fa_in_loc[wire_ptr];
reduce_local(fa_pvt_charge, fa_shr_charge, fa_ghost_charge, in_ptr, in_loc, in_dq);
Point<1> out_ptr = fa_out_ptr[wire_ptr];
PointerLocation out_loc = fa_out_loc[wire_ptr];
reduce_local(fa_pvt_charge, fa_shr_charge, fa_ghost_charge, out_ptr, out_loc, out_dq);
}
}
/*static*/
__host__
void DistributeChargeTask::gpu_base_impl(const CircuitPiece &piece,
const std::vector<PhysicalRegion> ®ions)
{
#ifndef DISABLE_MATH
const AccessorROpoint fa_in_ptr(regions[0], FID_IN_PTR);
const AccessorROpoint fa_out_ptr(regions[0], FID_OUT_PTR);
const AccessorROloc fa_in_loc(regions[0], FID_IN_LOC);
const AccessorROloc fa_out_loc(regions[0], FID_OUT_LOC);
const AccessorROfloat fa_in_current(regions[0], FID_CURRENT);
const AccessorROfloat fa_out_current(regions[0], FID_CURRENT+WIRE_SEGMENTS-1);
const AccessorRWfloat fa_pvt_charge(regions[1], FID_CHARGE);
const AccessorRDfloat fa_shr_charge(regions[2], FID_CHARGE, REDUCE_ID);
const AccessorRDfloat fa_ghost_charge(regions[3], FID_CHARGE, REDUCE_ID);
const int threads_per_block = 256;
const int num_blocks = (piece.num_wires + (threads_per_block-1)) / threads_per_block;
hipLaunchKernelGGL(( distribute_charge_kernel), dim3(num_blocks),dim3(threads_per_block), 0, 0, piece.first_wire,
piece.num_wires,
piece.dt,
fa_in_ptr,
fa_out_ptr,
fa_in_loc,
fa_out_loc,
fa_in_current,
fa_out_current,
fa_pvt_charge,
fa_shr_charge,
fa_ghost_charge);
#endif
}
__global__
void update_voltages_kernel(Point<1> first,
const int num_nodes,
const AccessorRWfloat fa_pvt_voltage,
const AccessorRWfloat fa_shr_voltage,
const AccessorRWfloat fa_pvt_charge,
const AccessorRWfloat fa_shr_charge,
const AccessorROfloat fa_pvt_cap,
const AccessorROfloat fa_shr_cap,
const AccessorROfloat fa_pvt_leakage,
const AccessorROfloat fa_shr_leakage,
const AccessorROloc fa_ptr_loc)
{
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < num_nodes)
{
const Point<1> node_ptr = first + tid;
PointerLocation node_loc = fa_ptr_loc[node_ptr];
if (node_loc == PRIVATE_PTR)
{
float voltage = fa_pvt_voltage[node_ptr];
float charge = fa_pvt_charge[node_ptr];
float capacitance = fa_pvt_cap[node_ptr];
float leakage = fa_pvt_leakage[node_ptr];
voltage += (charge / capacitance);
voltage *= (1.f - leakage);
fa_pvt_voltage[node_ptr] = voltage;
fa_pvt_charge[node_ptr] = 0.f;
}
else
{
float voltage = fa_shr_voltage[node_ptr];
float charge = fa_shr_charge[node_ptr];
float capacitance = fa_shr_cap[node_ptr];
float leakage = fa_shr_leakage[node_ptr];
voltage += (charge / capacitance);
voltage *= (1.f - leakage);
fa_shr_voltage[node_ptr] = voltage;
fa_shr_charge[node_ptr] = 0.f;
}
}
}
/*static*/
__host__
void UpdateVoltagesTask::gpu_base_impl(const CircuitPiece &piece,
const std::vector<PhysicalRegion> ®ions)
{
#ifndef DISABLE_MATH
const AccessorRWfloat fa_pvt_voltage(regions[0], FID_NODE_VOLTAGE);
const AccessorRWfloat fa_pvt_charge(regions[0], FID_CHARGE);
const AccessorRWfloat fa_shr_voltage(regions[1], FID_NODE_VOLTAGE);
const AccessorRWfloat fa_shr_charge(regions[1], FID_CHARGE);
const AccessorROfloat fa_pvt_cap(regions[2], FID_NODE_CAP);
const AccessorROfloat fa_pvt_leakage(regions[2], FID_LEAKAGE);
const AccessorROfloat fa_shr_cap(regions[3], FID_NODE_CAP);
const AccessorROfloat fa_shr_leakage(regions[3], FID_LEAKAGE);
const AccessorROloc fa_ptr_loc(regions[4], FID_LOCATOR);
const int threads_per_block = 256;
const int num_blocks = (piece.num_nodes + (threads_per_block-1)) / threads_per_block;
hipLaunchKernelGGL(( update_voltages_kernel), dim3(num_blocks),dim3(threads_per_block), 0, 0, piece.first_node,
piece.num_nodes,
fa_pvt_voltage,
fa_shr_voltage,
fa_pvt_charge,
fa_shr_charge,
fa_pvt_cap,
fa_shr_cap,
fa_pvt_leakage,
fa_shr_leakage,
fa_ptr_loc);
#endif
}
| 666cdf50ffcfd26563830492b7b737d95e73cbd2.cu | /* Copyright 2020 Stanford University
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "circuit.h"
#include "cuda_runtime.h"
class GPUAccumulateCharge {
public:
typedef float LHS;
typedef float RHS;
template<bool EXCLUSIVE>
__host__ __device__ __forceinline__
static void apply(LHS &lhs, RHS &rhs)
{
#ifdef __CUDA_ARCH__
float *target = &lhs;
atomicAdd(target,rhs);
#else
assert(false);
#endif
}
template<bool EXCLUSIVE>
__host__ __device__ __forceinline__
static void fold(RHS &rhs1, RHS rhs2)
{
#ifdef __CUDA_ARCH__
float *target = &rhs1;
atomicAdd(target,rhs2);
#else
assert(false);
#endif
}
};
template<typename AT, int SEGMENTS>
struct SegmentAccessors {
public:
__host__ __device__
inline AT& operator[](unsigned index) { return accessors[index]; }
__host__ __device__
inline const AT& operator[](unsigned index) const { return accessors[index]; }
public:
AT accessors[SEGMENTS];
};
__device__ __forceinline__
float find_node_voltage(const AccessorROfloat &pvt,
const AccessorROfloat &shr,
const AccessorROfloat &ghost,
Point<1> ptr, PointerLocation loc)
{
switch (loc)
{
case PRIVATE_PTR:
return pvt[ptr];
case SHARED_PTR:
return shr[ptr];
case GHOST_PTR:
return ghost[ptr];
default:
break; // assert(false);
}
return 0.f;
}
__global__
void calc_new_currents_kernel(Point<1> first,
int num_wires,
float dt,
int steps,
const AccessorROpoint fa_in_ptr,
const AccessorROpoint fa_out_ptr,
const AccessorROloc fa_in_loc,
const AccessorROloc fa_out_loc,
const AccessorROfloat fa_inductance,
const AccessorROfloat fa_resistance,
const AccessorROfloat fa_wire_cap,
const AccessorROfloat fa_pvt_voltage,
const AccessorROfloat fa_shr_voltage,
const AccessorROfloat fa_ghost_voltage,
const SegmentAccessors<AccessorRWfloat,WIRE_SEGMENTS> fa_currents,
const SegmentAccessors<AccessorRWfloat,WIRE_SEGMENTS-1> fa_voltages)
{
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
// We can do this because we know we have SOA layout and wires are dense
if (tid < num_wires)
{
const Point<1> wire_ptr = first + tid;
float recip_dt = 1.f/dt;
float temp_v[WIRE_SEGMENTS+1];
float temp_i[WIRE_SEGMENTS];
float old_i[WIRE_SEGMENTS];
float old_v[WIRE_SEGMENTS-1];
#pragma unroll
for (int i = 0; i < WIRE_SEGMENTS; i++)
{
temp_i[i] = fa_currents[i][wire_ptr];
old_i[i] = temp_i[i];
}
#pragma unroll
for (int i = 0; i < (WIRE_SEGMENTS-1); i++)
{
temp_v[i+1] = fa_voltages[i][wire_ptr];
old_v[i] = temp_v[i+1];
}
Point<1> in_ptr = fa_in_ptr[wire_ptr];
PointerLocation in_loc = fa_in_loc[wire_ptr];
temp_v[0] =
find_node_voltage(fa_pvt_voltage, fa_shr_voltage, fa_ghost_voltage, in_ptr, in_loc);
Point<1> out_ptr = fa_out_ptr[wire_ptr];
PointerLocation out_loc = fa_out_loc[wire_ptr];
temp_v[WIRE_SEGMENTS] =
find_node_voltage(fa_pvt_voltage, fa_shr_voltage, fa_ghost_voltage, out_ptr, out_loc);
// Solve the RLC model iteratively
float inductance = fa_inductance[wire_ptr];
float recip_resistance = 1.f/fa_resistance[wire_ptr];
float recip_capacitance = 1.f/fa_wire_cap[wire_ptr];
for (int j = 0; j < steps; j++)
{
#pragma unroll
for (int i = 0; i < WIRE_SEGMENTS; i++)
{
temp_i[i] = ((temp_v[i] - temp_v[i+1]) -
(inductance * (temp_i[i] - old_i[i]) * recip_dt)) * recip_resistance;
}
#pragma unroll
for (int i = 0; i < (WIRE_SEGMENTS-1); i++)
{
temp_v[i+1] = old_v[i] + dt * (temp_i[i] - temp_i[i+1]) * recip_capacitance;
}
}
// Write out the result
#pragma unroll
for (int i = 0; i < WIRE_SEGMENTS; i++)
fa_currents[i][wire_ptr] = temp_i[i];
#pragma unroll
for (int i = 0; i < (WIRE_SEGMENTS-1); i++)
fa_voltages[i][wire_ptr] = temp_v[i+1];
}
}
/*static*/
__host__
void CalcNewCurrentsTask::gpu_base_impl(const CircuitPiece &piece,
const std::vector<PhysicalRegion> ®ions)
{
#ifndef DISABLE_MATH
SegmentAccessors<AccessorRWfloat,WIRE_SEGMENTS> fa_currents;
for (int i = 0; i < WIRE_SEGMENTS; i++)
fa_currents[i] = AccessorRWfloat(regions[0], FID_CURRENT+i);
SegmentAccessors<AccessorRWfloat,WIRE_SEGMENTS-1> fa_voltages;
for (int i = 0; i < (WIRE_SEGMENTS-1); i++)
fa_voltages[i] = AccessorRWfloat(regions[0], FID_WIRE_VOLTAGE+i);
const AccessorROpoint fa_in_ptr(regions[1], FID_IN_PTR);
const AccessorROpoint fa_out_ptr(regions[1], FID_OUT_PTR);
const AccessorROloc fa_in_loc(regions[1], FID_IN_LOC);
const AccessorROloc fa_out_loc(regions[1], FID_OUT_LOC);
const AccessorROfloat fa_inductance(regions[1], FID_INDUCTANCE);
const AccessorROfloat fa_resistance(regions[1], FID_RESISTANCE);
const AccessorROfloat fa_wire_cap(regions[1], FID_WIRE_CAP);
const AccessorROfloat fa_pvt_voltage(regions[2], FID_NODE_VOLTAGE);
const AccessorROfloat fa_shr_voltage(regions[3], FID_NODE_VOLTAGE);
const AccessorROfloat fa_ghost_voltage(regions[4], FID_NODE_VOLTAGE);
const int threads_per_block = 256;
const int num_blocks = (piece.num_wires + (threads_per_block-1)) / threads_per_block;
calc_new_currents_kernel<<<num_blocks,threads_per_block>>>(piece.first_wire,
piece.num_wires,
piece.dt,
piece.steps,
fa_in_ptr,
fa_out_ptr,
fa_in_loc,
fa_out_loc,
fa_inductance,
fa_resistance,
fa_wire_cap,
fa_pvt_voltage,
fa_shr_voltage,
fa_ghost_voltage,
fa_currents,
fa_voltages);
#endif
}
typedef ReductionAccessor<GPUAccumulateCharge,false/*exclusive*/,1,coord_t,
Realm::AffineAccessor<float,1,coord_t> > AccessorRDfloat;
__device__ __forceinline__
void reduce_local(const AccessorRWfloat &pvt,
const AccessorRDfloat &shr,
const AccessorRDfloat &ghost,
Point<1> ptr, PointerLocation loc, float value)
{
switch (loc)
{
case PRIVATE_PTR:
GPUAccumulateCharge::apply<true/*exclusive*/>(pvt[ptr], value);
break;
case SHARED_PTR:
shr[ptr] <<= value;
break;
case GHOST_PTR:
ghost[ptr] <<= value;
break;
default:
break; // assert(false); // should never make it here
}
}
__global__
void distribute_charge_kernel(Point<1> first,
const int num_wires,
float dt,
const AccessorROpoint fa_in_ptr,
const AccessorROpoint fa_out_ptr,
const AccessorROloc fa_in_loc,
const AccessorROloc fa_out_loc,
const AccessorROfloat fa_in_current,
const AccessorROfloat fa_out_current,
const AccessorRWfloat fa_pvt_charge,
const AccessorRDfloat fa_shr_charge,
const AccessorRDfloat fa_ghost_charge)
{
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < num_wires)
{
const Point<1> wire_ptr = first + tid;
float in_dq = -dt * fa_in_current[wire_ptr];
float out_dq = dt * fa_out_current[wire_ptr];
Point<1> in_ptr = fa_in_ptr[wire_ptr];
PointerLocation in_loc = fa_in_loc[wire_ptr];
reduce_local(fa_pvt_charge, fa_shr_charge, fa_ghost_charge, in_ptr, in_loc, in_dq);
Point<1> out_ptr = fa_out_ptr[wire_ptr];
PointerLocation out_loc = fa_out_loc[wire_ptr];
reduce_local(fa_pvt_charge, fa_shr_charge, fa_ghost_charge, out_ptr, out_loc, out_dq);
}
}
/*static*/
__host__
void DistributeChargeTask::gpu_base_impl(const CircuitPiece &piece,
const std::vector<PhysicalRegion> ®ions)
{
#ifndef DISABLE_MATH
const AccessorROpoint fa_in_ptr(regions[0], FID_IN_PTR);
const AccessorROpoint fa_out_ptr(regions[0], FID_OUT_PTR);
const AccessorROloc fa_in_loc(regions[0], FID_IN_LOC);
const AccessorROloc fa_out_loc(regions[0], FID_OUT_LOC);
const AccessorROfloat fa_in_current(regions[0], FID_CURRENT);
const AccessorROfloat fa_out_current(regions[0], FID_CURRENT+WIRE_SEGMENTS-1);
const AccessorRWfloat fa_pvt_charge(regions[1], FID_CHARGE);
const AccessorRDfloat fa_shr_charge(regions[2], FID_CHARGE, REDUCE_ID);
const AccessorRDfloat fa_ghost_charge(regions[3], FID_CHARGE, REDUCE_ID);
const int threads_per_block = 256;
const int num_blocks = (piece.num_wires + (threads_per_block-1)) / threads_per_block;
distribute_charge_kernel<<<num_blocks,threads_per_block>>>(piece.first_wire,
piece.num_wires,
piece.dt,
fa_in_ptr,
fa_out_ptr,
fa_in_loc,
fa_out_loc,
fa_in_current,
fa_out_current,
fa_pvt_charge,
fa_shr_charge,
fa_ghost_charge);
#endif
}
__global__
void update_voltages_kernel(Point<1> first,
const int num_nodes,
const AccessorRWfloat fa_pvt_voltage,
const AccessorRWfloat fa_shr_voltage,
const AccessorRWfloat fa_pvt_charge,
const AccessorRWfloat fa_shr_charge,
const AccessorROfloat fa_pvt_cap,
const AccessorROfloat fa_shr_cap,
const AccessorROfloat fa_pvt_leakage,
const AccessorROfloat fa_shr_leakage,
const AccessorROloc fa_ptr_loc)
{
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < num_nodes)
{
const Point<1> node_ptr = first + tid;
PointerLocation node_loc = fa_ptr_loc[node_ptr];
if (node_loc == PRIVATE_PTR)
{
float voltage = fa_pvt_voltage[node_ptr];
float charge = fa_pvt_charge[node_ptr];
float capacitance = fa_pvt_cap[node_ptr];
float leakage = fa_pvt_leakage[node_ptr];
voltage += (charge / capacitance);
voltage *= (1.f - leakage);
fa_pvt_voltage[node_ptr] = voltage;
fa_pvt_charge[node_ptr] = 0.f;
}
else
{
float voltage = fa_shr_voltage[node_ptr];
float charge = fa_shr_charge[node_ptr];
float capacitance = fa_shr_cap[node_ptr];
float leakage = fa_shr_leakage[node_ptr];
voltage += (charge / capacitance);
voltage *= (1.f - leakage);
fa_shr_voltage[node_ptr] = voltage;
fa_shr_charge[node_ptr] = 0.f;
}
}
}
/*static*/
__host__
void UpdateVoltagesTask::gpu_base_impl(const CircuitPiece &piece,
const std::vector<PhysicalRegion> ®ions)
{
#ifndef DISABLE_MATH
const AccessorRWfloat fa_pvt_voltage(regions[0], FID_NODE_VOLTAGE);
const AccessorRWfloat fa_pvt_charge(regions[0], FID_CHARGE);
const AccessorRWfloat fa_shr_voltage(regions[1], FID_NODE_VOLTAGE);
const AccessorRWfloat fa_shr_charge(regions[1], FID_CHARGE);
const AccessorROfloat fa_pvt_cap(regions[2], FID_NODE_CAP);
const AccessorROfloat fa_pvt_leakage(regions[2], FID_LEAKAGE);
const AccessorROfloat fa_shr_cap(regions[3], FID_NODE_CAP);
const AccessorROfloat fa_shr_leakage(regions[3], FID_LEAKAGE);
const AccessorROloc fa_ptr_loc(regions[4], FID_LOCATOR);
const int threads_per_block = 256;
const int num_blocks = (piece.num_nodes + (threads_per_block-1)) / threads_per_block;
update_voltages_kernel<<<num_blocks,threads_per_block>>>(piece.first_node,
piece.num_nodes,
fa_pvt_voltage,
fa_shr_voltage,
fa_pvt_charge,
fa_shr_charge,
fa_pvt_cap,
fa_shr_cap,
fa_pvt_leakage,
fa_shr_leakage,
fa_ptr_loc);
#endif
}
|
4465b8c4640b7739054ecebd5a2b6dd06fe2cfff.hip | // !!! This is a file automatically generated by hipify!!!
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
// includes, project
#include <hip/hip_runtime.h>
#include <hipfft.h>
#include <hipfftXt.h>
#include "hip/device_functions.h"
#include "transformfunc.h"
#include "funcinterface.h"
#include "cutranspose.h"
#include "time_.h"
// hipfftHandle plan;
hipfftHandle plan;
hipfftHandle plan_c2r;
int do_r2c,do_c2r;
// Note: modifies the input array in place
void do_transform(double *d_data,double *d_out,int NX,int NY,int NZ,transform_kind X_TRANS,transform_kind Y_TRANS,transform_kind Z_TRANS){
if(d_out==NULL||d_out==d_data){
if(NX==NY&&NX==NZ){
// printf("do inplace\n");
do_transform_cubic(d_data,NX,NY,NZ,X_TRANS,Y_TRANS,Z_TRANS);
}else{
printf("the matrix for inplace transform must be cubic!\n");
exit(0);
}
}else{
if(d_out!=d_data){
// printf("do out-of-place\n");
do_transform_nocubic(d_data,d_out,NX,NY,NZ,X_TRANS,Y_TRANS,Z_TRANS);
}
}
}
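// Hypothetical usage sketch (buffer name and size N are illustrative): an
// in-place mixed 3-D transform on a cubic, (N+2)^3 zero-padded buffer.
//
// double *d_buf;
// hipMalloc(&d_buf, sizeof(double) * (N + 2) * (N + 2) * (N + 2));
// /* ... fill d_buf with the padded input field ... */
// do_transform(d_buf, NULL, N, N, N, DCT_2, DCT_2, DST_1);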
void do_transform_cubic(double *d_data,int NX,int NY,int NZ,transform_kind X_TRANS,transform_kind Y_TRANS,transform_kind Z_TRANS){
do_c2r=0;
do_r2c=0;
if(X_TRANS==DST_2||Y_TRANS==DST_2||Z_TRANS==DST_2||
X_TRANS==DFT_C2R||Y_TRANS==DFT_C2R||Z_TRANS==DFT_C2R||
X_TRANS==DCT_2||Y_TRANS==DCT_2||Z_TRANS==DCT_2||
X_TRANS==DST_1||Y_TRANS==DST_1||Z_TRANS==DST_1||
X_TRANS==DCT_1||Y_TRANS==DCT_1||Z_TRANS==DCT_1){
do_c2r=1;
int n[1]={NX};
int inembeb[1]={(NX+2)/2};
int onembeb[1]={(NX+2)};
hipfftResult r = hipfftPlanMany(&plan_c2r,1,n,
inembeb,1,(NX+2)/2,
onembeb,1,(NX+2),
HIPFFT_Z2D, (NX+2)*(NX+2));
if(r!=0){
printf("CUFFT FAILED! ERROR CODE: %s\n",cufftresultcode[r]);
exit(0);
}
}
if(X_TRANS==DST_3||Y_TRANS==DST_3||Z_TRANS==DST_3||
X_TRANS==DCT_3||Y_TRANS==DCT_3||Z_TRANS==DCT_3||
X_TRANS==DFT_R2C||Y_TRANS==DFT_R2C||Z_TRANS==DFT_R2C){
do_r2c=1;
int n[1]={NX};
int inembeb[1]={NX+2};
int onembeb[1]={(NX+2)/2};
hipfftResult r = hipfftPlanMany(&plan,1,n,
inembeb,1,NX+2,
onembeb,1,(NX+2)/2,
HIPFFT_D2Z, (NX+2)*(NX+2));
if(r!=0){
printf("CUFFT FAILED! ERROR CODE: %s\n",cufftresultcode[r]);
exit(0);
}
}
if(X_TRANS==DST_1){
// printf("x transform dst1\n");
run_3d_dst_1_inplace(d_data,NX,plan_c2r);
}else if(X_TRANS==DST_2){
run_3d_dst_2_inplace(d_data,NX,plan_c2r);
}else if(X_TRANS==DST_3){
// printf("x transform dst3\n");
run_3d_dst_3_inplace(d_data,NX+1,plan);
}else if(X_TRANS==DCT_1){
// printf("x transform dct1\n");
run_3d_dct_1_inplace(d_data,NX+1,plan_c2r);
}else if(X_TRANS==DCT_2){
// printf("x transform dct2\n");
run_3d_dct_2_inplace(d_data,NX,plan_c2r);
}else if(X_TRANS==DCT_3){
// printf("x transform dct3\n");
run_3d_dct_3_inplace(d_data,NX,plan);
}else if(X_TRANS==DFT_R2C){
run_dft_r2c_inplace(d_data,NX,plan);
}else if(X_TRANS==DFT_C2R){
run_dft_c2r_inplace(d_data,NX,plan_c2r);
}else{
printf("Please input the correct transform kind\n");
}
hipDeviceSynchronize();
hipError_t e;
if((e=hipGetLastError())!=hipSuccess){
printf("CUDA ERROR: %s !\n",hipGetErrorString(e));
exit(0);
}
// printf("%s\n",hipGetErrorString(hipGetLastError()));
int mat_size1[3]={NX+2,NY+2,NZ+2};
int permutation1[3]={1,0,2};
cut_transpose3d(d_data,d_data,mat_size1,permutation1,1);
hipDeviceSynchronize();
if(Y_TRANS==DST_1){
// printf("y transform dst1\n");
run_3d_dst_1_inplace(d_data,NY,plan_c2r);
}else if(Y_TRANS==DST_2){
run_3d_dst_2_inplace(d_data,NY,plan_c2r);
}else if(Y_TRANS==DST_3){
// printf("y transform dst3\n");
run_3d_dst_3_inplace(d_data,NY+1,plan);
}else if(Y_TRANS==DCT_1){
// printf("y transform dct1\n");
run_3d_dct_1_inplace(d_data,NY+1,plan_c2r);
}else if(Y_TRANS==DCT_2){
// printf("y transform dct2\n");
run_3d_dct_2_inplace(d_data,NY,plan_c2r);
}else if(Y_TRANS==DCT_3){
// printf("y transform dct3\n");
run_3d_dct_3_inplace(d_data,NY,plan);
}else if(Y_TRANS==DFT_R2C){
run_dft_r2c_inplace(d_data,NY,plan);
}else if(Y_TRANS==DFT_C2R){
run_dft_c2r_inplace(d_data,NY,plan_c2r);
}else{
printf("Please input the correct transform kind\n");
}
hipDeviceSynchronize();
if((e=hipGetLastError())!=hipSuccess){
printf("CUDA ERROR: %s !\n",hipGetErrorString(e));
exit(0);
}
int mat_size2[3]={NY+2,NX+2,NZ+2};
int permutation2[3]={2,0,1};
cut_transpose3d(d_data,d_data,mat_size2,permutation2,1);
hipDeviceSynchronize();
if(Z_TRANS==DST_1){
// printf("z transform dst1\n");
run_3d_dst_1_inplace(d_data,NZ,plan_c2r);
}else if(Z_TRANS==DST_2){
run_3d_dst_2_inplace(d_data,NZ,plan_c2r);
}else if(Z_TRANS==DST_3){
// printf("z transform dst3\n");
run_3d_dst_3_inplace(d_data,NZ+1,plan);
}else if(Z_TRANS==DCT_1){
// printf("z transform dct1\n");
run_3d_dct_1_inplace(d_data,NZ+1,plan_c2r);
}else if(Z_TRANS==DCT_2){
// printf("z transform dct2\n");
run_3d_dct_2_inplace(d_data,NZ,plan_c2r);
}else if(Z_TRANS==DCT_3){
// printf("z transform dct3\n");
run_3d_dct_3_inplace(d_data,NZ,plan);
}else if(Z_TRANS==DFT_R2C){
run_dft_r2c_inplace(d_data,NZ,plan);
}else if(Z_TRANS==DFT_C2R){
run_dft_c2r_inplace(d_data,NZ,plan_c2r);
}else{
printf("Please input the correct transform kind\n");
}
hipDeviceSynchronize();
if((e=hipGetLastError())!=hipSuccess){
printf("CUDA ERROR: %s !\n",hipGetErrorString(e));
exit(0);
}
int mat_size4[3]={NZ+2,NY+2,NX+2};
int permutation4[3]={2,1,0};
cut_transpose3d(d_data,d_data,mat_size4,permutation4,1);
hipDeviceSynchronize();
freeMemory_cubic();
}
void do_transform_nocubic(double *d_data,double *d_out,int NX,int NY,int NZ,transform_kind X_TRANS,transform_kind Y_TRANS,transform_kind Z_TRANS){
if(X_TRANS==DST_1){
// printf("x transform dst1\n");
run_3d_dst_1_inplace_nocubic(d_data,NX,NY,NZ);
}
else if(X_TRANS==DST_2){
run_3d_dst_2_inplace_nocubic(d_data,NX,NY,NZ);
}
else if(X_TRANS==DST_3){
// printf("x transform dst3\n");
run_3d_dst_3_inplace_nocubic(d_data,NX+1,NY+1,NZ+1);
}
else if(X_TRANS==DCT_1){
// printf("x transform dct1\n");
run_3d_dct_1_inplace_nocubic(d_data,NX+1,NY+1,NZ+1);
}
else if(X_TRANS==DCT_2){
// printf("x transform dct2\n");
run_3d_dct_2_inplace_nocubic(d_data,NX,NY,NZ);
}
else if(X_TRANS==DCT_3){
// printf("x transform dct3\n");
run_3d_dct_3_inplace_nocubic(d_data,NX,NY,NZ);
}else if(X_TRANS==DFT_R2C){
run_dft_r2c_inplace_nocubic(d_data,NX,NY,NZ);
}else if(X_TRANS==DFT_C2R){
run_dft_c2r_inplace_nocubic(d_data,NX,NY,NZ);
}else{
printf("Please input the correct transform kind\n");
}
hipDeviceSynchronize();
hipError_t e;
if((e=hipGetLastError())!=hipSuccess){
printf("CUDA ERROR: %s !\n",hipGetErrorString(e));
exit(0);
}
int mat_size1[3]={NX+2,NY+2,NZ+2};
int permutation1[3]={1,2,0};
cut_transpose3d(d_out,d_data,mat_size1,permutation1,1);
hipDeviceSynchronize();
if(Y_TRANS==DST_1){
// printf("y transform dst1\n");
run_3d_dst_1_inplace_nocubic(d_out,NY,NZ,NX);
}
else if(Y_TRANS==DST_2){
run_3d_dst_2_inplace_nocubic(d_out,NY,NZ,NX);
}
else if(Y_TRANS==DST_3){
// printf("y transform dst3\n");
run_3d_dst_3_inplace_nocubic(d_out,NY+1,NZ+1,NX+1);
}
else if(Y_TRANS==DCT_1){
// printf("y transform dct1\n");
run_3d_dct_1_inplace_nocubic(d_out,NY+1,NZ+1,NX+1);
}
else if(Y_TRANS==DCT_2){
// printf("y transform dct2\n");
run_3d_dct_2_inplace_nocubic(d_out,NY,NZ,NX);
}
else if(Y_TRANS==DCT_3){
// printf("y transform dct3\n");
run_3d_dct_3_inplace_nocubic(d_out,NY,NZ,NX);
}else if(Y_TRANS==DFT_R2C){
run_dft_r2c_inplace_nocubic(d_out,NY,NZ,NX);
}else if(Y_TRANS==DFT_C2R){
run_dft_c2r_inplace_nocubic(d_out,NY,NZ,NX);
}else{
printf("Please input the correct transform kind\n");
}
hipDeviceSynchronize();
if((e=hipGetLastError())!=hipSuccess){
printf("CUDA ERROR: %s !\n",hipGetErrorString(e));
exit(0);
}
int mat_size2[3]={NY+2,NZ+2,NX+2};
int permutation2[3]={1,2,0};
cut_transpose3d(d_data,d_out,mat_size2,permutation2,1);
hipDeviceSynchronize();
if(Z_TRANS==DST_1){
// printf("z transform dst1\n");
run_3d_dst_1_inplace_nocubic(d_data,NZ,NX,NY);
}
else if(Z_TRANS==DST_2){
run_3d_dst_2_inplace_nocubic(d_data,NZ,NX,NY);
}
else if(Z_TRANS==DST_3){
// printf("z transform dst3\n");
run_3d_dst_3_inplace_nocubic(d_data,NZ+1,NX+1,NY+1);
}
else if(Z_TRANS==DCT_1){
// printf("z transform dct1\n");
run_3d_dct_1_inplace_nocubic(d_data,NZ+1,NX+1,NY+1);
}
else if(Z_TRANS==DCT_2){
// printf("z transform dct2\n");
run_3d_dct_2_inplace_nocubic(d_data,NZ,NX,NY);
}
else if(Z_TRANS==DCT_3){
// printf("z transform dct3\n");
run_3d_dct_3_inplace_nocubic(d_data,NZ,NX,NY);
}else if(Z_TRANS==DFT_R2C){
run_dft_r2c_inplace_nocubic(d_data,NZ,NX,NY);
}else if(Z_TRANS==DFT_C2R){
run_dft_c2r_inplace_nocubic(d_data,NZ,NX,NY);
}else{
printf("Please input the correct transform kind\n");
}
hipDeviceSynchronize();
if((e=hipGetLastError())!=hipSuccess){
printf("CUDA ERROR: %s !\n",hipGetErrorString(e));
exit(0);
}
// printf("%s\n",hipGetErrorString(hipGetLastError()));
int mat_size3[3]={NZ+2,NX+2,NY+2};
int permutation3[3]={1,2,0};
cut_transpose3d(d_out,d_data,mat_size3,permutation3,1);
hipDeviceSynchronize();
}
void freeMemory_cubic(){
if(do_r2c==1)
hipfftDestroy(plan);
if(do_c2r==1)
hipfftDestroy(plan_c2r);
}
| 4465b8c4640b7739054ecebd5a2b6dd06fe2cfff.cu | #include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
// includes, project
#include <cuda_runtime.h>
#include <cufft.h>
#include <cufftXt.h>
#include "device_functions.h"
#include "transformfunc.h"
#include "funcinterface.h"
#include "cutranspose.h"
#include "time_.h"
// cufftHandle plan;
cufftHandle plan;
cufftHandle plan_c2r;
int do_r2c,do_c2r;
// Note: modifies the input array in place
void do_transform(double *d_data,double *d_out,int NX,int NY,int NZ,transform_kind X_TRANS,transform_kind Y_TRANS,transform_kind Z_TRANS){
if(d_out==NULL||d_out==d_data){
if(NX==NY&&NX==NZ){
// printf("do inplace\n");
do_transform_cubic(d_data,NX,NY,NZ,X_TRANS,Y_TRANS,Z_TRANS);
}else{
printf("the matrix for inplace transform must be cubic!\n");
exit(0);
}
}else{
if(d_out!=d_data){
// printf("do out-of-place\n");
do_transform_nocubic(d_data,d_out,NX,NY,NZ,X_TRANS,Y_TRANS,Z_TRANS);
}
}
}
void do_transform_cubic(double *d_data,int NX,int NY,int NZ,transform_kind X_TRANS,transform_kind Y_TRANS,transform_kind Z_TRANS){
do_c2r=0;
do_r2c=0;
if(X_TRANS==DST_2||Y_TRANS==DST_2||Z_TRANS==DST_2||
X_TRANS==DFT_C2R||Y_TRANS==DFT_C2R||Z_TRANS==DFT_C2R||
X_TRANS==DCT_2||Y_TRANS==DCT_2||Z_TRANS==DCT_2||
X_TRANS==DST_1||Y_TRANS==DST_1||Z_TRANS==DST_1||
X_TRANS==DCT_1||Y_TRANS==DCT_1||Z_TRANS==DCT_1){
do_c2r=1;
int n[1]={NX};
int inembeb[1]={(NX+2)/2};
int onembeb[1]={(NX+2)};
cufftResult r = cufftPlanMany(&plan_c2r,1,n,
inembeb,1,(NX+2)/2,
onembeb,1,(NX+2),
CUFFT_Z2D, (NX+2)*(NX+2));
if(r!=0){
printf("CUFFT FAILED! ERROR CODE: %s\n",cufftresultcode[r]);
exit(0);
}
}
if(X_TRANS==DST_3||Y_TRANS==DST_3||Z_TRANS==DST_3||
X_TRANS==DCT_3||Y_TRANS==DCT_3||Z_TRANS==DCT_3||
X_TRANS==DFT_R2C||Y_TRANS==DFT_R2C||Z_TRANS==DFT_R2C){
do_r2c=1;
int n[1]={NX};
int inembeb[1]={NX+2};
int onembeb[1]={(NX+2)/2};
cufftResult r = cufftPlanMany(&plan,1,n,
inembeb,1,NX+2,
onembeb,1,(NX+2)/2,
CUFFT_D2Z, (NX+2)*(NX+2));
if(r!=0){
printf("CUFFT FAILED! ERROR CODE: %s\n",cufftresultcode[r]);
exit(0);
}
}
if(X_TRANS==DST_1){
// printf("x transform dst1\n");
run_3d_dst_1_inplace(d_data,NX,plan_c2r);
}else if(X_TRANS==DST_2){
run_3d_dst_2_inplace(d_data,NX,plan_c2r);
}else if(X_TRANS==DST_3){
// printf("x transform dst3\n");
run_3d_dst_3_inplace(d_data,NX+1,plan);
}else if(X_TRANS==DCT_1){
// printf("x transform dct1\n");
run_3d_dct_1_inplace(d_data,NX+1,plan_c2r);
}else if(X_TRANS==DCT_2){
// printf("x transform dct2\n");
run_3d_dct_2_inplace(d_data,NX,plan_c2r);
}else if(X_TRANS==DCT_3){
// printf("x transform dct3\n");
run_3d_dct_3_inplace(d_data,NX,plan);
}else if(X_TRANS==DFT_R2C){
run_dft_r2c_inplace(d_data,NX,plan);
}else if(X_TRANS==DFT_C2R){
run_dft_c2r_inplace(d_data,NX,plan_c2r);
}else{
printf("Please input the correct transform kind\n");
}
cudaDeviceSynchronize();
cudaError_t e;
if((e=cudaGetLastError())!=cudaSuccess){
printf("CUDA ERROR: %s !\n",cudaGetErrorString(e));
exit(0);
}
// printf("%s\n",cudaGetErrorString(cudaGetLastError()));
int mat_size1[3]={NX+2,NY+2,NZ+2};
int permutation1[3]={1,0,2};
cut_transpose3d(d_data,d_data,mat_size1,permutation1,1);
cudaDeviceSynchronize();
if(Y_TRANS==DST_1){
// printf("y transform dst1\n");
run_3d_dst_1_inplace(d_data,NY,plan_c2r);
}else if(Y_TRANS==DST_2){
run_3d_dst_2_inplace(d_data,NY,plan_c2r);
}else if(Y_TRANS==DST_3){
// printf("y transform dst3\n");
run_3d_dst_3_inplace(d_data,NY+1,plan);
}else if(Y_TRANS==DCT_1){
// printf("y transform dct1\n");
run_3d_dct_1_inplace(d_data,NY+1,plan_c2r);
}else if(Y_TRANS==DCT_2){
// printf("y transform dct2\n");
run_3d_dct_2_inplace(d_data,NY,plan_c2r);
}else if(Y_TRANS==DCT_3){
// printf("y transform dct3\n");
run_3d_dct_3_inplace(d_data,NY,plan);
}else if(Y_TRANS==DFT_R2C){
run_dft_r2c_inplace(d_data,NY,plan);
}else if(Y_TRANS==DFT_C2R){
run_dft_c2r_inplace(d_data,NY,plan_c2r);
}else{
printf("Please input the correct transform kind\n");
}
cudaDeviceSynchronize();
if((e=cudaGetLastError())!=cudaSuccess){
printf("CUDA ERROR: %s !\n",cudaGetErrorString(e));
exit(0);
}
int mat_size2[3]={NY+2,NX+2,NZ+2};
int permutation2[3]={2,0,1};
cut_transpose3d(d_data,d_data,mat_size2,permutation2,1);
cudaDeviceSynchronize();
if(Z_TRANS==DST_1){
// printf("z transform dst1\n");
run_3d_dst_1_inplace(d_data,NZ,plan_c2r);
}else if(Z_TRANS==DST_2){
run_3d_dst_2_inplace(d_data,NZ,plan_c2r);
}else if(Z_TRANS==DST_3){
// printf("z transform dst3\n");
run_3d_dst_3_inplace(d_data,NZ+1,plan);
}else if(Z_TRANS==DCT_1){
// printf("z transform dct1\n");
run_3d_dct_1_inplace(d_data,NZ+1,plan_c2r);
}else if(Z_TRANS==DCT_2){
// printf("z transform dct2\n");
run_3d_dct_2_inplace(d_data,NZ,plan_c2r);
}else if(Z_TRANS==DCT_3){
// printf("z transform dct3\n");
run_3d_dct_3_inplace(d_data,NZ,plan);
}else if(Z_TRANS==DFT_R2C){
run_dft_r2c_inplace(d_data,NZ,plan);
}else if(Z_TRANS==DFT_C2R){
run_dft_c2r_inplace(d_data,NZ,plan_c2r);
}else{
printf("Please input the correct transform kind\n");
}
cudaDeviceSynchronize();
if((e=cudaGetLastError())!=cudaSuccess){
printf("CUDA ERROR: %s !\n",cudaGetErrorString(e));
exit(0);
}
int mat_size4[3]={NZ+2,NY+2,NX+2};
int permutation4[3]={2,1,0};
cut_transpose3d(d_data,d_data,mat_size4,permutation4,1);
cudaDeviceSynchronize();
freeMemory_cubic();
}
void do_transform_nocubic(double *d_data,double *d_out,int NX,int NY,int NZ,transform_kind X_TRANS,transform_kind Y_TRANS,transform_kind Z_TRANS){
if(X_TRANS==DST_1){
// printf("x transform dst1\n");
run_3d_dst_1_inplace_nocubic(d_data,NX,NY,NZ);
}
else if(X_TRANS==DST_2){
run_3d_dst_2_inplace_nocubic(d_data,NX,NY,NZ);
}
else if(X_TRANS==DST_3){
// printf("x transform dst3\n");
run_3d_dst_3_inplace_nocubic(d_data,NX+1,NY+1,NZ+1);
}
else if(X_TRANS==DCT_1){
// printf("x transform dct1\n");
run_3d_dct_1_inplace_nocubic(d_data,NX+1,NY+1,NZ+1);
}
else if(X_TRANS==DCT_2){
// printf("x transform dct2\n");
run_3d_dct_2_inplace_nocubic(d_data,NX,NY,NZ);
}
else if(X_TRANS==DCT_3){
// printf("x transform dct3\n");
run_3d_dct_3_inplace_nocubic(d_data,NX,NY,NZ);
}else if(X_TRANS==DFT_R2C){
run_dft_r2c_inplace_nocubic(d_data,NX,NY,NZ);
}else if(X_TRANS==DFT_C2R){
run_dft_c2r_inplace_nocubic(d_data,NX,NY,NZ);
}else{
printf("Please input the correct transform kind\n");
}
cudaDeviceSynchronize();
cudaError_t e;
if((e=cudaGetLastError())!=cudaSuccess){
printf("CUDA ERROR: %s !\n",cudaGetErrorString(e));
exit(0);
}
int mat_size1[3]={NX+2,NY+2,NZ+2};
int permutation1[3]={1,2,0};
cut_transpose3d(d_out,d_data,mat_size1,permutation1,1);
cudaDeviceSynchronize();
if(Y_TRANS==DST_1){
// printf("y transform dst1\n");
run_3d_dst_1_inplace_nocubic(d_out,NY,NZ,NX);
}
else if(Y_TRANS==DST_2){
run_3d_dst_2_inplace_nocubic(d_out,NY,NZ,NX);
}
else if(Y_TRANS==DST_3){
// printf("y transform dst3\n");
run_3d_dst_3_inplace_nocubic(d_out,NY+1,NZ+1,NX+1);
}
else if(Y_TRANS==DCT_1){
// printf("y transform dct1\n");
run_3d_dct_1_inplace_nocubic(d_out,NY+1,NZ+1,NX+1);
}
else if(Y_TRANS==DCT_2){
// printf("y transform dct2\n");
run_3d_dct_2_inplace_nocubic(d_out,NY,NZ,NX);
}
else if(Y_TRANS==DCT_3){
// printf("y transform dct3\n");
run_3d_dct_3_inplace_nocubic(d_out,NY,NZ,NX);
}else if(Y_TRANS==DFT_R2C){
run_dft_r2c_inplace_nocubic(d_out,NY,NZ,NX);
}else if(Y_TRANS==DFT_C2R){
run_dft_c2r_inplace_nocubic(d_out,NY,NZ,NX);
}else{
printf("Please input the correct transform kind\n");
}
cudaDeviceSynchronize();
if((e=cudaGetLastError())!=cudaSuccess){
printf("CUDA ERROR: %s !\n",cudaGetErrorString(e));
exit(0);
}
int mat_size2[3]={NY+2,NZ+2,NX+2};
int permutation2[3]={1,2,0};
cut_transpose3d(d_data,d_out,mat_size2,permutation2,1);
cudaDeviceSynchronize();
if(Z_TRANS==DST_1){
// printf("z transform dst1\n");
run_3d_dst_1_inplace_nocubic(d_data,NZ,NX,NY);
}
else if(Z_TRANS==DST_2){
run_3d_dst_2_inplace_nocubic(d_data,NZ,NX,NY);
}
else if(Z_TRANS==DST_3){
// printf("z transform dst3\n");
run_3d_dst_3_inplace_nocubic(d_data,NZ+1,NX+1,NY+1);
}
else if(Z_TRANS==DCT_1){
// printf("z transform dct1\n");
run_3d_dct_1_inplace_nocubic(d_data,NZ+1,NX+1,NY+1);
}
else if(Z_TRANS==DCT_2){
// printf("z transform dct2\n");
run_3d_dct_2_inplace_nocubic(d_data,NZ,NX,NY);
}
else if(Z_TRANS==DCT_3){
// printf("z transform dct3\n");
run_3d_dct_3_inplace_nocubic(d_data,NZ,NX,NY);
}else if(Z_TRANS==DFT_R2C){
run_dft_r2c_inplace_nocubic(d_data,NZ,NX,NY);
}else if(Z_TRANS==DFT_C2R){
run_dft_c2r_inplace_nocubic(d_data,NZ,NX,NY);
}else{
printf("Please input the correct transform kind\n");
}
cudaDeviceSynchronize();
if((e=cudaGetLastError())!=cudaSuccess){
printf("CUDA ERROR: %s !\n",cudaGetErrorString(e));
exit(0);
}
// printf("%s\n",cudaGetErrorString(cudaGetLastError()));
int mat_size3[3]={NZ+2,NX+2,NY+2};
int permutation3[3]={1,2,0};
cut_transpose3d(d_out,d_data,mat_size3,permutation3,1);
cudaDeviceSynchronize();
}
void freeMemory_cubic(){
if(do_r2c==1)
cufftDestroy(plan);
if(do_c2r==1)
cufftDestroy(plan_c2r);
}
|
8babc4db04043ab9c58558a3012f8d7a628b9d31.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "yoloConfigs.h"
#include "yoloLayer.h"
using namespace Yolo;
namespace nvinfer1 {
YoloLayerPlugin::YoloLayerPlugin(const int cudaThread /*= 512*/)
: mThreadCount(cudaThread) {
mClassCount = CLASS_NUM;
mYoloKernel.clear();
mYoloKernel.push_back(yolo1);
mYoloKernel.push_back(yolo2);
mYoloKernel.push_back(yolo3);
mKernelCount = mYoloKernel.size();
}
YoloLayerPlugin::~YoloLayerPlugin() {
if(mInputBuffer) {
CUDA_CHECK(hipHostFree(mInputBuffer));
}
if(mOutputBuffer) {
CUDA_CHECK(hipHostFree(mOutputBuffer));
}
}
// create the plugin at runtime from a byte stream
YoloLayerPlugin::YoloLayerPlugin(const void* data, size_t length) {
const char *d = reinterpret_cast<const char *>(data), *a = d;
TrtNet::read(d, mClassCount);
TrtNet::read(d, mThreadCount);
TrtNet::read(d, mKernelCount);
mYoloKernel.resize(mKernelCount);
auto kernelSize = mKernelCount * sizeof(YoloKernel);
memcpy(mYoloKernel.data(),d,kernelSize);
d += kernelSize;
assert(d == a + length);
}
void YoloLayerPlugin::serialize(void* buffer) {
char* d = static_cast<char*>(buffer), *a = d;
TrtNet::write(d, mClassCount);
TrtNet::write(d, mThreadCount);
TrtNet::write(d, mKernelCount);
auto kernelSize = mKernelCount*sizeof(YoloKernel);
memcpy(d, mYoloKernel.data(), kernelSize);
d += kernelSize;
assert(d == a + getSerializationSize());
}
size_t YoloLayerPlugin::getSerializationSize() {
return sizeof(mClassCount) + sizeof(mThreadCount) + sizeof(mKernelCount) + sizeof(Yolo::YoloKernel) * mYoloKernel.size();
}
int YoloLayerPlugin::initialize() {
int totalCount = 0;
for(const auto& yolo : mYoloKernel) { // init yolo layer input mem size 13x13x3x(4+1+80)
totalCount += (LOCATIONS + 1 + mClassCount) * yolo.width * yolo.height * CHECK_COUNT;
}
CUDA_CHECK(hipHostMalloc(&mInputBuffer, totalCount * sizeof(float), hipHostMallocDefault));
totalCount = 0; // detection count
for(const auto& yolo : mYoloKernel) { // init yolo layer output mem size 13x13x3x(detection size)
totalCount += yolo.width * yolo.height * CHECK_COUNT;
}
CUDA_CHECK(hipHostMalloc(&mOutputBuffer, sizeof(float) + totalCount * sizeof(Detection), hipHostMallocDefault));
return 0;
}
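// Note: the page-locked buffers allocated above back the CPU fallback in
// forwardCpu: mInputBuffer holds every YOLO head's raw activations
// ((LOCATIONS + 1 + mClassCount) floats per anchor cell) and mOutputBuffer
// holds one float detection count followed by packed Detection records.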
Dims YoloLayerPlugin::getOutputDimensions(int index, const Dims* inputs, int nbInputDims) {
// output the result to channel
int totalCount = 0;
for(const auto& yolo : mYoloKernel) {
totalCount += yolo.width * yolo.height * CHECK_COUNT * sizeof(Detection) / sizeof(float);
}
return Dims3(totalCount + 1, 1, 1);
}
void YoloLayerPlugin::forwardCpu(const float*const * inputs, float* outputs, hipStream_t stream,int batchSize) {
auto Logist = [=](float data) { // sigmoid for x, y, and class probabilities
return 1. / (1. + exp(-data));
};
int totalOutputCount = 0;
int i = 0;
int totalCount = 0;
for(const auto& yolo : mYoloKernel) {
// detection count
totalOutputCount += yolo.width*yolo.height * CHECK_COUNT * sizeof(Detection) / sizeof(float);
// detection element count
totalCount += (LOCATIONS + 1 + mClassCount) * yolo.width*yolo.height * CHECK_COUNT;
++i;
}
for (int idx = 0; idx < batchSize;idx++) {
i = 0;
// + idx * totalCount; // offset needed to support batch sizes > 1
float* inputData = (float*)mInputBuffer;
for(const auto& yolo : mYoloKernel) {
int size = (LOCATIONS + 1 + mClassCount) * yolo.width*yolo.height * CHECK_COUNT;
// move input data from device to host
CUDA_CHECK(hipMemcpyAsync(inputData, (float *)inputs[i] + idx * size, size * sizeof(float), hipMemcpyDeviceToHost, stream));
inputData += size;
++i;
}
CUDA_CHECK(hipStreamSynchronize(stream));
inputData = (float *)mInputBuffer; //+ idx *totalCount; // offset needed to support batch sizes > 1
std::vector<Detection> result;
for (const auto& yolo : mYoloKernel) {
int stride = yolo.width * yolo.height;
for (int j = 0; j < stride; ++j) {
for (int k = 0;k < CHECK_COUNT; ++k ) {
int beginIdx = (LOCATIONS + 1 + mClassCount)* stride *k + j;
int objIndex = beginIdx + LOCATIONS*stride;
// check obj
float objProb = Logist(inputData[objIndex]);
if(objProb <= IGNORE_THRESH) { // first filter by objprob
continue;
}
// classes
int classId = -1;
float maxProb = IGNORE_THRESH;
for (int c = 0; c < mClassCount; ++c) {
float cProb = Logist(inputData[beginIdx + (5 + c) * stride]) * objProb;
if(cProb > maxProb){
maxProb = cProb;
classId = c;
}
}
if(classId >= 0) {
Detection det;
int row = j / yolo.width;
int cols = j % yolo.width;
// Location
det.bbox[0] = (cols + Logist(inputData[beginIdx])) / yolo.width; // x
det.bbox[1] = (row + Logist(inputData[beginIdx+stride])) / yolo.height; // y
det.bbox[2] = exp(inputData[beginIdx+2*stride]) * yolo.anchors[2*k]; // h
det.bbox[3] = exp(inputData[beginIdx+3*stride]) * yolo.anchors[2*k + 1]; // w
det.classId = classId; // cls idx
det.prob = maxProb; // cls prob
result.emplace_back(det);
}
}
}
inputData += (LOCATIONS + 1 + mClassCount) * stride * CHECK_COUNT;
}
int detCount =result.size();
auto data = (float *)mOutputBuffer; // + idx*(totalOutputCount + 1); // offset needed to support batch sizes > 1
float* begin = data;
// copy count;
data[0] = (float)detCount;
data++;
// copy result
memcpy(data, result.data(), result.size()*sizeof(Detection));
// (count + det result)
CUDA_CHECK(hipMemcpyAsync(outputs, begin,sizeof(float) + result.size()*sizeof(Detection), hipMemcpyHostToDevice, stream));
outputs += totalOutputCount + 1;
}
};
__device__ float Logist(float data) {
return 1. / (1. + exp(-data));
};
__global__ void CalDetection(const float *input, float *output,int noElements,
int yoloWidth, int yoloHeight,
const float anchors[CHECK_COUNT*2],
int classes,int outputElem) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx >= noElements) {
return;
}
int stride = yoloWidth * yoloHeight;
int bnIdx = idx / stride;
int curIdx = idx - stride*bnIdx;
const float* curInput = input + bnIdx* ((LOCATIONS + 1 + classes) * stride * CHECK_COUNT);
for (int k = 0;k < CHECK_COUNT; ++k ) {
int beginIdx = (LOCATIONS + 1 + classes)* stride *k + curIdx;
int objIndex = beginIdx + LOCATIONS*stride;
// check objectness
float objProb = Logist(curInput[objIndex]);
if(objProb <= IGNORE_THRESH) {
continue;
}
int row = curIdx / yoloWidth;
int cols = curIdx % yoloWidth;
// classes
int classId = -1;
float maxProb = IGNORE_THRESH;
for (int c = 0; c < classes; ++c){
float cProb = Logist(curInput[beginIdx + (5 + c) * stride]) * objProb;
if(cProb > maxProb){
maxProb = cProb;
classId = c;
}
}
if(classId >= 0) {
float* curOutput = output + bnIdx*outputElem;
int resCount = (int)atomicAdd(curOutput, 1); // running number of detections written for this tensor
char* data = (char * )curOutput + sizeof(float) + resCount*sizeof(Detection);
Detection* det = (Detection*)(data);
// Location
det->bbox[0] = (cols + Logist(curInput[beginIdx]))/ yoloWidth; // x
det->bbox[1] = (row + Logist(curInput[beginIdx+stride]))/ yoloHeight; // y
det->bbox[2] = exp(curInput[beginIdx+2*stride]) * anchors[2*k]; // h
det->bbox[3] = exp(curInput[beginIdx+3*stride]) * anchors[2*k + 1]; // w
det->classId = classId; // cls idx
det->prob = maxProb; // cls prob
}
}
}
void YoloLayerPlugin::forwardGpu(const float *const * inputs,float * output,
hipStream_t stream,int batchSize) {
void* devAnchor;
size_t AnchorLen = sizeof(float)* CHECK_COUNT*2;
CUDA_CHECK(hipMalloc(&devAnchor,AnchorLen));
int outputElem = 1;
for (unsigned int i = 0;i< mYoloKernel.size();++i) {
const auto& yolo = mYoloKernel[i];
outputElem += yolo.width*yolo.height * CHECK_COUNT * sizeof(Detection) / sizeof(float);
}
for(int idx = 0 ;idx < batchSize;++idx) {
CUDA_CHECK(hipMemset(output + idx*outputElem, 0, sizeof(float)));
}
int numElem = 0;
for (unsigned int i = 0;i< mYoloKernel.size();++i) {
const auto& yolo = mYoloKernel[i];
numElem = yolo.width*yolo.height*batchSize;
CUDA_CHECK(hipMemcpy(devAnchor,yolo.anchors,AnchorLen,hipMemcpyHostToDevice));
hipLaunchKernelGGL(( CalDetection), dim3((yolo.width*yolo.height*batchSize + mThreadCount - 1) / mThreadCount), dim3(mThreadCount), 0, 0,
inputs[i],output, numElem, yolo.width, yolo.height, (float *)devAnchor, mClassCount ,outputElem);
}
CUDA_CHECK(hipFree(devAnchor));
}
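// Output layout per batch item: element 0 is the detection count (bumped by
// atomicAdd inside CalDetection) followed by packed Detection records. The
// per-call hipMalloc/hipFree of devAnchor works but could be hoisted into
// initialize() to avoid allocator traffic on every enqueue.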
int YoloLayerPlugin::enqueue(int batchSize, const void*const * inputs, void** outputs,
void* workspace, hipStream_t stream) {
// assert(batchSize == 1);
// GPU
// CUDA_CHECK(hipStreamSynchronize(stream));
forwardGpu((const float *const *)inputs,(float *)outputs[0],stream,batchSize);
// CPU
// forwardCpu((const float *const *)inputs,(float *)outputs[0],stream,batchSize);
return 0;
};
}
| 8babc4db04043ab9c58558a3012f8d7a628b9d31.cu | #include "yoloConfigs.h"
#include "yoloLayer.h"
using namespace Yolo;
namespace nvinfer1 {
YoloLayerPlugin::YoloLayerPlugin(const int cudaThread /*= 512*/)
: mThreadCount(cudaThread) {
mClassCount = CLASS_NUM;
mYoloKernel.clear();
mYoloKernel.push_back(yolo1);
mYoloKernel.push_back(yolo2);
mYoloKernel.push_back(yolo3);
mKernelCount = mYoloKernel.size();
}
YoloLayerPlugin::~YoloLayerPlugin() {
if(mInputBuffer) {
CUDA_CHECK(cudaFreeHost(mInputBuffer));
}
if(mOutputBuffer) {
CUDA_CHECK(cudaFreeHost(mOutputBuffer));
}
}
// create the plugin at runtime from a byte stream
YoloLayerPlugin::YoloLayerPlugin(const void* data, size_t length) {
const char *d = reinterpret_cast<const char *>(data), *a = d;
TrtNet::read(d, mClassCount);
TrtNet::read(d, mThreadCount);
TrtNet::read(d, mKernelCount);
mYoloKernel.resize(mKernelCount);
auto kernelSize = mKernelCount * sizeof(YoloKernel);
memcpy(mYoloKernel.data(),d,kernelSize);
d += kernelSize;
assert(d == a + length);
}
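// Serialization layout: mClassCount, mThreadCount, mKernelCount, then the
// raw YoloKernel array -- the mirror image of the constructor above.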
void YoloLayerPlugin::serialize(void* buffer) {
char* d = static_cast<char*>(buffer), *a = d;
TrtNet::write(d, mClassCount);
TrtNet::write(d, mThreadCount);
TrtNet::write(d, mKernelCount);
auto kernelSize = mKernelCount*sizeof(YoloKernel);
memcpy(d, mYoloKernel.data(), kernelSize);
d += kernelSize;
assert(d == a + getSerializationSize());
}
size_t YoloLayerPlugin::getSerializationSize() {
return sizeof(mClassCount) + sizeof(mThreadCount) + sizeof(mKernelCount) + sizeof(Yolo::YoloKernel) * mYoloKernel.size();
}
int YoloLayerPlugin::initialize() {
int totalCount = 0;
for(const auto& yolo : mYoloKernel) { // init yolo layer input mem size 13x13x3x(4+1+80)
totalCount += (LOCATIONS + 1 + mClassCount) * yolo.width * yolo.height * CHECK_COUNT;
}
CUDA_CHECK(cudaHostAlloc(&mInputBuffer, totalCount * sizeof(float), cudaHostAllocDefault));
totalCount = 0; // detection count
for(const auto& yolo : mYoloKernel) { // init yolo layer output mem size 13x13x3x(detection size)
totalCount += yolo.width * yolo.height * CHECK_COUNT;
}
CUDA_CHECK(cudaHostAlloc(&mOutputBuffer, sizeof(float) + totalCount * sizeof(Detection), cudaHostAllocDefault));
return 0;
}
Dims YoloLayerPlugin::getOutputDimensions(int index, const Dims* inputs, int nbInputDims) {
// output the result to channel
int totalCount = 0;
for(const auto& yolo : mYoloKernel) {
totalCount += yolo.width * yolo.height * CHECK_COUNT * sizeof(Detection) / sizeof(float);
}
return Dims3(totalCount + 1, 1, 1);
}
void YoloLayerPlugin::forwardCpu(const float*const * inputs, float* outputs, cudaStream_t stream,int batchSize) {
auto Logist = [=](float data) { // sigmoid for x, y, and the class probabilities
return 1. / (1. + exp(-data));
};
int totalOutputCount = 0;
int i = 0;
int totalCount = 0;
for(const auto& yolo : mYoloKernel) {
// output detection count
totalOutputCount += yolo.width*yolo.height * CHECK_COUNT * sizeof(Detection) / sizeof(float);
// detection element count
totalCount += (LOCATIONS + 1 + mClassCount) * yolo.width*yolo.height * CHECK_COUNT;
++i;
}
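// Host-side decode path: each scale's raw output is staged into pinned host
// memory, decoded on the CPU, then count + detections are copied back.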
for (int idx = 0; idx < batchSize;idx++) {
i = 0;
// + idx * totalCount; // if create more batch size
float* inputData = (float*)mInputBuffer;
for(const auto& yolo : mYoloKernel) {
int size = (LOCATIONS + 1 + mClassCount) * yolo.width*yolo.height * CHECK_COUNT;
// move input data from device to host
CUDA_CHECK(cudaMemcpyAsync(inputData, (float *)inputs[i] + idx * size, size * sizeof(float), cudaMemcpyDeviceToHost, stream));
inputData += size;
++i;
}
CUDA_CHECK(cudaStreamSynchronize(stream));
inputData = (float *)mInputBuffer ;//+ idx *totalCount; //if create more batch size
std::vector<Detection> result;
for (const auto& yolo : mYoloKernel) {
int stride = yolo.width * yolo.height;
for (int j = 0; j < stride; ++j) {
for (int k = 0;k < CHECK_COUNT; ++k ) {
int beginIdx = (LOCATIONS + 1 + mClassCount)* stride *k + j;
int objIndex = beginIdx + LOCATIONS*stride;
// check obj
float objProb = Logist(inputData[objIndex]);
if(objProb <= IGNORE_THRESH) { // first filter by objprob
continue;
}
// classes
int classId = -1;
float maxProb = IGNORE_THRESH;
for (int c = 0; c < mClassCount; ++c) {
float cProb = Logist(inputData[beginIdx + (5 + c) * stride]) * objProb;
if(cProb > maxProb){
maxProb = cProb;
classId = c;
}
}
if(classId >= 0) {
Detection det;
int row = j / yolo.width;
int cols = j % yolo.width;
// Location
det.bbox[0] = (cols + Logist(inputData[beginIdx])) / yolo.width; // x
det.bbox[1] = (row + Logist(inputData[beginIdx+stride])) / yolo.height; // y
det.bbox[2] = exp(inputData[beginIdx+2*stride]) * yolo.anchors[2*k]; // h
det.bbox[3] = exp(inputData[beginIdx+3*stride]) * yolo.anchors[2*k + 1]; // w
det.classId = classId; // cls idx
det.prob = maxProb; // cls prob
result.emplace_back(det);
}
}
}
inputData += (LOCATIONS + 1 + mClassCount) * stride * CHECK_COUNT;
}
int detCount =result.size();
auto data = (float *)mOutputBuffer; // + idx*(totalOutputCount + 1); //if create more batch size
float* begin = data;
// copy count;
data[0] = (float)detCount;
data++;
// copy result
memcpy(data, result.data(), result.size()*sizeof(Detection));
// (count + det result)
CUDA_CHECK(cudaMemcpyAsync(outputs, begin,sizeof(float) + result.size()*sizeof(Detection), cudaMemcpyHostToDevice, stream));
outputs += totalOutputCount + 1;
}
};
__device__ float Logist(float data) {
return 1. / (1. + exp(-data));
};
__global__ void CalDetection(const float *input, float *output,int noElements,
int yoloWidth, int yoloHeight,
const float anchors[CHECK_COUNT*2],
int classes,int outputElem) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx >= noElements) {
return;
}
int stride = yoloWidth * yoloHeight;
int bnIdx = idx / stride;
int curIdx = idx - stride*bnIdx;
const float* curInput = input + bnIdx* ((LOCATIONS + 1 + classes) * stride * CHECK_COUNT);
for (int k = 0;k < CHECK_COUNT; ++k ) {
int beginIdx = (LOCATIONS + 1 + classes)* stride *k + curIdx;
int objIndex = beginIdx + LOCATIONS*stride;
// check objectness
float objProb = Logist(curInput[objIndex]);
if(objProb <= IGNORE_THRESH) {
continue;
}
int row = curIdx / yoloWidth;
int cols = curIdx % yoloWidth;
// classes
int classId = -1;
float maxProb = IGNORE_THRESH;
for (int c = 0; c < classes; ++c){
float cProb = Logist(curInput[beginIdx + (5 + c) * stride]) * objProb;
if(cProb > maxProb){
maxProb = cProb;
classId = c;
}
}
if(classId >= 0) {
float* curOutput = output + bnIdx*outputElem;
int resCount = (int)atomicAdd(curOutput, 1); // the first float of the output tensor stores the detection count
char* data = (char * )curOutput + sizeof(float) + resCount*sizeof(Detection);
Detection* det = (Detection*)(data);
// Location
det->bbox[0] = (cols + Logist(curInput[beginIdx]))/ yoloWidth; // x
det->bbox[1] = (row + Logist(curInput[beginIdx+stride]))/ yoloHeight; // y
det->bbox[2] = exp(curInput[beginIdx+2*stride]) * anchors[2*k]; // h
det->bbox[3] = exp(curInput[beginIdx+3*stride]) * anchors[2*k + 1]; // w
det->classId = classId; // cls idx
det->prob = maxProb; // cls prob
}
}
}
void YoloLayerPlugin::forwardGpu(const float *const * inputs,float * output,
cudaStream_t stream,int batchSize) {
void* devAnchor;
size_t AnchorLen = sizeof(float)* CHECK_COUNT*2;
CUDA_CHECK(cudaMalloc(&devAnchor,AnchorLen));
int outputElem = 1;
for (unsigned int i = 0;i< mYoloKernel.size();++i) {
const auto& yolo = mYoloKernel[i];
outputElem += yolo.width*yolo.height * CHECK_COUNT * sizeof(Detection) / sizeof(float);
}
for(int idx = 0 ;idx < batchSize;++idx) {
CUDA_CHECK(cudaMemset(output + idx*outputElem, 0, sizeof(float)));
}
int numElem = 0;
for (unsigned int i = 0;i< mYoloKernel.size();++i) {
const auto& yolo = mYoloKernel[i];
numElem = yolo.width*yolo.height*batchSize;
CUDA_CHECK(cudaMemcpy(devAnchor,yolo.anchors,AnchorLen,cudaMemcpyHostToDevice));
CalDetection<<< (yolo.width*yolo.height*batchSize + mThreadCount - 1) / mThreadCount, mThreadCount>>>
(inputs[i],output, numElem, yolo.width, yolo.height, (float *)devAnchor, mClassCount ,outputElem);
}
CUDA_CHECK(cudaFree(devAnchor));
}
int YoloLayerPlugin::enqueue(int batchSize, const void*const * inputs, void** outputs,
void* workspace, cudaStream_t stream) {
// assert(batchSize == 1);
// GPU
// CUDA_CHECK(cudaStreamSynchronize(stream));
forwardGpu((const float *const *)inputs,(float *)outputs[0],stream,batchSize);
// CPU
// forwardCpu((const float *const *)inputs,(float *)outputs[0],stream,batchSize);
return 0;
};
}
|
3b15d8aeb3d6ae196aa9e114aaddbd8e33d01712.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef _BLOBIMAGEPROCESS_KERNEL_H_
#define _BLOBIMAGEPROCESS_KERNEL_H_
#include "cuda_kernels.h"
#define th_sz 32
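// The two helpers below are warp-wide butterfly reductions: each
// __shfl_xor_sync step halves the partner distance, so after log2(32) = 5
// steps every lane of the warp holds the full sum.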
__inline__ __device__ float3 warp_reduce_sum_triple(float3 val) {
for (int offset = th_sz/2; offset > 0; offset /= 2) {
val.x += __shfl_xor_sync(0xffffffff, val.x, offset);
val.y += __shfl_xor_sync(0xffffffff, val.y, offset);
val.z += __shfl_xor_sync(0xffffffff, val.z, offset);
}
return val;
}
__inline__ __device__ float warp_reduce_sum_float(float val) {
for (int offset = th_sz/2; offset > 0; offset /= 2) {
val += __shfl_xor_sync(0xffffffff, val, offset);
}
return val;
}
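// Raw moments to centroid/orientation: input = {M10, M01, M00, M20, M02, M11}.
// The centroid is (M10/M00, M01/M00); the orientation comes from the central
// second moments via atan2(2*mu11, mu20 - mu02) / 2.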
__global__ void compute_centroid(float* __restrict__ output, const float* __restrict__ input) {
output[0] = input[0] / input[2]; // center x
output[1] = input[1] / input[2]; // center y
output[3] = input[2] / 4; // size
float mu_x, mu_y, mu_xy;
mu_x = input[3] / input[2] - output[0] * output[0];
mu_y = input[4] / input[2] - output[1] * output[1];
mu_xy = input[5] / input[2] - output[0] * output[1];
output[2] = atan2f(2 * mu_xy, mu_x - mu_y) / 2; // orientation
}
__global__ void ImageMoment_binarization_kernel(
float* __restrict__ output,
float* __restrict__ input,
const int2 input_size,
const float threshold,
float * __restrict__ d_save
)
{
static __shared__ float shared[th_sz*6]; // warp size
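// One partial sum per warp for each of the six moments
// (x, y, count, x*x, y*y, x*y), laid out as six th_sz-wide banks.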
int lane = threadIdx.x;
int wid = threadIdx.y;
int idx_x = blockIdx.x * blockDim.x + threadIdx.x;
int idx_y = blockIdx.y * blockDim.y + threadIdx.y;
int gridId = blockIdx.y * gridDim.x + blockIdx.x;
int gridSz = gridDim.x*gridDim.y;
float3 g = { 0 };
float3 h = { 0 };
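// Grid-stride loops: each thread accumulates moments over several pixels
// whenever the image is larger than the launched grid.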
for (int y = idx_y; y < input_size.y; y += blockDim.y * gridDim.y) {
for (int x = idx_x; x < input_size.x; x += blockDim.x * gridDim.x) {
int gid = y*input_size.x + x;
d_save[gid] = 0.0f;
if (input[gid] > threshold) {
g.x += x;
g.y += y;
g.z += 1;
h.x += x*x;
h.y += y*y;
h.z += x*y;
d_save[gid] = 255.0f;
}
}
}
float3 g_sum = warp_reduce_sum_triple(g);
float3 h_sum = warp_reduce_sum_triple(h);
if (lane == 0) {
shared[wid] = g_sum.x;
shared[wid + th_sz] = g_sum.y;
shared[wid + 2*th_sz] = g_sum.z;
shared[wid + 3*th_sz] = h_sum.x;
shared[wid + 4*th_sz] = h_sum.y;
shared[wid + 5*th_sz] = h_sum.z;
}
__syncthreads();
if (wid == 0) {
float shared_sum1 = warp_reduce_sum_float(shared[lane]);
float shared_sum2 = warp_reduce_sum_float(shared[lane + th_sz]);
float shared_sum3 = warp_reduce_sum_float(shared[lane + 2*th_sz]);
float shared_sum4 = warp_reduce_sum_float(shared[lane + 3*th_sz]);
float shared_sum5 = warp_reduce_sum_float(shared[lane + 4*th_sz]);
float shared_sum6 = warp_reduce_sum_float(shared[lane + 5*th_sz]);
if (lane == 0) {
output[gridId] = shared_sum1;
output[gridSz + gridId] = shared_sum2;
output[gridSz * 2 + gridId] = shared_sum3;
output[gridSz * 3 + gridId] = shared_sum4;
output[gridSz * 4 + gridId] = shared_sum5;
output[gridSz * 5 + gridId] = shared_sum6;
}
}
}
extern "C" void getImageMoment_GPU(
float* output, // 1024 float size
float* input, // input image
int width, int height,
float threshold,
float* d_save // output binary image
) {
dim3 _threads(th_sz, th_sz);
dim3 _blocks(1, 1);
int2 i_size = { width, height };
ImageMoment_binarization_kernel << <_blocks, _threads >> >(output, input, i_size, threshold, d_save);
compute_centroid<< <1, 1>> > (output + 6, output);
};
#endif
| 3b15d8aeb3d6ae196aa9e114aaddbd8e33d01712.cu | #ifndef _BLOBIMAGEPROCESS_KERNEL_H_
#define _BLOBIMAGEPROCESS_KERNEL_H_
#include "cuda_kernels.h"
#define th_sz 32
__inline__ __device__ float3 warp_reduce_sum_triple(float3 val) {
for (int offset = th_sz/2; offset > 0; offset /= 2) {
val.x += __shfl_xor_sync(0xffffffff, val.x, offset);
val.y += __shfl_xor_sync(0xffffffff, val.y, offset);
val.z += __shfl_xor_sync(0xffffffff, val.z, offset);
}
return val;
}
__inline__ __device__ float warp_reduce_sum_float(float val) {
for (int offset = th_sz/2; offset > 0; offset /= 2) {
val += __shfl_xor_sync(0xffffffff, val, offset);
}
return val;
}
__global__ void compute_centroid(float* __restrict__ output, const float* __restrict__ input) {
output[0] = input[0] / input[2]; // center x
output[1] = input[1] / input[2]; // center y
output[3] = input[2] / 4; // size
float mu_x, mu_y, mu_xy;
mu_x = input[3] / input[2] - output[0] * output[0];
mu_y = input[4] / input[2] - output[1] * output[1];
mu_xy = input[5] / input[2] - output[0] * output[1];
output[2] = atan2f(2 * mu_xy, mu_x - mu_y) / 2; // orientation
}
__global__ void ImageMoment_binarization_kernel(
float* __restrict__ output,
float* __restrict__ input,
const int2 input_size,
const float threshold,
float * __restrict__ d_save
)
{
static __shared__ float shared[th_sz*6]; // warp size
int lane = threadIdx.x;
int wid = threadIdx.y;
int idx_x = blockIdx.x * blockDim.x + threadIdx.x;
int idx_y = blockIdx.y * blockDim.y + threadIdx.y;
int gridId = blockIdx.y * gridDim.x + blockIdx.x;
int gridSz = gridDim.x*gridDim.y;
float3 g = { 0 };
float3 h = { 0 };
for (int y = idx_y; y < input_size.y; y += blockDim.y * gridDim.y) {
for (int x = idx_x; x < input_size.x; x += blockDim.x * gridDim.x) {
int gid = y*input_size.x + x;
d_save[gid] = 0.0f;
if (input[gid] > threshold) {
g.x += x;
g.y += y;
g.z += 1;
h.x += x*x;
h.y += y*y;
h.z += x*y;
d_save[gid] = 255.0f;
}
}
}
float3 g_sum = warp_reduce_sum_triple(g);
float3 h_sum = warp_reduce_sum_triple(h);
if (lane == 0) {
shared[wid] = g_sum.x;
shared[wid + th_sz] = g_sum.y;
shared[wid + 2*th_sz] = g_sum.z;
shared[wid + 3*th_sz] = h_sum.x;
shared[wid + 4*th_sz] = h_sum.y;
shared[wid + 5*th_sz] = h_sum.z;
}
__syncthreads();
if (wid == 0) {
float shared_sum1 = warp_reduce_sum_float(shared[lane]);
float shared_sum2 = warp_reduce_sum_float(shared[lane + th_sz]);
float shared_sum3 = warp_reduce_sum_float(shared[lane + 2*th_sz]);
float shared_sum4 = warp_reduce_sum_float(shared[lane + 3*th_sz]);
float shared_sum5 = warp_reduce_sum_float(shared[lane + 4*th_sz]);
float shared_sum6 = warp_reduce_sum_float(shared[lane + 5*th_sz]);
if (lane == 0) {
output[gridId] = shared_sum1;
output[gridSz + gridId] = shared_sum2;
output[gridSz * 2 + gridId] = shared_sum3;
output[gridSz * 3 + gridId] = shared_sum4;
output[gridSz * 4 + gridId] = shared_sum5;
output[gridSz * 5 + gridId] = shared_sum6;
}
}
}
extern "C" void getImageMoment_GPU(
float* output, // 1024 float size
float* input, // input image
int width, int height,
float threshold,
float* d_save // output binary image
) {
dim3 _threads(th_sz, th_sz);
dim3 _blocks(1, 1);
int2 i_size = { width, height };
ImageMoment_binarization_kernel << <_blocks, _threads >> >(output, input, i_size, threshold, d_save);
compute_centroid<< <1, 1>> > (output + 6, output);
};
#endif
|
1fd90a6d133e75ad10cb262aa3b8770efdb452a6.hip | // !!! This is a file automatically generated by hipify!!!
/* C stuff */
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <string.h>
#include <errno.h>
// C++ stuff
#include <iostream>
#include <fstream>
#include <string>
#include <iomanip>
#include <sstream>
// Open-CV for the vision stuff
//#include <opencv2/opencv.hpp>
/* Cuda stuff */
#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime.h>
typedef unsigned char byte;
typedef byte * pbyte;
clock_t LastProfilingClock=clock();
#define ARCH_NEWLINE "\n"
/***************************************************************************
Writes profiling output (milli-seconds since last call)
***************************************************************************/
extern clock_t LastProfilingClock;
inline float profiling (const char *s, clock_t *whichClock=NULL)
{
if (whichClock==NULL)
whichClock=&LastProfilingClock;
clock_t newClock=clock();
float res = (float) (newClock-*whichClock) / (float) CLOCKS_PER_SEC;
if (s!=NULL)
std::cerr << "Time: " << s << ": " << res << std::endl;
*whichClock = newClock;
return res;
}
inline float profilingTime (const char *s, time_t *whichClock)
{
time_t newTime=time(NULL);
float res = (float) (newTime-*whichClock);
if (s!=NULL)
std::cerr << "Time(real): " << s << ": " << res << std::endl;
return res;
}
/***************************************************************************
CREATES AN EMPTY IMAGE
***************************************************************************/
unsigned char **CREATE_IMAGE (int ysize, int xsize) {
unsigned char ** im;
unsigned char *big;
im = new pbyte [xsize];
big = new byte [xsize*ysize];
for (int i = 0 ; i < xsize ; i++)
im[i] = big + i*ysize;
return (im);
}
/***************************************************************************
Frees an image
***************************************************************************/
void FREE_IMAGE (byte **im)
{
delete [] im[0];
delete [] im;
}
/***************************************************************************
Reads a grayscale image
***************************************************************************/
void readImage (const char *filename, unsigned char***_p2darr, int *_ysize, int *_xsize) {
char *buf;
char shortbuf[256];
short int x, y;
int color, foo;
char c;
FILE * inpic;
int entete, z;
int ysize, xsize;
unsigned char **R;
if ( (inpic = fopen(filename,"r+b")) == NULL)
{
std::cerr << "can't open file '" << filename << "': " << strerror(errno) << std::endl;
exit(1);
}
if (fscanf(inpic,"%c%c\n",&c,&c) != 2)
{
std::cerr << "Image::readGray():\n Wrong Image Format: no .ppm!!\n"
<< "filename: " << filename << std::endl;
exit(2);
}
if (c == '6')
{
z = 3 ;
std::cerr << "Image::readGray():: disabled due to bug.\n"
"Use Image::readColor() + Image::convertRGB2GrayScale() instead\n";
exit(3);
}
else
{
if (c != '5') {
std::cerr << "Image::readGray():: wrong image format: "
"for .ppm only versions P5 and P6 are supported!\n";
exit(4);
}
z = 1 ;
}
fscanf(inpic,"%c",&c) ;
entete = 3 ;
while (c == '#') {
entete++ ;
while (c != '\n') {
entete++ ;
fscanf(inpic,"%c",&c) ;
}
fscanf(inpic,"%c",&c) ;
}
if ( (inpic = freopen(filename,"r+b",inpic)) == NULL) {
std::cerr << "can't open file " << filename << ":" << strerror(errno) << "\n";
exit(5);
}
fread(shortbuf,1,entete,inpic);
if (fscanf(inpic,"%d%d\n%d",&xsize,&ysize,&color) != 3) {
std::cerr << "Internal error (2):" << filename << std::endl;
exit(6);
}
fread(shortbuf,1,1,inpic) ;
buf = new char [z*xsize+10];
R = CREATE_IMAGE(ysize,xsize) ;
for ( y = 0 ; y < ysize ; y++)
{
if ((foo=fread(buf,1,z*xsize,inpic)) != z*xsize)
{
std::ostringstream s;
s << "file " << filename << ":\nrow " << y << " input failure: "
<< "got " << foo << " instead of " << z*xsize << " bytes!\n";
if (!feof(inpic))
s << "No ";
s << "EOF occurred.\n";
if (!ferror(inpic))
s << "No ";
s << "error in the sense of ferror() occurred.\n";
std::cerr << s.str();
exit(7);
}
else
{
if (z == 1)
{
for ( x = 0 ; x < xsize ; x++)
R[x][y] = buf[x] ;
}
else
{
for ( x = 0 ; x < z*xsize ; x += z )
R[x/z][y] = (int)(.299*(float)buf[x] + 0.587*(float)buf[x+1]
+ 0.114*(float)buf[x+2]);
}
}
}
fclose (inpic);
delete [] buf;
*_ysize = ysize;
*_xsize = xsize;
*_p2darr = R;
}
// *************************************************************
// Writes a grayscale image
// *************************************************************
void writeImage(const char *filename, unsigned char **R, int ysize, int xsize)
{
FILE *fp;
char *buf;
short int y, x;
if ((fp=fopen(filename,"w+b"))==NULL)
{
std::cerr << "Cannot create output file '" << filename << "': " << strerror(errno) << "!\n";
exit(1);
}
buf = new char [xsize+10];
sprintf(buf,"P5%s%d %d%s255%s",ARCH_NEWLINE,xsize,ysize,ARCH_NEWLINE,ARCH_NEWLINE) ;
x = strlen(buf);
clearerr(fp);
fwrite(buf,1,x,fp);
if (ferror(fp))
{
std::cerr << "Could not write image to file (Image::writeGray())!\n";
exit(1);
}
for ( y = 0 ; y < ysize ; y++) {
for ( x = 0 ; x < xsize ; x++ ) {
buf[x] = R[x][y];
}
clearerr(fp);
fwrite(buf,1,xsize,fp);
if (ferror(fp))
{
std::cerr << "Could not write image to file (Image::writeGray())!\n";
exit(1);
}
}
delete [] buf;
fclose(fp);
}
/***************************************************************************
USAGE
***************************************************************************/
void usage (char *com)
{
std::cerr<< "usage: " << com << " <inputimagename> <outputimagename>\n";
exit(1);
}
/***************************************************************************
The CPU version
***************************************************************************/
void cpuFilter(unsigned char *in, unsigned char *out, int rows, int cols)
{
// General case
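// 3x3 Gaussian blur, weights [1 2 1; 2 4 2; 1 2 1] / 16, on a
// column-major image (element (x, y) lives at in[x*rows + y]).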
for (int y=1; y<rows-1; ++y)
for (int x=1; x<cols-1; ++x)
{
float f = (
4.0*in[x*rows+y] +
2.0*in[(x-1)*rows+y] +
2.0*in[(x+1)*rows+y] +
2.0*in[x*rows+y+1] +
2.0*in[x*rows+y-1] +
in[(x-1)*rows+y-1] +
in[(x-1)*rows+y+1] +
in[(x+1)*rows+y-1] +
in[(x+1)*rows+y+1]
)/16.0;
if (f<0) f=0;
if (f>255) f=255;
out[x*rows+y] = (unsigned char) f;
}
// Borders
for (int y=0; y<rows; ++y)
{
out[0*rows+y] = in[0*rows+y];
out[(cols-1)*rows+y] = in[(cols-1)*rows+y];
}
for (int x=0; x<cols; ++x)
{
out[x*rows+0] = in[x*rows+0];
out[x*rows+rows-1] = in[x*rows+rows-1];
}
}
/***************************************************************************
The GPU version - the kernel
***************************************************************************/
__global__
void gpuHostRun(int mxWidth, unsigned char* input, unsigned char* output)
{
int x = blockIdx.x*blockDim.x + threadIdx.x; // cols
int y = blockIdx.y*blockDim.y + threadIdx.y; // rows
if(y*mxWidth + x < mxWidth*mxWidth) // bounds check (assumes a square image)
{
if(!(y == mxWidth-1 || y == 0 || x == mxWidth-1 || x == 0)){
float f = (
4.0*input[x*mxWidth+y] +
2.0*input[(x-1)*mxWidth+y] +
2.0*input[(x+1)*mxWidth+y] +
2.0*input[x*mxWidth+y+1] +
2.0*input[x*mxWidth+y-1] +
input[(x-1)*mxWidth+y-1] +
input[(x-1)*mxWidth+y+1] +
input[(x+1)*mxWidth+y-1] +
input[(x+1)*mxWidth+y+1]
)/16.0;
if (f<0) f=0;
if (f>255) f=255;
output[x*mxWidth+y] = (unsigned char) f;
}
else {
output[x*mxWidth+y] = input[x*mxWidth+y];
}
}
}
/***************************************************************************
The GPU version - the host code
***************************************************************************/
void gpuFilter(unsigned char *imarr, unsigned char *resarr, int rows, int cols ) // dimY == nbRows, dimX == nbCol
{
unsigned char *gpuMatrix1; //input
unsigned char *gpuMatrix2; //output
int matrixInByte = rows*cols*sizeof(char);
hipMalloc((void**) &gpuMatrix1, matrixInByte);
hipMalloc((void**) &gpuMatrix2, matrixInByte);
hipError_t ok = hipMemcpy(gpuMatrix1, imarr, matrixInByte, hipMemcpyHostToDevice );
if(ok != hipSuccess)
{
std::cerr <<"*** Could not transfer\n";
exit(1);
}
dim3 dimBlock(32,32);
dim3 dimGrid(cols/dimBlock.x,rows/dimBlock.y);
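// Integer division: assumes rows and cols are multiples of 32; otherwise
// the right/bottom fringe of the image would get no threads.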
hipLaunchKernelGGL(( gpuHostRun), dim3(dimGrid), dim3(dimBlock), 0, 0, cols, gpuMatrix1, gpuMatrix2);
ok = hipMemcpy(resarr, gpuMatrix2, matrixInByte, hipMemcpyDeviceToHost );
if(ok != hipSuccess)
{
std::cerr <<"*** Could not transfer\n";
exit(1);
}
hipFree(gpuMatrix1);
hipFree(gpuMatrix2);
}
/***************************************************************************
Main program
***************************************************************************/
int main (int argc, char **argv)
{
int c;
// Argument processing
while ((c = getopt (argc, argv, "h")) != EOF)
{
switch (c) {
case 'h':
usage(*argv);
break;
case '?':
usage (*argv);
std::cerr << "\n" << "*** Problem parsing the options!\n\n";
exit (1);
}
}
int requiredArgs=2;
if (argc-optind!=requiredArgs)
{
usage (*argv);
exit (1);
}
char *inputfname=argv[optind];
char *outputfname=argv[optind+1];
std::cout << "Reading image " << inputfname << "\n";
unsigned char **image;
int rows;
int cols;
readImage (inputfname, &image, &rows, &cols);
std::cout << "=====================================================\n"
<< "Loaded image of size " << cols << "x" << rows << ".\n";
unsigned char *imarr = *image;
unsigned char *resarr = new unsigned char [cols*rows];
profiling (NULL);
for (int i=0; i<100; ++i)
cpuFilter(imarr, resarr, rows, cols);
profiling ("CPU version");
for (int i=0; i<100; ++i)
gpuFilter(imarr, resarr, rows, cols);
profiling ("GPU version");
// Copy flat array back to image structure
for (int i=0; i<rows*cols; ++i)
imarr[i] = resarr[i];
writeImage (outputfname, image, rows, cols);
std::cout << "Program terminated correctly.\n";
return 0;
}
| 1fd90a6d133e75ad10cb262aa3b8770efdb452a6.cu | /* C stuff */
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <string.h>
#include <errno.h>
// C++ stuff
#include <iostream>
#include <fstream>
#include <string>
#include <iomanip>
#include <sstream>
// Open-CV for the vision stuff
//#include <opencv2/opencv.hpp>
/* Cuda stuff */
#include <cuda_runtime_api.h>
#include <cuda.h>
typedef unsigned char byte;
typedef byte * pbyte;
clock_t LastProfilingClock=clock();
#define ARCH_NEWLINE "\n"
/***************************************************************************
Writes profiling output (milli-seconds since last call)
***************************************************************************/
extern clock_t LastProfilingClock;
inline float profiling (const char *s, clock_t *whichClock=NULL)
{
if (whichClock==NULL)
whichClock=&LastProfilingClock;
clock_t newClock=clock();
float res = (float) (newClock-*whichClock) / (float) CLOCKS_PER_SEC;
if (s!=NULL)
std::cerr << "Time: " << s << ": " << res << std::endl;
*whichClock = newClock;
return res;
}
inline float profilingTime (const char *s, time_t *whichClock)
{
time_t newTime=time(NULL);
float res = (float) (newTime-*whichClock);
if (s!=NULL)
std::cerr << "Time(real): " << s << ": " << res << std::endl;
return res;
}
/***************************************************************************
CREATES AN EMPTY IMAGE
***************************************************************************/
unsigned char **CREATE_IMAGE (int ysize, int xsize) {
unsigned char ** im;
unsigned char *big;
im = new pbyte [xsize];
big = new byte [xsize*ysize];
for (int i = 0 ; i < xsize ; i++)
im[i] = big + i*ysize;
return (im);
}
/***************************************************************************
Frees an image
***************************************************************************/
void FREE_IMAGE (byte **im)
{
delete [] im[0];
delete [] im;
}
/***************************************************************************
Reads a grayscale image
***************************************************************************/
void readImage (const char *filename, unsigned char***_p2darr, int *_ysize, int *_xsize) {
char *buf;
char shortbuf[256];
short int x, y;
int color, foo;
char c;
FILE * inpic;
int entete, z;
int ysize, xsize;
unsigned char **R;
if ( (inpic = fopen(filename,"r+b")) == NULL)
{
std::cerr << "can't open file '" << filename << "': " << strerror(errno) << std::endl;
exit(1);
}
if (fscanf(inpic,"%c%c\n",&c,&c) != 2)
{
std::cerr << "Image::readGray():\n Wrong Image Format: no .ppm!!\n"
<< "filename: " << filename << std::endl;
exit(2);
}
if (c == '6')
{
z = 3 ;
std::cerr << "Image::readGray():: disabled due to bug.\n"
"Use Image::readColor() + Image::convertRGB2GrayScale() instead\n";
exit(3);
}
else
{
if (c != '5') {
std::cerr << "Image::readGray():: wrong image format: "
"for .ppm only versions P5 and P6 are supported!\n";
exit(4);
}
z = 1 ;
}
fscanf(inpic,"%c",&c) ;
entete = 3 ;
while (c == '#') {
entete++ ;
while (c != '\n') {
entete++ ;
fscanf(inpic,"%c",&c) ;
}
fscanf(inpic,"%c",&c) ;
}
if ( (inpic = freopen(filename,"r+b",inpic)) == NULL) {
std::cerr << "can't open file " << filename << ":" << strerror(errno) << "\n";
exit(5);
}
fread(shortbuf,1,entete,inpic);
if (fscanf(inpic,"%d%d\n%d",&xsize,&ysize,&color) != 3) {
std::cerr << "Internal error (2):" << filename << std::endl;
exit(6);
}
fread(shortbuf,1,1,inpic) ;
buf = new char [z*xsize+10];
R = CREATE_IMAGE(ysize,xsize) ;
for ( y = 0 ; y < ysize ; y++)
{
if ((foo=fread(buf,1,z*xsize,inpic)) != z*xsize)
{
std::ostringstream s;
s << "file " << filename << ":\nrow " << y << " input failure: "
<< "got " << foo << " instead of " << z*xsize << " bytes!\n";
if (!feof(inpic))
s << "No ";
s << "EOF occurred.\n";
if (!ferror(inpic))
s << "No ";
s << "error in the sense of ferror() occurred.\n";
std::cerr << s.str();
exit(7);
}
else
{
if (z == 1)
{
for ( x = 0 ; x < xsize ; x++)
R[x][y] = buf[x] ;
}
else
{
for ( x = 0 ; x < z*xsize ; x += z )
R[x/z][y] = (int)(.299*(float)buf[x] + 0.587*(float)buf[x+1]
+ 0.114*(float)buf[x+2]);
}
}
}
fclose (inpic);
delete [] buf;
*_ysize = ysize;
*_xsize = xsize;
*_p2darr = R;
}
// *************************************************************
// Writes a grayscale image
// *************************************************************
void writeImage(const char *filename, unsigned char **R, int ysize, int xsize)
{
FILE *fp;
char *buf;
short int y, x;
if ((fp=fopen(filename,"w+b"))==NULL)
{
std::cerr << "Cannot create output file '" << filename << "': " << strerror(errno) << "!\n";
exit(1);
}
buf = new char [xsize+10];
sprintf(buf,"P5%s%d %d%s255%s",ARCH_NEWLINE,xsize,ysize,ARCH_NEWLINE,ARCH_NEWLINE) ;
x = strlen(buf);
clearerr(fp);
fwrite(buf,1,x,fp);
if (ferror(fp))
{
std::cerr << "Could not write image to file (Image::writeGray())!\n";
exit(1);
}
for ( y = 0 ; y < ysize ; y++) {
for ( x = 0 ; x < xsize ; x++ ) {
buf[x] = R[x][y];
}
clearerr(fp);
fwrite(buf,1,xsize,fp);
if (ferror(fp))
{
std::cerr << "Could not write image to file (Image::writeGray())!\n";
exit(1);
}
}
delete [] buf;
fclose(fp);
}
/***************************************************************************
USAGE
***************************************************************************/
void usage (char *com)
{
std::cerr<< "usage: " << com << " <inputimagename> <outputimagename>\n";
exit(1);
}
/***************************************************************************
The CPU version
***************************************************************************/
void cpuFilter(unsigned char *in, unsigned char *out, int rows, int cols)
{
// General case
for (int y=1; y<rows-1; ++y)
for (int x=1; x<cols-1; ++x)
{
float f = (
4.0*in[x*rows+y] +
2.0*in[(x-1)*rows+y] +
2.0*in[(x+1)*rows+y] +
2.0*in[x*rows+y+1] +
2.0*in[x*rows+y-1] +
in[(x-1)*rows+y-1] +
in[(x-1)*rows+y+1] +
in[(x+1)*rows+y-1] +
in[(x+1)*rows+y+1]
)/16.0;
if (f<0) f=0;
if (f>255) f=255;
out[x*rows+y] = (unsigned char) f;
}
// Borders
for (int y=0; y<rows; ++y)
{
out[0*rows+y] = in[0*rows+y];
out[(cols-1)*rows+y] = in[(cols-1)*rows+y];
}
for (int x=0; x<cols; ++x)
{
out[x*rows+0] = in[x*rows+0];
out[x*rows+rows-1] = in[x*rows+rows-1];
}
}
/***************************************************************************
The GPU version - the kernel
***************************************************************************/
__global__
void gpuHostRun(int mxWidth, unsigned char* input, unsigned char* output)
{
int x = blockIdx.x*blockDim.x + threadIdx.x; // cols
int y = blockIdx.y*blockDim.y + threadIdx.y; // rows
if(y*mxWidth + x < mxWidth*mxWidth) // bounds check (assumes a square image)
{
if(!(y == mxWidth-1 || y == 0 || x == mxWidth-1 || x == 0)){
float f = (
4.0*input[x*mxWidth+y] +
2.0*input[(x-1)*mxWidth+y] +
2.0*input[(x+1)*mxWidth+y] +
2.0*input[x*mxWidth+y+1] +
2.0*input[x*mxWidth+y-1] +
input[(x-1)*mxWidth+y-1] +
input[(x-1)*mxWidth+y+1] +
input[(x+1)*mxWidth+y-1] +
input[(x+1)*mxWidth+y+1]
)/16.0;
if (f<0) f=0;
if (f>255) f=255;
output[x*mxWidth+y] = (unsigned char) f;
}
else {
output[x*mxWidth+y] = input[x*mxWidth+y];
}
}
}
/***************************************************************************
The GPU version - the host code
***************************************************************************/
void gpuFilter(unsigned char *imarr, unsigned char *resarr, int rows, int cols ) // dimY == nbRows, dimX == nbCol
{
unsigned char *gpuMatrix1; //input
unsigned char *gpuMatrix2; //output
int matrixInByte = rows*cols*sizeof(char);
cudaMalloc((void**) &gpuMatrix1, matrixInByte);
cudaMalloc((void**) &gpuMatrix2, matrixInByte);
cudaError_t ok = cudaMemcpy(gpuMatrix1, imarr, matrixInByte, cudaMemcpyHostToDevice );
if(ok != cudaSuccess)
{
std::cerr <<"*** Could not transfer\n";
exit(1);
}
dim3 dimBlock(32,32);
dim3 dimGrid(cols/dimBlock.x,rows/dimBlock.y);
gpuHostRun<<<dimGrid, dimBlock>>>(cols, gpuMatrix1, gpuMatrix2);
ok = cudaMemcpy(resarr, gpuMatrix2, matrixInByte, cudaMemcpyDeviceToHost );
if(ok != cudaSuccess)
{
std::cerr <<"*** Could not transfer\n";
exit(1);
}
cudaFree(gpuMatrix1);
cudaFree(gpuMatrix2);
}
/***************************************************************************
Main program
***************************************************************************/
int main (int argc, char **argv)
{
int c;
// Argument processing
while ((c = getopt (argc, argv, "h")) != EOF)
{
switch (c) {
case 'h':
usage(*argv);
break;
case '?':
usage (*argv);
std::cerr << "\n" << "*** Problem parsing the options!\n\n";
exit (1);
}
}
int requiredArgs=2;
if (argc-optind!=requiredArgs)
{
usage (*argv);
exit (1);
}
char *inputfname=argv[optind];
char *outputfname=argv[optind+1];
std::cout << "Reading image " << inputfname << "\n";
unsigned char **image;
int rows;
int cols;
readImage (inputfname, &image, &rows, &cols);
std::cout << "=====================================================\n"
<< "Loaded image of size " << cols << "x" << rows << ".\n";
unsigned char *imarr = *image;
unsigned char *resarr = new unsigned char [cols*rows];
profiling (NULL);
for (int i=0; i<100; ++i)
cpuFilter(imarr, resarr, rows, cols);
profiling ("CPU version");
for (int i=0; i<100; ++i)
gpuFilter(imarr, resarr, rows, cols);
profiling ("GPU version");
// Copy flat array back to image structure
for (int i=0; i<rows*cols; ++i)
imarr[i] = resarr[i];
writeImage (outputfname, image, rows, cols);
std::cout << "Program terminated correctly.\n";
return 0;
}
|
00473f05bf413b53858eb6cde8eb9729ad49ca81.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <Windows.h>
#include <time.h>
#include <assert.h>
//1 exercise
//2 prac 1-3
//3 Fibonacci
//4 gen
//5 hw1
#define prac 2
#define CUDA_CALL(x) { const hipError_t a = (x); if(a != hipSuccess) { printf("\nCuda Error: %s (err_num=%d) at line:%d\n", hipGetErrorString(a), a, __LINE__); hipDeviceReset(); assert(0);}}
typedef float TIMER_T;
#define USE_CPU_TIMER 1
#define USE_GPU_TIMER 1
#if USE_CPU_TIMER == 1
__int64 start, freq, end;
#define CHECK_TIME_START { QueryPerformanceFrequency((LARGE_INTEGER*)&freq); QueryPerformanceCounter((LARGE_INTEGER*)&start); }
#define CHECK_TIME_END(a) { QueryPerformanceCounter((LARGE_INTEGER*)&end); a = (float)((float)(end - start) / (freq / 1000.0f)); }
#else
#define CHECK_TIME_START
#define CHECK_TIME_END(a)
#endif
#if USE_GPU_TIMER == 1
hipEvent_t cuda_timer_start, cuda_timer_stop;
#define CUDA_STREAM_0 (0)
void create_device_timer()
{
CUDA_CALL(hipEventCreate(&cuda_timer_start));
CUDA_CALL(hipEventCreate(&cuda_timer_stop));
}
void destroy_device_timer()
{
CUDA_CALL(hipEventDestroy(cuda_timer_start));
CUDA_CALL(hipEventDestroy(cuda_timer_stop));
}
inline void start_device_timer()
{
hipEventRecord(cuda_timer_start, CUDA_STREAM_0);
}
inline TIMER_T stop_device_timer()
{
TIMER_T ms;
hipEventRecord(cuda_timer_stop, CUDA_STREAM_0);
hipEventSynchronize(cuda_timer_stop);
hipEventElapsedTime(&ms, cuda_timer_start, cuda_timer_stop);
return ms;
}
#define CHECK_TIME_INIT_GPU() { create_device_timer(); }
#define CHECK_TIME_START_GPU() { start_device_timer(); }
#define CHECK_TIME_END_GPU(a) { a = stop_device_timer(); }
#define CHECK_TIME_DEST_GPU() { destroy_device_timer(); }
#else
#define CHECK_TIME_INIT_GPU()
#define CHECK_TIME_START_GPU()
#define CHECK_TIME_END_GPU(a)
#define CHECK_TIME_DEST_GPU()
#endif
TIMER_T compute_time = 0;
TIMER_T device_time = 0;
/*
if block size is 8, / gpu: 13.84512
if block size is 16, / gpu: 8.617824
if block size is 24, / gpu: 9.498592
if block size is 32, cpu: 79.871399 / gpu: 9.26400
*/
#if prac==1
typedef struct {
int width;
int height;
float *elements;
} Array;
#define MAX_N_ELEMENTS (1 << 25)
void generate_random_float_array(float *array, int n) {
int i;
for (i = 0; i < n; i++) {
array[i] = 3.1415926f*((float)rand() / RAND_MAX);
}
}
void combine_two_arrays(float *x, float *y, float *z, int n) {
int i;
for (i = 0; i < n; i++) {
z[i] = 1.0f / (sin(x[i])*cos(y[i]) + cos(x[i])*sin(y[i]));
}
}
__global__ void CombineTwoArrraysKernel(Array A, Array B, Array C) {
int row = blockDim.y*blockIdx.y + threadIdx.y;
int col = blockDim.x*blockIdx.x + threadIdx.x;
int id = gridDim.x*blockDim.x*row + col;
C.elements[id] = 1.0f / (sin(A.elements[id])*cos(B.elements[id])+ cos(A.elements[id])*sin(B.elements[id]));
}
hipError_t combine_two_arrays_GPU(const Array A, const Array B, Array C);
int BLOCK_SIZE = 32;
int main()
{
int n_elements;
srand((unsigned int)time(NULL));
n_elements = MAX_N_ELEMENTS;
Array A, B, C, G;
A.width = B.width = C.width = G.width =1024;
A.height = B.height = C.height = G.height = MAX_N_ELEMENTS / 1024;
A.elements = (float *)malloc(sizeof(float)*MAX_N_ELEMENTS);
B.elements = (float *)malloc(sizeof(float)*MAX_N_ELEMENTS);
C.elements = (float *)malloc(sizeof(float)*MAX_N_ELEMENTS);
G.elements = (float *)malloc(sizeof(float)*MAX_N_ELEMENTS);
generate_random_float_array(A.elements, MAX_N_ELEMENTS);
generate_random_float_array(B.elements, MAX_N_ELEMENTS);
CHECK_TIME_START;
combine_two_arrays(A.elements, B.elements, C.elements, n_elements);
CHECK_TIME_END(compute_time);
printf("***CPU C[10] = %f/ Time taken = %.6fms\n", C.elements[10], compute_time);
//CHECK_TIME_START;
hipError_t cudaStatus = combine_two_arrays_GPU(A, B, G);
//CHECK_TIME_END(compute_time);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "combine_two_arrays_GPU failed!");
return 1;
}
printf("***GPU G[10] = %f/ Time taken = %.6fms\n", G.elements[10], device_time);
// hipDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
return 1;
}
return 0;
}
hipError_t combine_two_arrays_GPU(const Array A, const Array B, Array C) {
// Think about how to measure the elapsed time as accurately as possible using the timer macros below.
CHECK_TIME_INIT_GPU()
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}///////////// if(cu..... ==CUDA_CALL
Array d_A, d_B, d_C;
size_t size;
d_A.width = A.width; d_A.height = A.height;
size = A.width * A.height * sizeof(float);
CUDA_CALL(hipMalloc(&d_A.elements, size))
CUDA_CALL(hipMemcpy(d_A.elements, A.elements, size, hipMemcpyHostToDevice))
d_B.width = B.width; d_B.height = B.height;
size = B.width * B.height * sizeof(float);
CUDA_CALL(hipMalloc(&d_B.elements, size))
CUDA_CALL(hipMemcpy(d_B.elements, B.elements, size, hipMemcpyHostToDevice))
d_C.width = C.width; d_C.height = C.height;
size = C.width * C.height * sizeof(float);
CUDA_CALL(hipMalloc(&d_C.elements, size))
// Assume that width and height are multiples of BLOCK SIZE.
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(A.width / dimBlock.x, A.height / dimBlock.y);
CHECK_TIME_START_GPU()
hipLaunchKernelGGL(( CombineTwoArrraysKernel) , dim3(dimGrid), dim3(dimBlock) , 0, 0, d_A, d_B, d_C);
CHECK_TIME_END_GPU(device_time)
CUDA_CALL(hipGetLastError())
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
CUDA_CALL(hipDeviceSynchronize())
CUDA_CALL(hipMemcpy(C.elements, d_C.elements, size, hipMemcpyDeviceToHost))
CHECK_TIME_DEST_GPU()
Error:
hipFree(d_A.elements);
hipFree(d_B.elements);
hipFree(d_C.elements);
return cudaStatus;
}
#endif
//N = 4096
//CPU: 11.082584
//Block Size 32 -> 3.419040
//Block Size 24 -> 0.3448
//Block Size 16 -> 0.371936
//Block Size 8 -> 0.369760
#if prac==2
typedef struct {
int width;
int height;
float *elements;
} Array;
int n;
#define BLOCK_SIZE 16
const int ELEM_PER_VECTOR = 32;
float (*pVecX)[ELEM_PER_VECTOR], (*pVecY)[ELEM_PER_VECTOR], (*pVecY_G)[ELEM_PER_VECTOR];
float(*pMatA)[ELEM_PER_VECTOR];
void init_MatVec(void)
{
srand((unsigned)time(NULL));
FILE* fp = fopen("gen.bin", "rb");
fread(&n, sizeof(float), 1, fp);
printf("n: %d\n", n);
pVecX = new float[n][ELEM_PER_VECTOR];
pVecY = new float[n][ELEM_PER_VECTOR];
pVecY_G = new float[n][ELEM_PER_VECTOR];
pMatA = new float[ELEM_PER_VECTOR][ELEM_PER_VECTOR];
fread(pVecX, sizeof(float), n * ELEM_PER_VECTOR, fp);
fread(pMatA, sizeof(float), ELEM_PER_VECTOR * ELEM_PER_VECTOR, fp);
fclose(fp);
}
void Mat_Vec_Multiply()
{
int i, j, k;
float sum;
for( k = 0 ; k < n; k++){
for( i = 0 ; i < ELEM_PER_VECTOR ; i++){
sum = 0;
for(j = 0 ; j < ELEM_PER_VECTOR ; j++){
sum += pMatA[i][j] * pVecX[k][j];
}
pVecY[k][i] = sum;
}
}
}
__global__ void Mat_Vec_Multiply_Kernel(const Array VecX, const Array MatA, Array VecY)
{
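// One thread per output element: y[row][col] = sum_k A[col][k] * x[row][k],
// i.e. each of the n input vectors (rows) is multiplied by the 32x32 matrix.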
int col = threadIdx.x + blockDim.x * blockIdx.x;
int row = threadIdx.y + blockDim.y * blockIdx.y;
float sum = 0.0f;
for (int k = 0; k < ELEM_PER_VECTOR; k++)
sum += MatA.elements[col * ELEM_PER_VECTOR + k] * VecX.elements[row * ELEM_PER_VECTOR + k];
VecY.elements[row * ELEM_PER_VECTOR + col] = sum;
}
void Mat_Vec_Multiply_GPU()
{
// Think about how to measure the elapsed time as accurately as possible using the timer macros below.
CHECK_TIME_INIT_GPU()
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}///////////// if(cu..... ==CUDA_CALL
Array _pVecX, _pMatA, _pVecY_G;
size_t size;
_pVecX.width = ELEM_PER_VECTOR; _pVecX.height = n;
size = n * ELEM_PER_VECTOR * sizeof(float);
CUDA_CALL(hipMalloc(&_pVecX.elements, size))
CUDA_CALL(hipMemcpy(_pVecX.elements, pVecX, size, hipMemcpyHostToDevice))
_pMatA.width = ELEM_PER_VECTOR; _pMatA.height = ELEM_PER_VECTOR;
size = ELEM_PER_VECTOR * ELEM_PER_VECTOR * sizeof(float);
CUDA_CALL(hipMalloc(&_pMatA.elements, size))
CUDA_CALL(hipMemcpy(_pMatA.elements, pMatA, size, hipMemcpyHostToDevice))
_pVecY_G.width = ELEM_PER_VECTOR; _pVecY_G.height = n;
size = n * ELEM_PER_VECTOR * sizeof(float);
CUDA_CALL(hipMalloc(&_pVecY_G.elements, size))
// Assume that width and height are multiples of BLOCK SIZE.
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
//dim3 dimGrid((ELEM_PER_VECTOR *ELEM_PER_VECTOR )/ dimBlock.x, n / dimBlock.y);
dim3 dimGrid(_pVecX.width/ dimBlock.x, _pVecX.height / dimBlock.y); //32 n
CHECK_TIME_START_GPU()
hipLaunchKernelGGL(( Mat_Vec_Multiply_Kernel) , dim3(dimGrid), dim3(dimBlock) , 0, 0, _pVecX, _pMatA, _pVecY_G);
CHECK_TIME_END_GPU(device_time)
CUDA_CALL(hipGetLastError())
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
CUDA_CALL(hipDeviceSynchronize())
CUDA_CALL(hipMemcpy(pVecY_G, _pVecY_G.elements, n * ELEM_PER_VECTOR * sizeof(float), hipMemcpyDeviceToHost))
CHECK_TIME_DEST_GPU()
Error:
hipFree(_pVecX.elements);
hipFree(_pMatA.elements);
hipFree(_pVecY_G.elements);
}
int main()
{
init_MatVec();
printf("n = %d file open ok.\n", n);
CHECK_TIME_START;
Mat_Vec_Multiply();
CHECK_TIME_END(compute_time);
printf("***CPU C[10] = %.3f/ Time taken = %.6fms\n", pVecY[0][0], compute_time);
Mat_Vec_Multiply_GPU();
printf("***GPU C[10] = %.3f/ Time taken = %.6fms\n", pVecY_G[0][0], device_time);
}
#endif
#if prac==3
#define BLOCK_SIZE 8
#define N 67108864 // 8192 * 8192 = 2^13 * 2^13
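// Closed-form (Binet) formula: F(n) = (phi^n - psi^n) / sqrt(5) with
// phi = (1 + sqrt(5))/2, psi = (1 - sqrt(5))/2; the "+ 0.5" rounds the
// float result to the nearest integer.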
int Fibonacci(int n) {
// DO NOT MODIFY THIS FUNCTION!!!
float sqrt_5, x_0, x_1;
float tmp_0, tmp_1;
sqrt_5 = sqrtf(5.0f);
x_0 = (1.0f + sqrt_5) / 2.0f;
x_1 = (1.0f - sqrt_5) / 2.0f;
tmp_0 = tmp_1 = 1.0f;
for (int i = 0; i < n; i++) {
tmp_0 *= x_0;
tmp_1 *= x_1;
}
return (int)((tmp_0 - tmp_1) / sqrt_5 + 0.5);
}
void generate_input(int *x, int n) {
// DO NOT MODIFY THIS FUNCTION!!!
srand((unsigned int)time(NULL));
for (int i = 0; i < n; i++) {
x[i] = 35 + (int)(5.0f * rand() / RAND_MAX + 0.5f);
}
}
__global__ void Fibonacci_Kernel(int *x, int *y)
{
int row = blockDim.y*blockIdx.y + threadIdx.y;
int col = blockDim.x*blockIdx.x + threadIdx.x;
int id = gridDim.x*blockDim.x*row + col;
//int id = threadIdx.x;
float sqrt_5, x_0, x_1;
float tmp_0, tmp_1;
sqrt_5 = sqrtf(5.0f);
x_0 = (1.0f + sqrt_5) / 2.0f;
x_1 = (1.0f - sqrt_5) / 2.0f;
tmp_0 = tmp_1 = 1.0f;
for (int i = 0; i < x[id]; i++) {
tmp_0 *= x_0;
tmp_1 *= x_1;
}
y[id] = (int)((tmp_0 - tmp_1) / sqrt_5 + 0.5);
}
void Fibonacci_GPU(int *x, int *y)
{
CHECK_TIME_INIT_GPU()
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
}
int *_y, *_x;
size_t size = N * sizeof(int);
size_t rtsize = (size_t)(sqrt((float)N));
CUDA_CALL(hipMalloc(&_y, size))
CUDA_CALL(hipMalloc(&_x, size))
CUDA_CALL(hipMemcpy(_x, x, size, hipMemcpyHostToDevice))
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
//dim3 dimGrid( rtsize / dimBlock.x, rtsize / dimBlock.y);
dim3 dimGrid( 1024 / dimBlock.x, (N/1024) / dimBlock.y);
CHECK_TIME_START_GPU()
hipLaunchKernelGGL(( Fibonacci_Kernel) , dim3(dimGrid), dim3(dimBlock) , 0, 0, _x, _y);
CHECK_TIME_END_GPU(device_time)
CUDA_CALL(hipGetLastError())
CUDA_CALL(hipDeviceSynchronize())
CUDA_CALL(hipMemcpy(y, _y, size, hipMemcpyDeviceToHost))
CHECK_TIME_DEST_GPU()
Error:
hipFree(_x);
hipFree(_y);
}
int *x, *y_c, *y_g; // input/output arrays
void main(void) {
int n, i;
FILE *fp;
// Read the input array from the input file if one already exists.
fp = fopen("x.binary", "rb");
if (!fp) {
fprintf(stderr, "Error: cannot open the input file...\n");
exit(-1);
}
fread(&n, sizeof(int), 1, fp);
fprintf(stdout, "\n*** The problem size is %d.\n", n);
x = (int *)malloc(sizeof(int)*n);
if (!x) {
fprintf(stderr, "Error: cannot allocate memory for the input array...\n");
exit(-1);
}
fread(x, sizeof(int), n, fp);
fclose(fp);
y_c = (int *)malloc(sizeof(int)*n);
y_g = (int *)malloc(sizeof(int)*n);
//CPU
CHECK_TIME_START;
for (i = 0; i < n; i++) {
y_c[i] = Fibonacci(x[i]);
}
CHECK_TIME_END(compute_time);
fprintf(stdout, "\n***_CPU_ Time taken for computing %d Fibonacci numbers is %.6fms\n\n", n, compute_time);
//GPU
Fibonacci_GPU(x, y_g);
fprintf(stdout, "\n***_GPU_ Time taken for computing %d Fibonacci numbers is %.6fms\n\n", n, device_time);
i = (int)(n * (rand() / (RAND_MAX + 1.0f)));
fprintf(stdout, "*** Fibonacci number of %d is (CPU :%d , GPU :%d).\n\n", x[i], y_c[i], y_g[i]);
// Write the output array into the output file.
fp = fopen("y.binary", "wb");
if (!fp) {
fprintf(stderr, "Error: cannot open the output file...\n");
exit(-1);
}
fwrite(&n, sizeof(int), 1, fp);
fwrite(y_c, sizeof(int), n, fp);
fclose(fp);
free(x);
free(y_c);
free(y_g);
}
#endif
#if prac==4
const int ELEM_PER_VECTOR = 32;
int main()
{
int n;
srand((unsigned)&n);
printf("Enter a size: ");
scanf("%d", &n);
int size = ELEM_PER_VECTOR * n;
float* vec = new float[size];
for (int i = 0; i < size; ++i)
{
vec[i] = (float(rand()) * 2.f / RAND_MAX) - 1.f;
}
float(*mat)[ELEM_PER_VECTOR] = new float[ELEM_PER_VECTOR][ELEM_PER_VECTOR];
for (int i = 0; i < ELEM_PER_VECTOR; ++i)
{
for (int j = 0; j < ELEM_PER_VECTOR; ++j)
{
mat[i][j] = (float(rand()) * 2.f / RAND_MAX) - 1.f;
}
}
FILE* fp = fopen("gen.bin", "wb");
fwrite(&n, sizeof(float), 1, fp);
fwrite(vec, sizeof(float), size, fp);
fwrite(mat, sizeof(float), ELEM_PER_VECTOR * ELEM_PER_VECTOR, fp);
fclose(fp);
fp = fopen("gen.bin", "rb");
float* vec2 = new float[size];
float(*mat2)[ELEM_PER_VECTOR] = new float[ELEM_PER_VECTOR][ELEM_PER_VECTOR];
int m;
fread(&m, sizeof(float), 1, fp);
fread(vec2, sizeof(float), m * ELEM_PER_VECTOR, fp);
fread(mat2, sizeof(float), ELEM_PER_VECTOR * ELEM_PER_VECTOR, fp);
if (n != m) printf("error: size diff. %n != %n", n, m);
for (int i = 0; i < size; ++i)
{
if (vec[i] != vec2[i])
{
printf("[%d] %f != %f\n", vec[i], vec2[i]);
break;
}
}
for (int i = 0; i < ELEM_PER_VECTOR; ++i)
{
for (int j = 0; j < ELEM_PER_VECTOR; ++j)
{
if (mat[i][j] != mat2[i][j])
{
printf("[%d][%d]\n", i, j);
break;
}
}
}
fclose(fp);
delete[] vec;
return 0;
}
#endif
#if prac==5
#define N_EQUATIONS 1048576
#define BLOCK_SIZE 16
float *A, *B, *C;
void find_roots_CPU(float *A, float *B, float *C, float *X0, float *X1, float *FX0, float *FX1, int n)
{
int i;
float a, b, c, d, x0, x1, tmp;
for (i = 0; i < n; i++) {
a = A[i]; b = B[i]; c = C[i];
d = sqrtf(b*b - 4.0f*a*c);
tmp = 1.0f / (2.0f*a);
X0[i] = x0 = (-b - d) * tmp;
X1[i] = x1 = (-b + d) * tmp;
FX0[i] = (a*x0 + b)*x0 + c;
FX1[i] = (a*x1 + b)*x1 + c;
}
}
__global__ void find_roots_Kernel(float *A, float *B, float *C, float *X0, float *X1, float *FX0, float *FX1, int n)
{
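// One quadratic per thread: the roots of a*x^2 + b*x + c = 0 from the
// closed-form formula, plus residuals f(x0), f(x1) evaluated with Horner's
// scheme as a sanity check.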
int col = threadIdx.x + blockDim.x * blockIdx.x;
int row = threadIdx.y + blockDim.y * blockIdx.y;
int i = gridDim.x*blockDim.x*row + col;
float a, b, c, d, x0, x1, tmp;
a = A[i]; b = B[i]; c = C[i];
d = sqrtf(b*b - 4.0f*a*c);
tmp = 1.0f / (2.0f*a);
X0[i] = x0 = (-b - d) * tmp;
X1[i] = x1 = (-b + d) * tmp;
FX0[i] = (a*x0 + b)*x0 + c;
FX1[i] = (a*x1 + b)*x1 + c;
}
void find_roots_GPU(float *A, float *B, float *C, float *X0, float *X1, float *FX0, float *FX1, int n)
{
CHECK_TIME_INIT_GPU()
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}///////////// if(cu..... ==CUDA_CALL
//Array _pVecX, _pMatA, _pVecY_G;
float *_A, *_B, *_C;
float *_X0, *_X1, *_FX0, *_FX1;
size_t size = n*sizeof(float);
size_t rtsize = (size_t)sqrt(float(n));
CUDA_CALL(hipMalloc(&_A, size))
CUDA_CALL(hipMemcpy(_A, A, size, hipMemcpyHostToDevice))
CUDA_CALL(hipMalloc(&_B, size))
CUDA_CALL(hipMemcpy(_B, B, size, hipMemcpyHostToDevice))
CUDA_CALL(hipMalloc(&_C, size))
CUDA_CALL(hipMemcpy(_C, C, size, hipMemcpyHostToDevice))
CUDA_CALL(hipMalloc(&_X0, size))
CUDA_CALL(hipMalloc(&_X1, size))
CUDA_CALL(hipMalloc(&_FX0, size))
CUDA_CALL(hipMalloc(&_FX1, size))
// Assume that width and height are multiples of BLOCK SIZE.
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
//dim3 dimGrid( rtsize/ dimBlock.x, rtsize / dimBlock.y);
dim3 dimGrid( 512/ dimBlock.x, 2048 / dimBlock.y);
CHECK_TIME_START_GPU()
hipLaunchKernelGGL(( find_roots_Kernel) , dim3(dimGrid), dim3(dimBlock) , 0, 0, _A, _B, _C, _X0, _X1, _FX0, _FX1, n);
CHECK_TIME_END_GPU(device_time)
CUDA_CALL(hipGetLastError())
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
CUDA_CALL(hipDeviceSynchronize())
CUDA_CALL(hipMemcpy(X0, _X0, size, hipMemcpyDeviceToHost))
CUDA_CALL(hipMemcpy(X1, _X1, size, hipMemcpyDeviceToHost))
CUDA_CALL(hipMemcpy(FX0, _FX0, size, hipMemcpyDeviceToHost))
CUDA_CALL(hipMemcpy(FX1, _FX1, size, hipMemcpyDeviceToHost))
CHECK_TIME_DEST_GPU()
Error:
hipFree(_A);
hipFree(_B);
hipFree(_C);
hipFree(_X0);
hipFree(_X1);
hipFree(_FX0);
hipFree(_FX1);
}
void read_poly(){
//float *_A, float *_B, float *_C
int i, n = N_EQUATIONS;
FILE *fA = fopen("A.bin", "rb");
if(!fA){
printf("file open error\n");
exit(-1);
}
fread(&n, sizeof(float), 1, fA);
A = (float*)malloc(sizeof(float)*N_EQUATIONS);
fread(A, sizeof(float), N_EQUATIONS, fA);
fclose(fA);
FILE *fB = fopen("B.bin", "rb");
if(!fB){
printf("file open error\n");
exit(-1);
}
fread(&n, sizeof(float), 1, fB);
B = (float*)malloc(sizeof(float)*N_EQUATIONS);
fread(B, sizeof(float), n, fB);
fclose(fB);
FILE *fC = fopen("C.bin", "rb");
if(!fC){
printf("file open error\n");
exit(-1);
}
fread(&n, sizeof(float), 1, fC);
C = (float*)malloc(sizeof(float)*N_EQUATIONS);
fread(C, sizeof(float), n, fC);
fclose(fC);
}
void write_poly(float *X0, float *X1, float *FX0, float *FX1, int n)
{
FILE *x0, *x1, *fx0, *fx1;
size_t cnt;
x0 = fopen("X0.bin", "wb");
cnt = fwrite (X0, 4, n, x0);
fclose(x0);
x1 = fopen("X1.bin", "wb");
cnt = fwrite (X1, 4, n, x1);
fclose(x1);
fx0 = fopen("FX0.bin", "wb");
cnt = fwrite (FX0, 4, n, fx0);
fclose(fx0);
fx1 = fopen("FX1.bin", "wb");
cnt = fwrite (FX1, 4, n, fx1);
fclose(fx1);
}
int main(){
// float *A, *B, *C;
float *cX0, *cX1, *cFX0, *cFX1, *gX0, *gX1, *gFX0, *gFX1;
int n = N_EQUATIONS;
//read files
read_poly();
printf("n = %d file open ok.\n", n);
//check CPU time
cX0 = (float*)malloc(sizeof(float)*n);
cX1 = (float*)malloc(sizeof(float)*n);
cFX0 = (float*)malloc(sizeof(float)*n);
cFX1 = (float*)malloc(sizeof(float)*n);
CHECK_TIME_START;
find_roots_CPU(A,B,C, cX0, cX1, cFX0, cFX1, n);
CHECK_TIME_END(compute_time);
printf("***CPU Time taken = %.6fms\n", compute_time);
//check GPU time
gX0 = (float*)malloc(sizeof(float)*n);
gX1 = (float*)malloc(sizeof(float)*n);
gFX0 = (float*)malloc(sizeof(float)*n);
gFX1 = (float*)malloc(sizeof(float)*n);
find_roots_GPU(A,B,C, gX0, gX1, gFX0, gFX1, n);
printf("***GPU Time taken = %.6fms\n", device_time);
//check if same result
printf("CPU result X0[1048575], fX0[1048575] = %f, %f\n", cX0[1048575], cFX0[1048575]);
printf("GPU result X0[1048575], fX0[1048575] = %f, %f\n", gX0[1048575], gFX0[1048575]);
//save in file
write_poly(gX0, gX1, gFX0, gFX1, n);
}
#endif
| 00473f05bf413b53858eb6cde8eb9729ad49ca81.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <Windows.h>
#include <time.h>
#include <assert.h>
//1 exercise
//2 prac 1-3
//3 Fibonacci
//4 gen
//5 hw1
#define prac 2
#define CUDA_CALL(x) { const cudaError_t a = (x); if(a != cudaSuccess) { printf("\nCuda Error: %s (err_num=%d) at line:%d\n", cudaGetErrorString(a), a, __LINE__); cudaDeviceReset(); assert(0);}}
typedef float TIMER_T;
#define USE_CPU_TIMER 1
#define USE_GPU_TIMER 1
#if USE_CPU_TIMER == 1
__int64 start, freq, end;
#define CHECK_TIME_START { QueryPerformanceFrequency((LARGE_INTEGER*)&freq); QueryPerformanceCounter((LARGE_INTEGER*)&start); }
#define CHECK_TIME_END(a) { QueryPerformanceCounter((LARGE_INTEGER*)&end); a = (float)((float)(end - start) / (freq / 1000.0f)); }
#else
#define CHECK_TIME_START
#define CHECK_TIME_END(a)
#endif
#if USE_GPU_TIMER == 1
cudaEvent_t cuda_timer_start, cuda_timer_stop;
#define CUDA_STREAM_0 (0)
void create_device_timer()
{
CUDA_CALL(cudaEventCreate(&cuda_timer_start));
CUDA_CALL(cudaEventCreate(&cuda_timer_stop));
}
void destroy_device_timer()
{
CUDA_CALL(cudaEventDestroy(cuda_timer_start));
CUDA_CALL(cudaEventDestroy(cuda_timer_stop));
}
inline void start_device_timer()
{
cudaEventRecord(cuda_timer_start, CUDA_STREAM_0);
}
inline TIMER_T stop_device_timer()
{
TIMER_T ms;
cudaEventRecord(cuda_timer_stop, CUDA_STREAM_0);
cudaEventSynchronize(cuda_timer_stop);
cudaEventElapsedTime(&ms, cuda_timer_start, cuda_timer_stop);
return ms;
}
#define CHECK_TIME_INIT_GPU() { create_device_timer(); }
#define CHECK_TIME_START_GPU() { start_device_timer(); }
#define CHECK_TIME_END_GPU(a) { a = stop_device_timer(); }
#define CHECK_TIME_DEST_GPU() { destroy_device_timer(); }
#else
#define CHECK_TIME_INIT_GPU()
#define CHECK_TIME_START_GPU()
#define CHECK_TIME_END_GPU(a)
#define CHECK_TIME_DEST_GPU()
#endif
TIMER_T compute_time = 0;
TIMER_T device_time = 0;
/*
if block size is 8, / gpu: 13.84512
if block size is 16, / gpu: 8.617824
if block size is 24, / gpu: 9.498592
if block size is 32, cpu: 79.871399 / gpu: 9.26400
*/
#if prac==1
typedef struct {
int width;
int height;
float *elements;
} Array;
#define MAX_N_ELEMENTS (1 << 25)
void generate_random_float_array(float *array, int n) {
int i;
for (i = 0; i < n; i++) {
array[i] = 3.1415926f*((float)rand() / RAND_MAX);
}
}
void combine_two_arrays(float *x, float *y, float *z, int n) {
int i;
for (i = 0; i < n; i++) {
z[i] = 1.0f / (sin(x[i])*cos(y[i]) + cos(x[i])*sin(y[i]));
}
}
__global__ void CombineTwoArrraysKernel(Array A, Array B, Array C) {
int row = blockDim.y*blockIdx.y + threadIdx.y;
int col = blockDim.x*blockIdx.x + threadIdx.x;
int id = gridDim.x*blockDim.x*row + col;
C.elements[id] = 1.0f / (sin(A.elements[id])*cos(B.elements[id])+ cos(A.elements[id])*sin(B.elements[id]));
}
cudaError_t combine_two_arrays_GPU(const Array A, const Array B, Array C);
int BLOCK_SIZE = 32;
int main()
{
int n_elements;
srand((unsigned int)time(NULL));
n_elements = MAX_N_ELEMENTS;
Array A, B, C, G;
A.width = B.width = C.width = G.width =1024;
A.height = B.height = C.height = G.height = MAX_N_ELEMENTS / 1024;
A.elements = (float *)malloc(sizeof(float)*MAX_N_ELEMENTS);
B.elements = (float *)malloc(sizeof(float)*MAX_N_ELEMENTS);
C.elements = (float *)malloc(sizeof(float)*MAX_N_ELEMENTS);
G.elements = (float *)malloc(sizeof(float)*MAX_N_ELEMENTS);
generate_random_float_array(A.elements, MAX_N_ELEMENTS);
generate_random_float_array(B.elements, MAX_N_ELEMENTS);
CHECK_TIME_START;
combine_two_arrays(A.elements, B.elements, C.elements, n_elements);
CHECK_TIME_END(compute_time);
printf("***CPU C[10] = %f/ Time taken = %.6fms\n", C.elements[10], compute_time);
//CHECK_TIME_START;
cudaError_t cudaStatus = combine_two_arrays_GPU(A, B, G);
//CHECK_TIME_END(compute_time);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "combine_two_arrays_GPU failed!");
return 1;
}
printf("***GPU G[10] = %f/ Time taken = %.6fms\n", G.elements[10], device_time);
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
return 0;
}
cudaError_t combine_two_arrays_GPU(const Array A, const Array B, Array C) {
	// Think about how to measure the time as accurately as possible using the functions below.
	Array d_A = { 0, 0, NULL }, d_B = { 0, 0, NULL }, d_C = { 0, 0, NULL };
	size_t size;
	CHECK_TIME_INIT_GPU()
	cudaError_t cudaStatus;
	// Choose which GPU to run on, change this on a multi-GPU system.
	cudaStatus = cudaSetDevice(0);
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
		goto Error; // the device pointers above are NULL, so cudaFree in the cleanup is a safe no-op
	} // this if (cudaStatus != cudaSuccess) check is what CUDA_CALL expands to
d_A.width = A.width; d_A.height = A.height;
size = A.width * A.height * sizeof(float);
CUDA_CALL(cudaMalloc(&d_A.elements, size))
CUDA_CALL(cudaMemcpy(d_A.elements, A.elements, size, cudaMemcpyHostToDevice))
d_B.width = B.width; d_B.height = B.height;
size = B.width * B.height * sizeof(float);
CUDA_CALL(cudaMalloc(&d_B.elements, size))
CUDA_CALL(cudaMemcpy(d_B.elements, B.elements, size, cudaMemcpyHostToDevice))
d_C.width = C.width; d_C.height = C.height;
size = C.width * C.height * sizeof(float);
CUDA_CALL(cudaMalloc(&d_C.elements, size))
// Assume that width and height are multiples of BLOCK SIZE.
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(A.width / dimBlock.x, A.height / dimBlock.y);
CHECK_TIME_START_GPU()
	CombineTwoArraysKernel <<< dimGrid, dimBlock >>> (d_A, d_B, d_C);
CHECK_TIME_END_GPU(device_time)
CUDA_CALL(cudaGetLastError())
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
CUDA_CALL(cudaDeviceSynchronize())
CUDA_CALL(cudaMemcpy(C.elements, d_C.elements, size, cudaMemcpyDeviceToHost))
CHECK_TIME_DEST_GPU()
Error:
cudaFree(d_A.elements);
cudaFree(d_B.elements);
cudaFree(d_C.elements);
return cudaStatus;
}
#endif
//N = 4096
//CPU: 11.082584
//Block Size 32 -> 3.419040
//Block Size 24 -> 0.3448
//Block Size 16 -> 0.371936
//Block Size 8 -> 0.369760
#if prac==2
typedef struct {
int width;
int height;
float *elements;
} Array;
int n;
#define BLOCK_SIZE 16
const int ELEM_PER_VECTOR = 32;
float (*pVecX)[ELEM_PER_VECTOR], (*pVecY)[ELEM_PER_VECTOR], (*pVecY_G)[ELEM_PER_VECTOR];
float(*pMatA)[ELEM_PER_VECTOR];
void init_MatVec(void)
{
srand((unsigned)time(NULL));
FILE* fp = fopen("gen.bin", "rb");
	fread(&n, sizeof(int), 1, fp); // gen.bin stores the count as one 4-byte value
printf("n: %d\n", n);
pVecX = new float[n][ELEM_PER_VECTOR];
pVecY = new float[n][ELEM_PER_VECTOR];
pVecY_G = new float[n][ELEM_PER_VECTOR];
pMatA = new float[ELEM_PER_VECTOR][ELEM_PER_VECTOR];
fread(pVecX, sizeof(float), n * ELEM_PER_VECTOR, fp);
fread(pMatA, sizeof(float), ELEM_PER_VECTOR * ELEM_PER_VECTOR, fp);
fclose(fp);
}
void Mat_Vec_Multiply()
{
int i, j, k;
float sum;
for( k = 0 ; k < n; k++){
for( i = 0 ; i < ELEM_PER_VECTOR ; i++){
sum = 0;
for(j = 0 ; j < ELEM_PER_VECTOR ; j++){
sum += pMatA[i][j] * pVecX[k][j];
}
pVecY[k][i] = sum;
}
}
}
__global__ void Mat_Vec_Multiply_Kernel(const Array VecX, const Array MatA, Array VecY)
{
	int col = threadIdx.x + blockDim.x * blockIdx.x; // element index within the output vector
	int row = threadIdx.y + blockDim.y * blockIdx.y; // which vector of the batch
	int k;
VecY.elements[row*ELEM_PER_VECTOR + col] = 0;
for(k = 0 ; k < ELEM_PER_VECTOR ; k++)
VecY.elements[row*ELEM_PER_VECTOR + col] += (MatA.elements[col * ELEM_PER_VECTOR + k] * VecX.elements[row*ELEM_PER_VECTOR+k]);
}
void Mat_Vec_Multiply_GPU()
{
	// Think about how to measure the time as accurately as possible using the functions below.
	Array _pVecX = { 0, 0, NULL }, _pMatA = { 0, 0, NULL }, _pVecY_G = { 0, 0, NULL };
	size_t size;
	CHECK_TIME_INIT_GPU()
	cudaError_t cudaStatus;
	// Choose which GPU to run on, change this on a multi-GPU system.
	cudaStatus = cudaSetDevice(0);
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
		goto Error; // the device pointers above are NULL, so cudaFree in the cleanup is a safe no-op
	} // this if (cudaStatus != cudaSuccess) check is what CUDA_CALL expands to
_pVecX.width = ELEM_PER_VECTOR; _pVecX.height = n;
size = n * ELEM_PER_VECTOR * sizeof(float);
CUDA_CALL(cudaMalloc(&_pVecX.elements, size))
CUDA_CALL(cudaMemcpy(_pVecX.elements, pVecX, size, cudaMemcpyHostToDevice))
	_pMatA.width = ELEM_PER_VECTOR; _pMatA.height = ELEM_PER_VECTOR; // was _pVecX.height, which clobbered the grid dimensions below
size = ELEM_PER_VECTOR * ELEM_PER_VECTOR * sizeof(float);
CUDA_CALL(cudaMalloc(&_pMatA.elements, size))
CUDA_CALL(cudaMemcpy(_pMatA.elements, pMatA, size, cudaMemcpyHostToDevice))
_pVecY_G.width = ELEM_PER_VECTOR; _pVecY_G.height = n;
size = n * ELEM_PER_VECTOR * sizeof(float);
	CUDA_CALL(cudaMalloc(&_pVecY_G.elements, size)) // byte count must include sizeof(float); n*ELEM_PER_VECTOR under-allocated by 4x
// Assume that width and height are multiples of BLOCK SIZE.
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
//dim3 dimGrid((ELEM_PER_VECTOR *ELEM_PER_VECTOR )/ dimBlock.x, n / dimBlock.y);
dim3 dimGrid(_pVecX.width/ dimBlock.x, _pVecX.height / dimBlock.y); //32 n
CHECK_TIME_START_GPU()
Mat_Vec_Multiply_Kernel <<< dimGrid, dimBlock >>> (_pVecX, _pMatA, _pVecY_G);
CHECK_TIME_END_GPU(device_time)
CUDA_CALL(cudaGetLastError())
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
CUDA_CALL(cudaDeviceSynchronize())
	CUDA_CALL(cudaMemcpy(pVecY_G, _pVecY_G.elements, size, cudaMemcpyDeviceToHost))
CHECK_TIME_DEST_GPU()
Error:
cudaFree(_pVecX.elements);
cudaFree(_pMatA.elements);
cudaFree(_pVecY_G.elements);
}
int main()
{
init_MatVec();
printf("n = %d file open ok.\n", n);
CHECK_TIME_START;
Mat_Vec_Multiply();
CHECK_TIME_END(compute_time);
printf("***CPU C[10] = %.3f/ Time taken = %.6fms\n", pVecY[0][0], compute_time);
Mat_Vec_Multiply_GPU();
printf("***GPU C[10] = %.3f/ Time taken = %.6fms\n", pVecY_G[0][0], device_time);
}
#endif
#if prac==3
#define BLOCK_SIZE 8
#define N 67108864 // 8192 * 8192 = 2^13 * 2^13
int Fibonacci(int n) {
// DO NOT MODIFY THIS FUNCTION!!!
float sqrt_5, x_0, x_1;
float tmp_0, tmp_1;
sqrt_5 = sqrtf(5.0f);
x_0 = (1.0f + sqrt_5) / 2.0f;
x_1 = (1.0f - sqrt_5) / 2.0f;
tmp_0 = tmp_1 = 1.0f;
for (int i = 0; i < n; i++) {
tmp_0 *= x_0;
tmp_1 *= x_1;
}
return (int)((tmp_0 - tmp_1) / sqrt_5 + 0.5);
}
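// This is Binet's closed form F(n) = (phi^n - psi^n)/sqrt(5) with phi = (1+sqrt(5))/2
// and psi = (1-sqrt(5))/2; e.g. Fibonacci(10) == 55. Single-precision powers limit
// how large n can get before the rounded result drifts.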
void generate_input(int *x, int n) {
// DO NOT MODIFY THIS FUNCTION!!!
srand((unsigned int)time(NULL));
for (int i = 0; i < n; i++) {
x[i] = 35 + (int)(5.0f * rand() / RAND_MAX + 0.5f);
}
}
__global__ void Fibonacci_Kernel(int *x, int *y)
{
int row = blockDim.y*blockIdx.y + threadIdx.y;
int col = blockDim.x*blockIdx.x + threadIdx.x;
int id = gridDim.x*blockDim.x*row + col;
//int id = threadIdx.x;
float sqrt_5, x_0, x_1;
float tmp_0, tmp_1;
sqrt_5 = sqrtf(5.0f);
x_0 = (1.0f + sqrt_5) / 2.0f;
x_1 = (1.0f - sqrt_5) / 2.0f;
tmp_0 = tmp_1 = 1.0f;
for (int i = 0; i < x[id]; i++) {
tmp_0 *= x_0;
tmp_1 *= x_1;
}
y[id] = (int)((tmp_0 - tmp_1) / sqrt_5 + 0.5);
}
void Fibonacci_GPU(int *x, int *y)
{
CHECK_TIME_INIT_GPU()
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
}
int *_y, *_x;
size_t size = N * sizeof(int);
size_t rtsize = (size_t)(sqrt((float)N));
CUDA_CALL(cudaMalloc(&_y, size))
CUDA_CALL(cudaMalloc(&_x, size))
CUDA_CALL(cudaMemcpy(_x, x, size, cudaMemcpyHostToDevice))
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
//dim3 dimGrid( rtsize / dimBlock.x, rtsize / dimBlock.y);
dim3 dimGrid( 1024 / dimBlock.x, (N/1024) / dimBlock.y);
CHECK_TIME_START_GPU()
Fibonacci_Kernel <<< dimGrid, dimBlock >>> (_x, _y);
CHECK_TIME_END_GPU(device_time)
CUDA_CALL(cudaGetLastError())
CUDA_CALL(cudaDeviceSynchronize())
CUDA_CALL(cudaMemcpy(y, _y, size, cudaMemcpyDeviceToHost))
CHECK_TIME_DEST_GPU()
cudaFree(_x);
cudaFree(_y);
}
int *x, *y_c, *y_g; // input/output arrays
int main(void) {
int n, i;
FILE *fp;
// Read the input array from the input file if one already exists.
fp = fopen("x.binary", "rb");
if (!fp) {
fprintf(stderr, "Error: cannot open the input file...\n");
exit(-1);
}
fread(&n, sizeof(int), 1, fp);
fprintf(stdout, "\n*** The problem size is %d.\n", n);
x = (int *)malloc(sizeof(int)*n);
if (!x) {
fprintf(stderr, "Error: cannot allocate memory for the input array...\n");
exit(-1);
}
fread(x, sizeof(int), n, fp);
fclose(fp);
y_c = (int *)malloc(sizeof(int)*n);
y_g = (int *)malloc(sizeof(int)*n);
//CPU
CHECK_TIME_START;
for (i = 0; i < n; i++) {
y_c[i] = Fibonacci(x[i]);
}
CHECK_TIME_END(compute_time);
fprintf(stdout, "\n***_CPU_ Time taken for computing %d Fibonacci numbers is %.6fms\n\n", n, compute_time);
//GPU
Fibonacci_GPU(x, y_g);
fprintf(stdout, "\n***_GPU_ Time taken for computing %d Fibonacci numbers is %.6fms\n\n", n, device_time);
i = (int)(n * (rand() / (RAND_MAX + 1.0f)));
fprintf(stdout, "*** Fibonacci number of %d is (CPU :%d , GPU :%d).\n\n", x[i], y_c[i], y_g[i]);
// Write the output array into the output file.
fp = fopen("y.binary", "wb");
if (!fp) {
fprintf(stderr, "Error: cannot open the output file...\n");
exit(-1);
}
fwrite(&n, sizeof(int), 1, fp);
fwrite(y_c, sizeof(int), n, fp);
fclose(fp);
free(x);
free(y_c);
free(y_g);
}
#endif
#if prac==4
const int ELEM_PER_VECTOR = 32;
int main()
{
int n;
	srand((unsigned int)time(NULL));
printf("Enter a size: ");
scanf("%d", &n);
int size = ELEM_PER_VECTOR * n;
float* vec = new float[size];
for (int i = 0; i < size; ++i)
{
vec[i] = (float(rand()) * 2.f / RAND_MAX) - 1.f;
}
float(*mat)[ELEM_PER_VECTOR] = new float[ELEM_PER_VECTOR][ELEM_PER_VECTOR];
for (int i = 0; i < ELEM_PER_VECTOR; ++i)
{
for (int j = 0; j < ELEM_PER_VECTOR; ++j)
{
mat[i][j] = (float(rand()) * 2.f / RAND_MAX) - 1.f;
}
}
FILE* fp = fopen("gen.bin", "wb");
	fwrite(&n, sizeof(int), 1, fp); // store the count as one 4-byte value
fwrite(vec, sizeof(float), size, fp);
fwrite(mat, sizeof(float), ELEM_PER_VECTOR * ELEM_PER_VECTOR, fp);
fclose(fp);
fp = fopen("gen.bin", "rb");
float* vec2 = new float[size];
float(*mat2)[ELEM_PER_VECTOR] = new float[ELEM_PER_VECTOR][ELEM_PER_VECTOR];
int m;
	fread(&m, sizeof(int), 1, fp);
fread(vec2, sizeof(float), m * ELEM_PER_VECTOR, fp);
fread(mat2, sizeof(float), ELEM_PER_VECTOR * ELEM_PER_VECTOR, fp);
if (n != m) printf("error: size diff. %n != %n", n, m);
for (int i = 0; i < size; ++i)
{
if (vec[i] != vec2[i])
{
printf("[%d] %f != %f\n", vec[i], vec2[i]);
break;
}
}
for (int i = 0; i < ELEM_PER_VECTOR; ++i)
{
for (int j = 0; j < ELEM_PER_VECTOR; ++j)
{
if (mat[i][j] != mat2[i][j])
{
printf("[%d][%d]\n", i, j);
break;
}
}
}
fclose(fp);
	delete[] vec;
	delete[] vec2;
	delete[] mat;
	delete[] mat2;
return 0;
}
#endif
#if prac==5
#define N_EQUATIONS 1048576
#define BLOCK_SIZE 16
float *A, *B, *C;
void find_roots_CPU(float *A, float *B, float *C, float *X0, float *X1, float *FX0, float *FX1, int n)
{
int i;
float a, b, c, d, x0, x1, tmp;
for (i = 0; i < n; i++) {
a = A[i]; b = B[i]; c = C[i];
d = sqrtf(b*b - 4.0f*a*c);
tmp = 1.0f / (2.0f*a);
X0[i] = x0 = (-b - d) * tmp;
X1[i] = x1 = (-b + d) * tmp;
FX0[i] = (a*x0 + b)*x0 + c;
FX1[i] = (a*x1 + b)*x1 + c;
}
}
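// FX0/FX1 store f(x) re-evaluated at each computed root using Horner form
// (a*x + b)*x + c, so values near zero indicate accurate roots.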
__global__ void find_roots_Kernel(float *A, float *B, float *C, float *X0, float *X1, float *FX0, float *FX1, int n)
{
int col = threadIdx.x + blockDim.x * blockIdx.x;
int row = threadIdx.y + blockDim.y * blockIdx.y;
	int i = gridDim.x*blockDim.x*row + col;
	if (i >= n) return; // the launch grid below matches n exactly, but guard anyway
float a, b, c, d, x0, x1, tmp;
a = A[i]; b = B[i]; c = C[i];
d = sqrtf(b*b - 4.0f*a*c);
tmp = 1.0f / (2.0f*a);
X0[i] = x0 = (-b - d) * tmp;
X1[i] = x1 = (-b + d) * tmp;
FX0[i] = (a*x0 + b)*x0 + c;
FX1[i] = (a*x1 + b)*x1 + c;
}
void find_roots_GPU(float *A, float *B, float *C, float *X0, float *X1, float *FX0, float *FX1, int n)
{
	//Array _pVecX, _pMatA, _pVecY_G;
	float *_A = NULL, *_B = NULL, *_C = NULL;
	float *_X0 = NULL, *_X1 = NULL, *_FX0 = NULL, *_FX1 = NULL;
	size_t size = n * sizeof(float);
	size_t rtsize = (size_t)sqrt(float(n)); // only used by the commented-out square grid below
	CHECK_TIME_INIT_GPU()
	cudaError_t cudaStatus;
	// Choose which GPU to run on, change this on a multi-GPU system.
	cudaStatus = cudaSetDevice(0);
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
		goto Error; // the device pointers above are NULL, so cudaFree in the cleanup is a safe no-op
	} // this if (cudaStatus != cudaSuccess) check is what CUDA_CALL expands to
CUDA_CALL(cudaMalloc(&_A, size))
CUDA_CALL(cudaMemcpy(_A, A, size, cudaMemcpyHostToDevice))
CUDA_CALL(cudaMalloc(&_B, size))
CUDA_CALL(cudaMemcpy(_B, B, size, cudaMemcpyHostToDevice))
CUDA_CALL(cudaMalloc(&_C, size))
CUDA_CALL(cudaMemcpy(_C, C, size, cudaMemcpyHostToDevice))
CUDA_CALL(cudaMalloc(&_X0, size))
CUDA_CALL(cudaMalloc(&_X1, size))
CUDA_CALL(cudaMalloc(&_FX0, size))
CUDA_CALL(cudaMalloc(&_FX1, size))
// Assume that width and height are multiples of BLOCK SIZE.
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
//dim3 dimGrid( rtsize/ dimBlock.x, rtsize / dimBlock.y);
dim3 dimGrid( 512/ dimBlock.x, 2048 / dimBlock.y);
CHECK_TIME_START_GPU()
find_roots_Kernel <<< dimGrid, dimBlock >>> (_A, _B, _C, _X0, _X1, _FX0, _FX1, n);
CHECK_TIME_END_GPU(device_time)
CUDA_CALL(cudaGetLastError())
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
CUDA_CALL(cudaDeviceSynchronize())
CUDA_CALL(cudaMemcpy(X0, _X0, size, cudaMemcpyDeviceToHost))
CUDA_CALL(cudaMemcpy(X1, _X1, size, cudaMemcpyDeviceToHost))
CUDA_CALL(cudaMemcpy(FX0, _FX0, size, cudaMemcpyDeviceToHost))
CUDA_CALL(cudaMemcpy(FX1, _FX1, size, cudaMemcpyDeviceToHost))
CHECK_TIME_DEST_GPU()
Error:
cudaFree(_A);
cudaFree(_B);
cudaFree(_C);
cudaFree(_X0);
cudaFree(_X1);
cudaFree(_FX0);
cudaFree(_FX1);
}
void read_poly(){
//float *_A, float *_B, float *_C
int i, n = N_EQUATIONS;
FILE *fA = fopen("A.bin", "rb");
if(!fA){
printf("file open error\n");
exit(-1);
}
	fread(&n, sizeof(int), 1, fA); // 4-byte element-count header
	A = (float*)malloc(sizeof(float)*N_EQUATIONS);
	fread(A, sizeof(float), n, fA);
fclose(fA);
FILE *fB = fopen("B.bin", "rb");
if(!fB){
printf("file open error\n");
exit(-1);
}
	fread(&n, sizeof(int), 1, fB); // 4-byte element-count header
B = (float*)malloc(sizeof(float)*N_EQUATIONS);
fread(B, sizeof(float), n, fB);
fclose(fB);
FILE *fC = fopen("C.bin", "rb");
if(!fC){
printf("file open error\n");
exit(-1);
}
	fread(&n, sizeof(int), 1, fC); // 4-byte element-count header
C = (float*)malloc(sizeof(float)*N_EQUATIONS);
fread(C, sizeof(float), n, fC);
fclose(fC);
}
void write_poly(float *X0, float *X1, float *FX0, float *FX1, int n)
{
	FILE *x0, *x1, *fx0, *fx1;
	size_t cnt;
	x0 = fopen("X0.bin", "wb");
	cnt = fwrite(X0, sizeof(float), n, x0);
	fclose(x0);
	x1 = fopen("X1.bin", "wb");
	cnt = fwrite(X1, sizeof(float), n, x1);
	fclose(x1);
	fx0 = fopen("FX0.bin", "wb");
	cnt = fwrite(FX0, sizeof(float), n, fx0);
	fclose(fx0);
	fx1 = fopen("FX1.bin", "wb");
	cnt = fwrite(FX1, sizeof(float), n, fx1);
	fclose(fx1);
}
int main(){
// float *A, *B, *C;
float *cX0, *cX1, *cFX0, *cFX1, *gX0, *gX1, *gFX0, *gFX1;
int n = N_EQUATIONS;
//read files
read_poly();
printf("n = %d file open ok.\n", n);
//check CPU time
cX0 = (float*)malloc(sizeof(float)*n);
cX1 = (float*)malloc(sizeof(float)*n);
cFX0 = (float*)malloc(sizeof(float)*n);
cFX1 = (float*)malloc(sizeof(float)*n);
CHECK_TIME_START;
find_roots_CPU(A,B,C, cX0, cX1, cFX0, cFX1, n);
CHECK_TIME_END(compute_time);
printf("***CPU Time taken = %.6fms\n", compute_time);
//check GPU time
gX0 = (float*)malloc(sizeof(float)*n);
gX1 = (float*)malloc(sizeof(float)*n);
gFX0 = (float*)malloc(sizeof(float)*n);
gFX1 = (float*)malloc(sizeof(float)*n);
find_roots_GPU(A,B,C, gX0, gX1, gFX0, gFX1, n);
printf("***GPU Time taken = %.6fms\n", device_time);
//check if same result
printf("CPU result X0[1048575], fX0[1048575] = %f, %f\n", cX0[1048575], cFX0[1048575]);
printf("GPU result X0[1048575], fX0[1048575] = %f, %f\n", gX0[1048575], gFX0[1048575]);
//save in file
write_poly(gX0, gX1, gFX0, gFX1, n);
}
#endif
|
cd509e8a434eb866a5edc9304a248f66cb795312.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Vectorization.cuh"
//__constant__ unsigned int LineSize;
//__constant__ unsigned int SearchLength;
//
//void setLineSize(unsigned int line_size, hipStream_t& stream) {
// hipMemcpyToSymbolAsync((const void*)&LineSize, (const void*)&line_size, sizeof(unsigned int), 0, hipMemcpyHostToDevice, stream);
//}
//
//void setSearchLength(unsigned int search_length, hipStream_t& stream) {
// hipMemcpyToSymbolAsync((const void*)&SearchLength, (const void*)&search_length, sizeof(unsigned int), 0, hipMemcpyHostToDevice, stream);
//}
__global__ void clearArray(
unsigned char* arr,
	const unsigned int length)
{
unsigned int offset = blockDim.x * blockIdx.x + threadIdx.x;
unsigned int skip = gridDim.x * blockDim.x;
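	// Grid-stride loop: each thread clears every (gridDim.x * blockDim.x)-th
	// element, so any launch configuration covers the whole array.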
	while (offset < length)
{
arr[offset] = 0;
offset += skip;
}
}
__global__ void splitLine(
unsigned char* __restrict__ lines,
	const unsigned int lines_length,
const unsigned int line_size,
unsigned char* strings,
unsigned int* lengths)
{
unsigned int offset = blockDim.x * blockIdx.x + threadIdx.x;
	if (offset < lines_length)
{
unsigned int ol = offset * LINE_SIZE;
unsigned int op = offset * LINE_PARTS;
const unsigned char split = ',', escape = '\"';
unsigned int part = 0;
unsigned int len = 0;
unsigned int pos = 0;
unsigned char c_old = 0;
bool escaped = false;
bool do_split = false;
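		// Branch-free CSV scan: `escaped` turns on at a  ,"  transition and off at a
		// ",  transition; a comma outside quotes advances `part`, and each field is
		// packed into its own fixed slot via pos = part * ITEM_SIZE + len.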
for (unsigned int i = 0; i < line_size && pos < LINE_SIZE; i++)
{
const unsigned char c = lines[ol + i];
strings[ol + pos] = c;
escaped = (c_old == escape && c == split) ? false : escaped;
escaped = (c_old == split && c == escape) ? true : escaped;
do_split = c == split && !escaped;
part += do_split ? 1 : 0;
len = do_split ? 0 : (len + 1);
lengths[op + part] = len;
pos = part * ITEM_SIZE + len;
c_old = c;
}
}
}
__global__ void findPattern(
unsigned char* __restrict__ strings,
const unsigned int part,
	const unsigned int strings_length,
const unsigned int search_size,
unsigned char* __restrict__ patterns,
	const unsigned int patterns_length,
const unsigned int results_offset,
RESULT_TYPE* results)
{
unsigned int offset_string = blockDim.x * blockIdx.x + threadIdx.x;
	if (offset_string < strings_length)
{
// strings: partA1, partA2, ... , partAN, partB1, partB2, ...
// ------------ LINE ----------, -ITEM-, -ITEM-
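		// Here `part` is the byte offset of the wanted item inside the transposed
		// line, i.e. a multiple of ITEM_SIZE as laid out by splitLine above.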
unsigned int os = offset_string * LINE_SIZE + part;
unsigned int result = 0;
		for (unsigned int pattern = 0; pattern < patterns_length; pattern++)
{
unsigned int op = pattern * ITEM_SIZE;
unsigned int p = 0;
bool found = false;
for (unsigned int i = 0; i < search_size; i++)
{
const unsigned char a = strings[os + i];
const unsigned char b = patterns[op + p];
				found = found || (b == EMPTY_CHAR && p > 0); // a match completes when the terminator is reached with p > 0
p = (a == b) ? (p + 1) : 0;
}
result += found;
}
results[offset_string * RESULT_VALUES + results_offset] = result > 0;
}
}
__global__ void countChars(
unsigned char* __restrict__ strings,
const unsigned int part,
	const unsigned int strings_length,
const unsigned int search_size,
const unsigned int results_offset,
RESULT_TYPE* results)
{
unsigned int offset_string = blockDim.x * blockIdx.x + threadIdx.x;
	if (offset_string < strings_length)
{
// strings: partA1, partA2, ... , partAN, partB1, partB2, ...
// ------------ LINE ----------, -ITEM-, -ITEM-
unsigned int os = offset_string * LINE_SIZE + part;
const unsigned char whites_c[] = { ' ', '\f', '\n', '\r', '\t', '\v' };
		RESULT_TYPE counts[5] = { 0, 0, 0, 0, 0 }; // zero-initialize: every slot is accumulated with += below
for (unsigned int i = 0; i < search_size; i++)
{
const unsigned char c = strings[os + i];
counts[0] += (c >= 'a' && c <= 'z') ? 1 : 0;
counts[1] += (c >= 'A' && c <= 'Z') ? 1 : 0;
counts[2] += (c >= '0' && c <= '9') ? 1 : 0;
bool is_white = false;
for (unsigned int w = 0; w < 6; w++) {
counts[3] += c == whites_c[w] ? 1 : 0;
is_white = c == whites_c[w] ? true : is_white;
}
bool is_not_special = is_white;
is_not_special = (c >= 'a' && c <= 'z') ? true : is_not_special;
is_not_special = (c >= 'A' && c <= 'Z') ? true : is_not_special;
is_not_special = (c >= '0' && c <= '9') ? true : is_not_special;
is_not_special = c == EMPTY_CHAR ? true : is_not_special;
is_not_special = c == 0 ? true : is_not_special;
counts[4] += is_not_special ? 0 : 1;
}
os = offset_string * RESULT_VALUES + results_offset;
for (unsigned int i = 0; i < 5; i++)
results[os + i] = counts[i];
}
}
__global__ void stringToInt(
unsigned char* __restrict__ strings,
const unsigned int part,
	const unsigned int strings_length,
const unsigned int search_size,
const unsigned int results_offset,
RESULT_TYPE* results)
{
unsigned int offset = blockDim.x * blockIdx.x + threadIdx.x;
	if (offset < strings_length)
{
// strings: partA1, partA2, ... , partAN, partB1, partB2, ...
// ------------ LINE ----------, -ITEM-, -ITEM-
unsigned int os = offset * LINE_SIZE + part;
const unsigned char zero = '0';
const unsigned char nine = '9';
RESULT_TYPE val = 0;
// get int
for (unsigned int i = 0; i < search_size; i++)
{
const unsigned char c = strings[os + i];
val = (c >= zero && c <= nine) ? (val * 10 + c - zero) : val;
}
results[offset * RESULT_VALUES + results_offset] = val;
}
}
__global__ void dateToInt(
unsigned char* __restrict__ strings,
const unsigned int part,
	const unsigned int strings_length,
const unsigned int results_offset,
RESULT_TYPE* results)
{
unsigned int offset = blockDim.x * blockIdx.x + threadIdx.x;
	if (offset < strings_length)
{
// strings: partA1, partA2, ... , partAN, partB1, partB2, ...
// ------------ LINE ----------, -ITEM-, -ITEM-
unsigned int os = offset * LINE_SIZE + part;
const unsigned char zero = '0';
const unsigned char nine = '9';
		RESULT_TYPE values[8] = { 0, 0, 0, 0, 0, 0, 0, 0 }; // zero-initialize the parsed date fields
		int type = -1; // the first separator bumps this to 0; assumes the field starts with a non-digit, otherwise values[-1] would be written
// get int
for (unsigned int i = 0; i < 20; i++)
{
const unsigned char c = strings[os + i];
bool number = c >= zero && c <= nine;
type += number ? 0 : 1;
values[type] = number ? (values[type] * 10 + c - zero) : 0;
}
os = offset * RESULT_VALUES + results_offset;
for (unsigned int i = 0; i < 6; i++)
results[os + i] = values[i];
}
}
__global__ void copyTo(
	unsigned int* __restrict__ input,
	const unsigned int input_padding,
	const unsigned int input_offset,
	const unsigned int input_length,
	const unsigned int change,
	const unsigned int results_padding,
	const unsigned int results_offset,
	RESULT_TYPE* results)
{
	unsigned int offset = blockDim.x * blockIdx.x + threadIdx.x;
	unsigned int skip = gridDim.x * blockDim.x;
	while (offset < input_length)
	{
		unsigned int oi = offset * input_padding + input_offset;
		unsigned int oo = offset * results_padding + results_offset;
		results[oo] = input[oi] + change;
		offset += skip;
	}
}
__global__ void clasify(
	RESULT_TYPE* results,
	const unsigned int lines,
	const unsigned int classificator_offset,
	const unsigned int results_offset) //relative
{
	unsigned int offset = blockDim.x * blockIdx.x + threadIdx.x;
	if (offset < lines)
	{
		// note: `or` is an alternative token for || in standard C++, so the
		// original variable name `or` would not compile; use `ridx` instead
		unsigned int ridx = offset * RESULT_VALUES + classificator_offset;
		RESULT_TYPE category = 0; // neutral
		category = (results[ridx + 5] > 0) ? 1 : 0; // user
		bool dangerous = results[ridx] > 0;
		category = dangerous ? 2 : category; // dangerous url
		if (!dangerous)
		{
			bool safe = results[ridx + 1] > 0; // safe url
			bool trusted = results[ridx + 3] > 0; // agent trusted
			category = safe ? 5 : category; // guest
			category = safe && trusted ? 4 : category; // trusted
			category = (results[ridx + 3] > 0) ? 3 : category; // Search engine
		}
		results[ridx + results_offset] = category;
	}
}
__global__ void sanitize(
	unsigned char* __restrict__ strings,
	const unsigned int part,
	const unsigned int strings_length,
	const unsigned int search_size,
	const unsigned int results_offset,
	unsigned char* results)
{
	unsigned int offset = blockDim.x * blockIdx.x + threadIdx.x;
	if (offset < strings_length)
	{
		// strings: partA1, partA2, ... , partAN, partB1, partB2, ...
		//          ------------ LINE ----------, -ITEM-, -ITEM-
		unsigned int os = offset * LINE_SIZE + part;
		// `or` is an alternative token for || in standard C++; use `ridx` instead
		unsigned int ridx = offset * TEXT_RESULT_VALUES_SIZE + results_offset;
		const unsigned char delimiter = ','; // strip delimiters so the field can be re-emitted safely
		for (unsigned int i = 0; i < search_size; i++)
		{
			const unsigned char s = strings[os + i];
			results[ridx] = s == delimiter ? results[ridx] : s;
			ridx += s == delimiter ? 0 : 1;
		}
	}
} | cd509e8a434eb866a5edc9304a248f66cb795312.cu | #include "Vectorization.cuh"
//__constant__ unsigned int LineSize;
//__constant__ unsigned int SearchLength;
//
//void setLineSize(unsigned int line_size, cudaStream_t& stream) {
// cudaMemcpyToSymbolAsync((const void*)&LineSize, (const void*)&line_size, sizeof(unsigned int), 0, cudaMemcpyHostToDevice, stream);
//}
//
//void setSearchLength(unsigned int search_length, cudaStream_t& stream) {
// cudaMemcpyToSymbolAsync((const void*)&SearchLength, (const void*)&search_length, sizeof(unsigned int), 0, cudaMemcpyHostToDevice, stream);
//}
__global__ void clearArray(
unsigned char* arr,
	const unsigned int length)
{
unsigned int offset = blockDim.x * blockIdx.x + threadIdx.x;
unsigned int skip = gridDim.x * blockDim.x;
	while (offset < length)
{
arr[offset] = 0;
offset += skip;
}
}
__global__ void splitLine(
unsigned char* __restrict__ lines,
	const unsigned int lines_length,
const unsigned int line_size,
unsigned char* strings,
unsigned int* lengths)
{
unsigned int offset = blockDim.x * blockIdx.x + threadIdx.x;
	if (offset < lines_length)
{
unsigned int ol = offset * LINE_SIZE;
unsigned int op = offset * LINE_PARTS;
const unsigned char split = ',', escape = '\"';
unsigned int part = 0;
unsigned int len = 0;
unsigned int pos = 0;
unsigned char c_old = 0;
bool escaped = false;
bool do_split = false;
for (unsigned int i = 0; i < line_size && pos < LINE_SIZE; i++)
{
const unsigned char c = lines[ol + i];
strings[ol + pos] = c;
escaped = (c_old == escape && c == split) ? false : escaped;
escaped = (c_old == split && c == escape) ? true : escaped;
do_split = c == split && !escaped;
part += do_split ? 1 : 0;
len = do_split ? 0 : (len + 1);
lengths[op + part] = len;
pos = part * ITEM_SIZE + len;
c_old = c;
}
}
}
__global__ void findPattern(
unsigned char* __restrict__ strings,
const unsigned int part,
	const unsigned int strings_length,
const unsigned int search_size,
unsigned char* __restrict__ patterns,
	const unsigned int patterns_length,
const unsigned int results_offset,
RESULT_TYPE* results)
{
unsigned int offset_string = blockDim.x * blockIdx.x + threadIdx.x;
	if (offset_string < strings_length)
{
// strings: partA1, partA2, ... , partAN, partB1, partB2, ...
// ------------ LINE ----------, -ITEM-, -ITEM-
unsigned int os = offset_string * LINE_SIZE + part;
unsigned int result = 0;
		for (unsigned int pattern = 0; pattern < patterns_length; pattern++)
{
unsigned int op = pattern * ITEM_SIZE;
unsigned int p = 0;
bool found = false;
for (unsigned int i = 0; i < search_size; i++)
{
const unsigned char a = strings[os + i];
const unsigned char b = patterns[op + p];
				found = found || (b == EMPTY_CHAR && p > 0); // a match completes when the terminator is reached with p > 0
p = (a == b) ? (p + 1) : 0;
}
result += found;
}
results[offset_string * RESULT_VALUES + results_offset] = result > 0;
}
}
__global__ void countChars(
unsigned char* __restrict__ strings,
const unsigned int part,
	const unsigned int strings_length,
const unsigned int search_size,
const unsigned int results_offset,
RESULT_TYPE* results)
{
unsigned int offset_string = blockDim.x * blockIdx.x + threadIdx.x;
	if (offset_string < strings_length)
{
// strings: partA1, partA2, ... , partAN, partB1, partB2, ...
// ------------ LINE ----------, -ITEM-, -ITEM-
unsigned int os = offset_string * LINE_SIZE + part;
const unsigned char whites_c[] = { ' ', '\f', '\n', '\r', '\t', '\v' };
		RESULT_TYPE counts[5] = { 0, 0, 0, 0, 0 }; // zero-initialize: every slot is accumulated with += below
for (unsigned int i = 0; i < search_size; i++)
{
const unsigned char c = strings[os + i];
counts[0] += (c >= 'a' && c <= 'z') ? 1 : 0;
counts[1] += (c >= 'A' && c <= 'Z') ? 1 : 0;
counts[2] += (c >= '0' && c <= '9') ? 1 : 0;
bool is_white = false;
for (unsigned int w = 0; w < 6; w++) {
counts[3] += c == whites_c[w] ? 1 : 0;
is_white = c == whites_c[w] ? true : is_white;
}
bool is_not_special = is_white;
is_not_special = (c >= 'a' && c <= 'z') ? true : is_not_special;
is_not_special = (c >= 'A' && c <= 'Z') ? true : is_not_special;
is_not_special = (c >= '0' && c <= '9') ? true : is_not_special;
is_not_special = c == EMPTY_CHAR ? true : is_not_special;
is_not_special = c == 0 ? true : is_not_special;
counts[4] += is_not_special ? 0 : 1;
}
os = offset_string * RESULT_VALUES + results_offset;
for (unsigned int i = 0; i < 5; i++)
results[os + i] = counts[i];
}
}
__global__ void stringToInt(
unsigned char* __restrict__ strings,
const unsigned int part,
	const unsigned int strings_length,
const unsigned int search_size,
const unsigned int results_offset,
RESULT_TYPE* results)
{
unsigned int offset = blockDim.x * blockIdx.x + threadIdx.x;
	if (offset < strings_length)
{
// strings: partA1, partA2, ... , partAN, partB1, partB2, ...
// ------------ LINE ----------, -ITEM-, -ITEM-
unsigned int os = offset * LINE_SIZE + part;
const unsigned char zero = '0';
const unsigned char nine = '9';
RESULT_TYPE val = 0;
// get int
for (unsigned int i = 0; i < search_size; i++)
{
const unsigned char c = strings[os + i];
val = (c >= zero && c <= nine) ? (val * 10 + c - zero) : val;
}
results[offset * RESULT_VALUES + results_offset] = val;
}
}
__global__ void dateToInt(
unsigned char* __restrict__ strings,
const unsigned int part,
	const unsigned int strings_length,
const unsigned int results_offset,
RESULT_TYPE* results)
{
unsigned int offset = blockDim.x * blockIdx.x + threadIdx.x;
	if (offset < strings_length)
{
// strings: partA1, partA2, ... , partAN, partB1, partB2, ...
// ------------ LINE ----------, -ITEM-, -ITEM-
unsigned int os = offset * LINE_SIZE + part;
const unsigned char zero = '0';
const unsigned char nine = '9';
		RESULT_TYPE values[8] = { 0, 0, 0, 0, 0, 0, 0, 0 }; // zero-initialize the parsed date fields
		int type = -1; // the first separator bumps this to 0; assumes the field starts with a non-digit, otherwise values[-1] would be written
// get int
for (unsigned int i = 0; i < 20; i++)
{
const unsigned char c = strings[os + i];
bool number = c >= zero && c <= nine;
type += number ? 0 : 1;
values[type] = number ? (values[type] * 10 + c - zero) : 0;
}
os = offset * RESULT_VALUES + results_offset;
for (unsigned int i = 0; i < 6; i++)
results[os + i] = values[i];
}
}
__global__ void copyTo(
	unsigned int* __restrict__ input,
	const unsigned int input_padding,
	const unsigned int input_offset,
	const unsigned int input_length,
	const unsigned int change,
	const unsigned int results_padding,
	const unsigned int results_offset,
	RESULT_TYPE* results)
{
	unsigned int offset = blockDim.x * blockIdx.x + threadIdx.x;
	unsigned int skip = gridDim.x * blockDim.x;
	while (offset < input_length)
	{
		unsigned int oi = offset * input_padding + input_offset;
		unsigned int oo = offset * results_padding + results_offset;
		results[oo] = input[oi] + change;
		offset += skip;
	}
}
__global__ void clasify(
	RESULT_TYPE* results,
	const unsigned int lines,
	const unsigned int classificator_offset,
	const unsigned int results_offset) //relative
{
	unsigned int offset = blockDim.x * blockIdx.x + threadIdx.x;
	if (offset < lines)
	{
		// note: `or` is an alternative token for || in standard C++, so the
		// original variable name `or` would not compile; use `ridx` instead
		unsigned int ridx = offset * RESULT_VALUES + classificator_offset;
		RESULT_TYPE category = 0; // neutral
		category = (results[ridx + 5] > 0) ? 1 : 0; // user
		bool dangerous = results[ridx] > 0;
		category = dangerous ? 2 : category; // dangerous url
		if (!dangerous)
		{
			bool safe = results[ridx + 1] > 0; // safe url
			bool trusted = results[ridx + 3] > 0; // agent trusted
			category = safe ? 5 : category; // guest
			category = safe && trusted ? 4 : category; // trusted
			category = (results[ridx + 3] > 0) ? 3 : category; // Search engine
		}
		results[ridx + results_offset] = category;
	}
}
__global__ void sanitize(
	unsigned char* __restrict__ strings,
	const unsigned int part,
	const unsigned int strings_length,
	const unsigned int search_size,
	const unsigned int results_offset,
	unsigned char* results)
{
	unsigned int offset = blockDim.x * blockIdx.x + threadIdx.x;
	if (offset < strings_length)
	{
		// strings: partA1, partA2, ... , partAN, partB1, partB2, ...
		//          ------------ LINE ----------, -ITEM-, -ITEM-
		unsigned int os = offset * LINE_SIZE + part;
		// `or` is an alternative token for || in standard C++; use `ridx` instead
		unsigned int ridx = offset * TEXT_RESULT_VALUES_SIZE + results_offset;
		const unsigned char delimiter = ','; // strip delimiters so the field can be re-emitted safely
		for (unsigned int i = 0; i < search_size; i++)
		{
			const unsigned char s = strings[os + i];
			results[ridx] = s == delimiter ? results[ridx] : s;
			ridx += s == delimiter ? 0 : 1;
		}
	}
} |
e7d9ae25cb2f07af8c098bbb274b29b9f6d2902c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
__global__
void saxpy(int n, float a, float *x, float *y) {
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n) y[i] = a*x[i] + y[i];
}
int main(void) {
int N = 1<<20;
float *x, *y, *d_x, *d_y;
x = (float*)malloc(N*sizeof(float));
y = (float*)malloc(N*sizeof(float));
hipMalloc(&d_x, N*sizeof(float));
hipMalloc(&d_y, N*sizeof(float));
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
hipMemcpy(d_x, x, N*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_y, y, N*sizeof(float), hipMemcpyHostToDevice);
// Perform SAXPY on 1M elements
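	// (N+255)/256 is a ceiling division, so the grid covers all N elements with one thread each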
hipLaunchKernelGGL(( saxpy), dim3((N+255)/256), dim3(256), 0, 0, N, 2.0f, d_x, d_y);
hipMemcpy(y, d_y, N*sizeof(float), hipMemcpyDeviceToHost);
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = max(maxError, abs(y[i]-4.0f));
std::cout << "Max error: " << maxError << std::endl;
hipFree(d_x);
hipFree(d_y);
free(x);
free(y);
} | e7d9ae25cb2f07af8c098bbb274b29b9f6d2902c.cu | #include <iostream>
__global__
void saxpy(int n, float a, float *x, float *y) {
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n) y[i] = a*x[i] + y[i];
}
int main(void) {
int N = 1<<20;
float *x, *y, *d_x, *d_y;
x = (float*)malloc(N*sizeof(float));
y = (float*)malloc(N*sizeof(float));
cudaMalloc(&d_x, N*sizeof(float));
cudaMalloc(&d_y, N*sizeof(float));
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
cudaMemcpy(d_x, x, N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_y, y, N*sizeof(float), cudaMemcpyHostToDevice);
// Perform SAXPY on 1M elements
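	// (N+255)/256 is a ceiling division, so the grid covers all N elements with one thread each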
saxpy<<<(N+255)/256, 256>>>(N, 2.0f, d_x, d_y);
cudaMemcpy(y, d_y, N*sizeof(float), cudaMemcpyDeviceToHost);
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = max(maxError, abs(y[i]-4.0f));
std::cout << "Max error: " << maxError << std::endl;
cudaFree(d_x);
cudaFree(d_y);
free(x);
free(y);
} |
b69a199e3e813984875d91bcf439a897ae7eced3.hip | // !!! This is a file automatically generated by hipify!!!
#include "assert.h"
#include "../utils/gpuErrorCheck.h"
#include "../utils/rsvd.h"
#include <vector>
#include <iostream>
#include <hip/hip_runtime.h>
/* Tuned settings for float and double */
/*
#define FLOAT
#define real_t float
#define BLK_WIDTH 16
#define BLK_HEIGHT 128
#define NUM_THREADS 128
#define BLK_SIZE (BLK_WIDTH * BLK_HEIGHT)
#define BLK_ROWS (NUM_THREADS / BLK_WIDTH)
#define THREAD_STORAGE BLK_WIDTH
#define TID_L_MASK 0xF
#define TID_U_SHIFT 4
#define ZERO_INIT {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}
*/
#define DOUBLE
#define real_t double
#define BLK_WIDTH 8
#define BLK_HEIGHT 64
#define NUM_THREADS 64
#define BLK_SIZE (BLK_WIDTH * BLK_HEIGHT)
#define BLK_ROWS (NUM_THREADS / BLK_WIDTH)
#define THREAD_STORAGE BLK_WIDTH
#define TID_L_MASK 0x7
#define TID_U_SHIFT 3
#define ZERO_INIT {0,0,0,0,0,0,0,0}
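/* Derived invariants for the double configuration above: NUM_THREADS == BLK_HEIGHT,
   BLK_ROWS == NUM_THREADS / BLK_WIDTH == 8, TID_L_MASK == BLK_WIDTH - 1, and
   TID_U_SHIFT == log2(BLK_WIDTH), so tid_l = tid & TID_L_MASK is tid % BLK_WIDTH
   and tid_u = tid >> TID_U_SHIFT is tid / BLK_WIDTH. */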
/******************************************* class ****************************************************/
class level_t {
public:
int num_blocks;
int aggregate_blocks;
int block_offset;
int lev;
level_t();
level_t(int nb, int ab, int bo, int l);
};
using namespace std;
/* This defines the main matrix data structure and functions to
access and modify it */
class QR_matrix {
public:
/* Input matrix properties */
int m;
int n;
int lda;
int ld_panel_size;
int ldq;
int ldq_panel;
int blks_tall_total;
int blks_wide_total;
int internal_matrix_size;
int total_blocks;
real_t * mat_base;
real_t * Q_base;
/* Current information */
real_t * mat_cur;
real_t * Q;
int m_current;
int n_current;
int blks_tall_cur;
int blks_wide_cur;
vector<level_t*> levels;
/* Constructors*/
QR_matrix();
	QR_matrix(real_t * d_A, const int m, const int n, const int lda); // takes a device pointer
~QR_matrix();
/* Set the matrix in its internal form (transpose) and retrieve it back */
void factor();
void retrieveQ();
void calculate_dimensions(const int m, const int n);
void panelTranspose(const real_t * mat_in, const int m, const int n, const int lda);
void panelTransInv(real_t * mat_out, const int m, const int n, const int lda);
void retrieveR(real_t * mat_out, const int m, const int n, const int lda);
real_t * blockQ(const int l);
/* Update the pointer to the next panel */
	void increment(bool levelChangeFlag);
	void decrement(bool levelChangeFlag);
int set_levels();
};
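/* Typical use (see orth_CAQR at the end of this file): construct from a device
   matrix, call factor(), then retrieveQ() followed by panelTransInv() to recover
   Q in ordinary column-major form. */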
/*************************************** CUDA kernels *****************************************************/
__device__ real_t reduce(real_t * u_sh, real_t ub[], real_t col[], real_t * av, int tid_u, int tid_l, int tid)
{
#pragma unroll
for(int i = 0 ; i < THREAD_STORAGE ; i++)
ub[i] = u_sh[tid_u + i*BLK_ROWS];
real_t val = (real_t) 0;
#pragma unroll
for(int i = 0 ; i < THREAD_STORAGE ; i++)
val += ub[i] * col[i];
if(tid >= (NUM_THREADS/2)) av[tid] = val;
__syncthreads();
if(tid < (NUM_THREADS/2)) av[tid] = av[tid + (NUM_THREADS/2)] + val;
__syncthreads();
val = 0;
#pragma unroll
for(int i = 0 ; i < (BLK_ROWS/2) ; i++)
val += av[tid_l + BLK_WIDTH*i];
return val;
}
__device__ void update(real_t * u_sh, real_t ub[], real_t col[], real_t res, int tid_u)
{
// Rank-1 update
real_t fres = (real_t) 2 * res;
#pragma unroll
for(int i = 0 ; i < THREAD_STORAGE ; i++)
col[i] -= (real_t) ub[i] * fres;
}
__device__ void load_a(real_t * a, real_t col[], int tid)
{
#pragma unroll
for(int i = 0 ; i < THREAD_STORAGE; i++)
col[i] = a[tid + i*NUM_THREADS];
}
__device__ void load_a_triangles(real_t * a, real_t col[], int tid, int offset_blocks, real_t * A_max)
{
// This is hardcoded for 128x16. Oops!
real_t * a_orig = a;
#pragma unroll
for(int i = 0 ; i < THREAD_STORAGE / (BLK_WIDTH/BLK_ROWS); i++) {
if(a < A_max) {
for(int ii = 0 ; ii < (BLK_WIDTH/BLK_ROWS) ; ii++)
col[(BLK_WIDTH/BLK_ROWS)*i + ii ] = a[tid + ii*NUM_THREADS];
}
a += offset_blocks * BLK_SIZE;
}
a = a_orig;
}
__device__ void write_a(real_t * a, real_t col[], int tid)
{
#pragma unroll
for(int i = 0 ; i < THREAD_STORAGE ; i++)
a[tid + i*NUM_THREADS] = col[i];
}
__device__ void write_a_triangles(real_t * a, real_t col[], int tid, int offset_blocks, real_t * A_max)
{
// This is hardcoded for 128x16. Oops!
#pragma unroll
for(int i = 0 ; i < THREAD_STORAGE/ (BLK_WIDTH/BLK_ROWS); i++) {
if(a < A_max) {
for(int ii = 0 ; ii < (BLK_WIDTH/BLK_ROWS) ; ii++)
a[tid + ii*NUM_THREADS] = col[(BLK_WIDTH/BLK_ROWS)*i + ii ];
}
a += offset_blocks * BLK_SIZE;
}
}
__device__ void load_u(real_t * u_sh, real_t * b, int tid)
{
#pragma unroll
for(int i = 0 ; i < BLK_WIDTH ; i++)
u_sh[tid + i*BLK_HEIGHT] = b[tid + i*BLK_HEIGHT];
__syncthreads();
}
__global__ void hh_update_dense_reverse(real_t * a, real_t * b, int lda_panel, int ldq)
{
__shared__ real_t av[NUM_THREADS];
__shared__ real_t u[BLK_HEIGHT * BLK_WIDTH];
real_t col[THREAD_STORAGE];
real_t ub[THREAD_STORAGE];
real_t * u_sh;
int tid = threadIdx.x;
int tid_l = tid & TID_L_MASK;
int tid_u = tid >> TID_U_SHIFT;
// Pretend we are in panel-transpose form
a += blockIdx.x * BLK_SIZE + blockIdx.y * lda_panel;
// Pretend we are in column major form
b += blockIdx.x * BLK_SIZE;
// load in the block
load_a(a, col, tid);
// Load u
u_sh = &u[0];
load_u(u_sh, b, tid);
u_sh = &u[(BLK_WIDTH-1) * BLK_HEIGHT];
// For each Householder vector
for(int j = 0 ; j < BLK_WIDTH; j++) {
// Matrix-vector multiply
real_t res = reduce(u_sh, ub, col, av, tid_u, tid_l, tid);
// Rank-1 update
update(u_sh, ub, col, res, tid_u);
// Go to the next Householder vector
u_sh -= BLK_HEIGHT;
}
// write out the block
write_a(a, col, tid);
}
__device__ void compute_u(real_t * u_sh, real_t col[], real_t norms[], int tid, int tid_u, int tid_l, int j, int row, int m)
{
__shared__ real_t mulby_sh;
__syncthreads();
if(j + row >= m) {
u_sh[tid] = (real_t) 0;
}
else {
if(tid_l == j) {
real_t local = 0.0;
#pragma unroll
for(int i = 0 ; i < THREAD_STORAGE ; i++) {
u_sh[tid_u + i*BLK_ROWS] = col[i];
if(tid_u + i*BLK_ROWS > j) local += col[i] * col[i];
}
norms[tid_u] = local;
}
__syncthreads();
if(tid == j)
{
real_t nm2_nminus1 = (real_t)0.0;
#pragma unroll
for(int i = 0 ; i < BLK_ROWS ; i++)
nm2_nminus1 += norms[i];
real_t top_element = u_sh[j];
#ifdef DOUBLE
real_t nm = sqrt(nm2_nminus1 + top_element*top_element);
#endif
#ifdef FLOAT
real_t nm = sqrtf(nm2_nminus1 + top_element*top_element);
#endif
u_sh[j] = top_element = (top_element >= (real_t)0) ? top_element + nm : top_element - nm;
#ifdef DOUBLE
real_t divby = sqrt(nm2_nminus1 + top_element*top_element);
#endif
#ifdef FLOAT
real_t divby = sqrtf(nm2_nminus1 + top_element*top_element);
#endif
mulby_sh = (divby != (real_t) 0) ? ((real_t) 1.0) / divby : (real_t)0;
}
if(tid < j) u_sh[tid] = (real_t) 0;
__syncthreads();
u_sh[tid] *= mulby_sh;
}
__syncthreads();
}
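/* compute_u builds the Householder vector for column j: u = x + sign(x_j)*||x||*e_j,
   with entries above row j zeroed and the result normalized to unit length, so that
   applying I - 2*u*u^T annihilates the column below the diagonal. */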
// factor a small matrix block; the Householder vectors are saved in b
__global__ void hh_factor_dense(real_t * a, real_t * b, int m, int lda_panel, int ldq)
{
__shared__ real_t av[NUM_THREADS];
__shared__ real_t u[BLK_HEIGHT];
__shared__ real_t norms[BLK_WIDTH];
real_t col[THREAD_STORAGE];
real_t ub[THREAD_STORAGE];
real_t * u_sh = &u[0];
int tid = threadIdx.x;
int tid_l = tid & TID_L_MASK;
int tid_u = tid >> TID_U_SHIFT;
int row = blockIdx.x * BLK_HEIGHT;
// Pretend we are in panel-transpose form
a += blockIdx.x * BLK_SIZE + blockIdx.y * lda_panel;
// Pretend we are in column major form
b += blockIdx.x * BLK_SIZE;
// load in the block
load_a(a, col, tid);
// For each column of a
for(int j = 0 ; j < BLK_WIDTH ; j++) {
// Form (transpose) the u vector
compute_u(u_sh, col, norms, tid, tid_u, tid_l, j, row, m);
// Matrix-vector multiply: res = v' * A(i:m, :);
real_t res = reduce(u_sh, ub, col, av, tid_u, tid_l, tid);
// Rank-1 update: A(j:m, :) -= 2 * v * res;
update(u_sh, ub, col, res, tid_u);
// Write out u
b[tid] = u_sh[tid];
// Go to the next Householder vector
b += BLK_HEIGHT;
}
}
//
__global__ void hh_update_dense(real_t * a, real_t * b, int lda_panel, int ldq, int max_y)
{
__shared__ real_t av[NUM_THREADS];
__shared__ real_t u[BLK_HEIGHT * BLK_WIDTH];
real_t col[THREAD_STORAGE];
real_t ub[THREAD_STORAGE];
real_t * u_sh;
int tid = threadIdx.x;
int tid_l = tid & TID_L_MASK;
int tid_u = tid >> TID_U_SHIFT;
// Pretend we are in panel-transpose form
a += blockIdx.x * BLK_SIZE + blockIdx.y * lda_panel;
// Pretend we are in column major form
b += blockIdx.x * BLK_SIZE;
// Load u
u_sh = &u[0];
load_u(u_sh, b, tid);
for(int p = blockIdx.y ; p < max_y ; p += gridDim.y)
{
// load in the block
load_a(a, col, tid);
// For each Householder vector
for(int j = 0 ; j < BLK_WIDTH; j++) {
// Matrix-vector multiply
real_t res = reduce(u_sh, ub, col, av, tid_u, tid_l, tid);
// Rank-1 update
update(u_sh, ub, col, res, tid_u);
// Go to the next Householder vector
u_sh += BLK_HEIGHT;
}
// write out the block
write_a(a, col, tid);
u_sh = &u[0];
a += gridDim.y * lda_panel;
}
}
__global__ void hh_factor_triangle(real_t * a, real_t * b, int m, int lda_panel, int ldq, int offset_blocks, real_t * A_max)
{
__shared__ real_t av[NUM_THREADS];
__shared__ real_t u[BLK_HEIGHT];
__shared__ real_t norms[BLK_WIDTH];
real_t col[THREAD_STORAGE] = ZERO_INIT;
real_t ub[THREAD_STORAGE];
real_t * u_sh = &u[0];
int tid = threadIdx.x;
int tid_l = tid & TID_L_MASK;
int tid_u = tid >> TID_U_SHIFT;
int row = blockIdx.x * (BLK_HEIGHT / BLK_WIDTH) * offset_blocks * BLK_HEIGHT;
// Pretend we are in panel-transpose form
a += row * BLK_WIDTH + blockIdx.y * lda_panel;
// Pretend we are in column major form
b += blockIdx.x * BLK_SIZE;
// load in the block
load_a_triangles(a, col, tid, offset_blocks, A_max);
// For each column of a
for(int j = 0 ; j < BLK_WIDTH ; j++) {
// Form (transpose) the u vector
compute_u(u_sh, col, norms, tid, tid_u, tid_l, j, row, m);
// Matrix-vector multiply
real_t res = reduce(u_sh, ub, col, av, tid_u, tid_l, tid);
// Rank-1 update
update(u_sh, ub, col, res, tid_u);
// Write out u
b[tid] = u_sh[tid];
// Go to the next Householder vector
b += BLK_HEIGHT;
}
}
__global__ void hh_update_triangle_reverse(real_t * a, real_t * b, int lda_panel, int ldq, int offset_blocks, real_t * A_max)
{
__shared__ real_t av[NUM_THREADS];
__shared__ real_t u[BLK_HEIGHT * BLK_WIDTH];
real_t col[THREAD_STORAGE] = ZERO_INIT;
real_t ub[THREAD_STORAGE];
real_t * u_sh;
int tid = threadIdx.x;
int tid_l = tid & TID_L_MASK;
int tid_u = tid >> TID_U_SHIFT;
// Pretend we are in panel-transpose form
a += blockIdx.x * BLK_ROWS * offset_blocks * BLK_SIZE + blockIdx.y * lda_panel;
// Update A_max in case we are working on a different column
A_max += blockIdx.y * lda_panel;
// Pretend we are in column major form
b += blockIdx.x * BLK_SIZE;
// load in the block
load_a_triangles(a, col, tid, offset_blocks, A_max);
// Load u
u_sh = &u[0];
load_u(u_sh, b, tid);
u_sh = &u[(BLK_WIDTH-1)*BLK_HEIGHT];
// For each Householder vector
for(int j = 0 ; j < BLK_WIDTH; j++) {
// Matrix-vector multiply
real_t res = reduce(u_sh, ub, col, av, tid_u, tid_l, tid);
// Rank-1 update
update(u_sh, ub, col, res, tid_u);
// Go to the next Householder vector
u_sh -= BLK_HEIGHT;
}
// write out the block
write_a_triangles(a, col, tid, offset_blocks, A_max);
}
__global__ void hh_update_triangle(real_t * a, real_t * b, int lda_panel, int ldq, int offset_blocks, real_t * A_max)
{
__shared__ real_t av[NUM_THREADS];
__shared__ real_t u[BLK_HEIGHT * BLK_WIDTH];
real_t col[THREAD_STORAGE] = ZERO_INIT;
real_t ub[THREAD_STORAGE];
real_t * u_sh;
int tid = threadIdx.x;
int tid_l = tid & TID_L_MASK;
int tid_u = tid >> TID_U_SHIFT;
// Pretend we are in panel-transpose form
a += blockIdx.x * BLK_ROWS * offset_blocks * BLK_SIZE + blockIdx.y * lda_panel;
// Update the max in case we are on a different panel
A_max += blockIdx.y * lda_panel;
// Pretend we are in column major form
b += blockIdx.x * BLK_SIZE;
// load in the block
load_a_triangles(a, col, tid, offset_blocks, A_max);
// Load u
u_sh = &u[0];
load_u(u_sh, b, tid);
// For each Householder vector
for(int j = 0 ; j < BLK_WIDTH; j++) {
// Matrix-vector multiply
real_t res = reduce(u_sh, ub, col, av, tid_u, tid_l, tid);
// Rank-1 update
update(u_sh, ub, col, res, tid_u);
// Go to the next Householder vector
u_sh += BLK_HEIGHT;
}
// write out the block
write_a_triangles(a, col, tid, offset_blocks, A_max);
}
/* Get the address of a block (i,j) for level l in the Q matrix */
real_t * QR_matrix::blockQ(const int l) {
	assert((size_t)l < levels.size()); // l indexes levels[], so it must be strictly less than size()
int agg_blocks = levels[l]->aggregate_blocks;
// Get pointer to the next level
int offset_blocks = agg_blocks * BLK_SIZE;
return Q + offset_blocks;
}
/* Panel transpose of a block
<<< dim3(blks_tall_total, blks_wide_total), BLK_HEIGHT >>> */
__global__ void blockTranspose(real_t * out, const real_t * in,
int ld_panel_size, int m, int n, int ld_col)
{
// Shared memory
	__shared__ real_t sh[(BLK_HEIGHT + 1) * BLK_WIDTH]; // +1 pads each column so the transposed accesses below hit different shared-memory banks
int tid = threadIdx.x;
int tid_l = tid & TID_L_MASK;
int tid_u = tid >> TID_U_SHIFT;
// Offset the input vector address
in += BLK_HEIGHT * blockIdx.x + BLK_WIDTH * ld_col * blockIdx.y;
out+= BLK_SIZE * blockIdx.x + ld_panel_size * blockIdx.y;
// If we are close to the border then this will be < BLK_WIDTH
int n_it = n - BLK_WIDTH * blockIdx.y;
// Load whole block into shared memory into column major
if(tid + BLK_HEIGHT * blockIdx.x < m) {
#pragma unroll
for(int i = 0 ; i < BLK_WIDTH; i++)
sh[tid + i*BLK_HEIGHT+i] = (i < n_it) ? in[tid + i * ld_col] : (real_t) 0;
} else {
#pragma unroll
for(int i = 0 ; i < BLK_WIDTH ; i++)
sh[tid + i*BLK_HEIGHT+i] = (real_t) 0;
}
__syncthreads();
// Load block out of shared memory in transposed form
int off = tid_l * (BLK_HEIGHT+1) + tid_u;
#pragma unroll
for(int i = 0 ; i < BLK_WIDTH ; i++)
{ out[tid + i * BLK_HEIGHT] = sh[off + i * BLK_ROWS]; }
}
/* Panel transpose of the entire matrix (inverse) */
__global__ void trans_inv(real_t * out, const real_t * in, int ld_panel_size, int m, int n, int ld_col)
{
__shared__ real_t sh[(BLK_HEIGHT+1) * BLK_WIDTH];
int tid = threadIdx.x;
int tid_l = tid & TID_L_MASK;
int tid_u = tid >> TID_U_SHIFT;
// Offset the output matrix
in+= BLK_SIZE * blockIdx.x + ld_panel_size * blockIdx.y;
out+= BLK_HEIGHT * blockIdx.x + BLK_WIDTH * ld_col * blockIdx.y;
// In case we run off the end in the n direction
int n_it = n - BLK_WIDTH * blockIdx.y;
n_it = (n_it < BLK_WIDTH) ? n_it : BLK_WIDTH;
// Load block into shared memory in column major
int off = tid_l * (BLK_HEIGHT+1) + tid_u;
#pragma unroll
for(int i = 0 ; i < BLK_WIDTH ; i++)
sh[off + i * BLK_ROWS] = in[tid + i * BLK_HEIGHT];
__syncthreads();
	// Write back out
if(tid + BLK_HEIGHT * blockIdx.x >= m) return;
for(int i = 0 ; i < n_it; i++)
out[tid + i * ld_col] = sh[tid + i*BLK_HEIGHT+i];
}
/* Set matrix to identity */
__global__ void set_ident(real_t * A, int ld_panel_size, int m, int n)
{
int tid = threadIdx.x;
int bid = blockIdx.x;
// Offset the input vector
A += (BLK_WIDTH * BLK_WIDTH + ld_panel_size) * bid;
if(tid + BLK_WIDTH * bid >= n) return;
// Set diagonal
int index = tid + tid * BLK_WIDTH;
A[index] = 1.0;
}
/*************************************** Host functions *******************************************/
int orth_CAQR_size(const int m, const int n){
int blks_tall_cur = ((m + BLK_HEIGHT - 1) / BLK_HEIGHT);
//int blks_wide_cur = (n + BLK_WIDTH - 1) / BLK_WIDTH;
int num_blocks = blks_tall_cur;
int tb = num_blocks;
// Add the first block
level_t *lev = new level_t(num_blocks, 0, 0, 0);
vector<level_t*> levels;
levels.push_back(lev);
int block_offset = 1;
int levnum = 1;
while(num_blocks > 1) {
num_blocks = (num_blocks + (BLK_HEIGHT/BLK_WIDTH) - 1) / (BLK_HEIGHT / BLK_WIDTH);
lev = new level_t(num_blocks, tb, block_offset, levnum);
levels.push_back(lev);
block_offset *= (BLK_HEIGHT/BLK_WIDTH);
levnum++;
tb += num_blocks;
}
//int blks_tall_total = ((m + BLK_HEIGHT - 1) / BLK_HEIGHT) + 1; // + 1 is necessary! find out why!
//int ld_panel_size = blks_tall_total * BLK_WIDTH * BLK_HEIGHT;
int ldq = tb * BLK_HEIGHT;
int blks_wide_total = (n + BLK_WIDTH - 1) / BLK_WIDTH;
int ldq_panel = ldq * BLK_WIDTH;
	for (size_t i = 0; i < levels.size(); i++) delete levels[i]; // the level list was only needed to count blocks
	return (ldq_panel * blks_wide_total);
}
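/* Worked example (double config: BLK_HEIGHT = 64, BLK_WIDTH = 8), assuming m = 4096:
   level 0 holds ceil(4096/64) = 64 blocks; each level merges BLK_HEIGHT/BLK_WIDTH = 8
   triangles, so level 1 has 8 blocks and level 2 has 1, for 64 + 8 + 1 = 73 blocks
   total and ldq = 73 * BLK_HEIGHT rows of Householder storage per panel of Q. */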
// The matrix is stored transposed inside every block (panel-transpose layout).
void QR_matrix::panelTranspose(const real_t * mat_in, const int m, const int n, const int lda) {
assert(lda >= m);
//CHECK_CUDA( hipMemset(mat_base, 0, internal_matrix_size * sizeof(real_t)));
//calculate_dimensions(m, n);
// grid is set to the block number
// 1 threadblock is in charge of transpose 1 matrix block,
hipLaunchKernelGGL(( blockTranspose) , dim3(dim3(blks_tall_total, blks_wide_total)), dim3(BLK_HEIGHT) , 0, 0,
mat_base, mat_in, ld_panel_size, m, n, lda);
CHECK_CUDA( hipDeviceSynchronize() );
CHECK_CUDA( hipGetLastError() );
}
void QR_matrix::calculate_dimensions(const int m, const int n) {
this->m = m;
this->n = n;
m_current = m;
n_current = n;
blks_wide_total = (n + BLK_WIDTH - 1) / BLK_WIDTH;
blks_tall_total = ((m + BLK_HEIGHT - 1) / BLK_HEIGHT) + 1; // + 1 is necessary! find out why!
total_blocks = set_levels();
ld_panel_size = blks_tall_total * BLK_WIDTH * BLK_HEIGHT; // the size of a panel, (which is transposed)
ldq = total_blocks * BLK_HEIGHT;
ldq_panel = ldq * BLK_WIDTH;
internal_matrix_size = ld_panel_size * blks_wide_total;
}
QR_matrix::QR_matrix() {}
QR_matrix::QR_matrix(real_t * d_A, const int m, const int n, const int lda) {
// Build the data structures
calculate_dimensions(m, n);
// Allocate the data matrix
CHECK_CUDA( hipMalloc((real_t**) &mat_base, internal_matrix_size * sizeof(real_t)));
Q_base = d_A;
// CHECK_CUDA( hipMalloc( (real_t**) &Q_base, ldq_panel * blks_wide_total * sizeof(real_t) ) );
// CHECK_CUDA( hipMemset( Q_base, 0, ldq_panel * blks_wide_total * sizeof(real_t) ) );
// Transpose
panelTranspose(d_A, m, n, lda);
// Allocate the Q matrix
// printf("A size = %d.\n", lda * n);
// printf("A' size = %d.\n", internal_matrix_size);
// printf("Q szie = %d.\n", ldq_panel * blks_wide_total);
// Set "current" pointers
mat_cur = mat_base;
Q = Q_base;
this->lda = lda;
}
#define SIMD_WIDTH 16
void QR_matrix::factor()
{
for(int i = 0; i < blks_wide_total; i++) {
// Factor two blocks on the left
hipLaunchKernelGGL(( hh_factor_dense) , dim3(blks_tall_cur), dim3(NUM_THREADS) , 0, 0, mat_cur, blockQ(0), m_current, ld_panel_size, ldq);
// Update two blocks on the left and right
int y_wid = 1;
if(blks_tall_cur * blks_wide_cur < 2000) { y_wid = blks_wide_cur; }
else { y_wid = (blks_wide_cur - 1) / SIMD_WIDTH + 1;}
// **** Added from version 1.2 to get performance on large square **
hipLaunchKernelGGL(( hh_update_dense) , dim3(dim3(blks_tall_cur, y_wid)), dim3(NUM_THREADS) , 0, 0, mat_cur, blockQ(0), ld_panel_size, ldq, blks_wide_cur);
for (int lev = 1; lev < levels.size(); lev++) {
level_t * cur_lev = levels[lev];
// TODO
hipLaunchKernelGGL(( hh_factor_triangle) , dim3(dim3(cur_lev->num_blocks, 1)), dim3(NUM_THREADS), 0, 0, mat_cur, blockQ(lev), m_current, ld_panel_size, ldq, cur_lev->block_offset, mat_cur + m_current * BLK_WIDTH);
// TODO
hipLaunchKernelGGL(( hh_update_triangle) , dim3(dim3(cur_lev->num_blocks, blks_wide_cur)), dim3(NUM_THREADS), 0, 0, mat_cur, blockQ(lev), ld_panel_size, ldq, cur_lev->block_offset, mat_cur + m_current * BLK_WIDTH);
}
// Next panel
increment(false);
}
CHECK_CUDA( hipDeviceSynchronize() );
CHECK_CUDA( hipGetLastError() );
}
void QR_matrix::retrieveQ()
{
/* set block Q to identity matrix
Q = |1 0 .. 0 |
|0 1 .. 0 |
|0 ..... |
*/
CHECK_CUDA( hipMemset(mat_base, 0, internal_matrix_size * sizeof(real_t)));
calculate_dimensions(m, n);
hipLaunchKernelGGL(( set_ident) , dim3(dim3(blks_wide_total)), dim3(BLK_WIDTH) , 0, 0, mat_base, ld_panel_size, m, n);
// Set "current" pointers
mat_cur= mat_base;
Q = Q_base;
int k_blks = (n + BLK_WIDTH - 1) / BLK_WIDTH;
// A bit of a hack, but it's probably fine.
for(int panel = 0 ; panel < blks_wide_total - 1 ; panel++) increment(true);
for(int panel = blks_wide_total - 1 ; panel >= 0 ; panel--) {
// Probably want to iterate through "levels" here. That's why you used STL right?
for (int lev = levels.size() - 1; lev > 0; lev--) {
level_t * cur_lev = levels[lev];
hipLaunchKernelGGL(( hh_update_triangle_reverse) , dim3(dim3(cur_lev->num_blocks, k_blks)), dim3(NUM_THREADS) , 0, 0,
mat_cur, blockQ(lev), ld_panel_size, ldq, cur_lev->block_offset, mat_cur + m_current * BLK_WIDTH);
}
// Update two blocks on the left and right
hipLaunchKernelGGL(( hh_update_dense_reverse) , dim3(dim3(blks_tall_cur, k_blks)), dim3(NUM_THREADS) , 0, 0, mat_cur, blockQ(0), ld_panel_size, ldq);
// Next panel
decrement(true);
}
}
int QR_matrix::set_levels() {
	for (size_t i = 0; i < levels.size(); i++) delete levels[i]; // free the previous level list before rebuilding it
	levels.clear();
blks_tall_cur = ((m_current + BLK_HEIGHT - 1) / BLK_HEIGHT);
blks_wide_cur = (n_current + BLK_WIDTH - 1) / BLK_WIDTH;
int num_blocks = blks_tall_cur;
int tb = num_blocks;
// Add the first block
level_t *lev = new level_t(num_blocks, 0, 0, 0);
levels.push_back(lev);
int block_offset = 1;
int levnum = 1;
while(num_blocks > 1) {
num_blocks = (num_blocks + (BLK_HEIGHT/BLK_WIDTH) - 1) / (BLK_HEIGHT / BLK_WIDTH);
lev = new level_t(num_blocks, tb, block_offset, levnum);
levels.push_back(lev);
block_offset *= (BLK_HEIGHT/BLK_WIDTH);
levnum++;
tb += num_blocks;
}
return tb;
}
void QR_matrix::panelTransInv(real_t * mat_out, const int m, const int n, const int lda) {
dim3 gridDim(blks_tall_total, blks_wide_total);
dim3 blockDim(BLK_HEIGHT);
hipLaunchKernelGGL(( trans_inv) , dim3(gridDim), dim3(blockDim) , 0, 0, mat_out, mat_base, ld_panel_size, m, n, lda);
}
void QR_matrix::retrieveR(real_t * mat_out, const int m, const int n, const int lda) {
dim3 gridDim((blks_wide_total + BLK_ROWS - 1) / BLK_ROWS , blks_wide_total);
dim3 blockDim(BLK_HEIGHT);
hipLaunchKernelGGL(( trans_inv) , dim3(gridDim), dim3(blockDim) , 0, 0, mat_out, mat_base, ld_panel_size, m, n, lda);
}
// Add to "current" pointers by one panel
void QR_matrix::increment(bool levelChangeFlag) {
	if(!levelChangeFlag) mat_cur = mat_cur + ld_panel_size;
mat_cur = mat_cur+ BLK_WIDTH * BLK_WIDTH;
Q = Q + ldq_panel;
m_current -= BLK_WIDTH;
n_current -= BLK_WIDTH;
set_levels();
}
// Add to "current" pointers by one panel
void QR_matrix::decrement(bool levelChagneFlag) {
if(!levelChagneFlag) mat_cur= mat_cur- ld_panel_size;
mat_cur = mat_cur - BLK_WIDTH * BLK_WIDTH;
Q = Q - ldq_panel;
m_current += BLK_WIDTH;
n_current += BLK_WIDTH;
set_levels();
}
/* destructor */
QR_matrix::~QR_matrix() {
	CHECK_CUDA( hipFree(mat_base));
	for (size_t i = 0; i < levels.size(); i++) delete levels[i]; // free the level list
	// CHECK_CUDA( hipFree(Q_base));
}
level_t::level_t() {}
level_t::level_t(int nb, int ab, int bo, int l) {
num_blocks = nb;
aggregate_blocks = ab;
block_offset = bo;
lev = l;
}
void orth_CAQR(real_t *d_A, const uint64_t m, const uint64_t n){
const int lda = roundup_to_32X( m );
QR_matrix *QRobj = new QR_matrix(d_A, m, n, lda);
// QR factorization
QRobj->factor();
//QRobj->retrieveR(d_A, m, n, lda);
// Retrieve Q
QRobj->retrieveQ();
QRobj->panelTransInv(d_A, m, n, lda);
CHECK_CUDA( hipDeviceSynchronize() );
CHECK_CUDA( hipGetLastError() );
delete QRobj;
}
| b69a199e3e813984875d91bcf439a897ae7eced3.cu | #include "assert.h"
#include "../utils/gpuErrorCheck.h"
#include "../utils/rsvd.h"
#include <vector>
#include <iostream>
#include <cuda.h>
/* Tuned settings for float and double */
/*
#define FLOAT
#define real_t float
#define BLK_WIDTH 16
#define BLK_HEIGHT 128
#define NUM_THREADS 128
#define BLK_SIZE (BLK_WIDTH * BLK_HEIGHT)
#define BLK_ROWS (NUM_THREADS / BLK_WIDTH)
#define THREAD_STORAGE BLK_WIDTH
#define TID_L_MASK 0xF
#define TID_U_SHIFT 4
#define ZERO_INIT {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}
*/
#define DOUBLE
#define real_t double
#define BLK_WIDTH 8
#define BLK_HEIGHT 64
#define NUM_THREADS 64
#define BLK_SIZE (BLK_WIDTH * BLK_HEIGHT)
#define BLK_ROWS (NUM_THREADS / BLK_WIDTH)
#define THREAD_STORAGE BLK_WIDTH
#define TID_L_MASK 0x7
#define TID_U_SHIFT 3
#define ZERO_INIT {0,0,0,0,0,0,0,0}
/******************************************* class ****************************************************/
class level_t {
public:
int num_blocks;
int aggregate_blocks;
int block_offset;
int lev;
level_t();
level_t(int nb, int ab, int bo, int l);
};
using namespace std;
/* This defines the main matrix data structure and functions to
access and modify it */
class QR_matrix {
public:
/* Input matrix properties */
int m;
int n;
int lda;
int ld_panel_size;
int ldq;
int ldq_panel;
int blks_tall_total;
int blks_wide_total;
int internal_matrix_size;
int total_blocks;
real_t * mat_base;
real_t * Q_base;
/* Current information */
real_t * mat_cur;
real_t * Q;
int m_current;
int n_current;
int blks_tall_cur;
int blks_wide_cur;
vector<level_t*> levels;
/* Constructors*/
QR_matrix();
QR_matrix(real_t * h_A, const int m, const int n, const int lda);
~QR_matrix();
/* Set the matrix in its internal form (transpose) and retrieve it back */
void factor();
void retrieveQ();
void calculate_dimensions(const int m, const int n);
void panelTranspose(const real_t * mat_in, const int m, const int n, const int lda);
void panelTransInv(real_t * mat_out, const int m, const int n, const int lda);
void retrieveR(real_t * mat_out, const int m, const int n, const int lda);
real_t * blockQ(const int l);
/* Update the pointer to the next panel */
void increment(bool levelChangeFlag);
void decrement(bool levelChangeFlag);
int set_levels();
};
/*************************************** CUDA kernels *****************************************************/
__device__ real_t reduce(real_t * u_sh, real_t ub[], real_t col[], real_t * av, int tid_u, int tid_l, int tid)
{
#pragma unroll
for(int i = 0 ; i < THREAD_STORAGE ; i++)
ub[i] = u_sh[tid_u + i*BLK_ROWS];
real_t val = (real_t) 0;
#pragma unroll
for(int i = 0 ; i < THREAD_STORAGE ; i++)
val += ub[i] * col[i];
if(tid >= (NUM_THREADS/2)) av[tid] = val;
__syncthreads();
if(tid < (NUM_THREADS/2)) av[tid] = av[tid + (NUM_THREADS/2)] + val;
__syncthreads();
val = 0;
#pragma unroll
for(int i = 0 ; i < (BLK_ROWS/2) ; i++)
val += av[tid_l + BLK_WIDTH*i];
return val;
}
__device__ void update(real_t * u_sh, real_t ub[], real_t col[], real_t res, int tid_u)
{
// Rank-1 update
real_t fres = (real_t) 2 * res;
#pragma unroll
for(int i = 0 ; i < THREAD_STORAGE ; i++)
col[i] -= (real_t) ub[i] * fres;
}
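/* Together, reduce() and update() apply one Householder reflection to a column
held in registers: reduce computes res = u^T col via per-thread partial sums
folded through the shared array av, and update then performs
col -= 2 * res * u, i.e. col := (I - 2 u u^T) col, assuming ||u||_2 = 1. */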
__device__ void load_a(real_t * a, real_t col[], int tid)
{
#pragma unroll
for(int i = 0 ; i < THREAD_STORAGE; i++)
col[i] = a[tid + i*NUM_THREADS];
}
__device__ void load_a_triangles(real_t * a, real_t col[], int tid, int offset_blocks, real_t * A_max)
{
// This is hardcoded for 128x16. Oops!
real_t * a_orig = a;
#pragma unroll
for(int i = 0 ; i < THREAD_STORAGE / (BLK_WIDTH/BLK_ROWS); i++) {
if(a < A_max) {
for(int ii = 0 ; ii < (BLK_WIDTH/BLK_ROWS) ; ii++)
col[(BLK_WIDTH/BLK_ROWS)*i + ii ] = a[tid + ii*NUM_THREADS];
}
a += offset_blocks * BLK_SIZE;
}
a = a_orig;
}
__device__ void write_a(real_t * a, real_t col[], int tid)
{
#pragma unroll
for(int i = 0 ; i < THREAD_STORAGE ; i++)
a[tid + i*NUM_THREADS] = col[i];
}
__device__ void write_a_triangles(real_t * a, real_t col[], int tid, int offset_blocks, real_t * A_max)
{
// This is hardcoded for 128x16. Oops!
#pragma unroll
for(int i = 0 ; i < THREAD_STORAGE/ (BLK_WIDTH/BLK_ROWS); i++) {
if(a < A_max) {
for(int ii = 0 ; ii < (BLK_WIDTH/BLK_ROWS) ; ii++)
a[tid + ii*NUM_THREADS] = col[(BLK_WIDTH/BLK_ROWS)*i + ii ];
}
a += offset_blocks * BLK_SIZE;
}
}
__device__ void load_u(real_t * u_sh, real_t * b, int tid)
{
#pragma unroll
for(int i = 0 ; i < BLK_WIDTH ; i++)
u_sh[tid + i*BLK_HEIGHT] = b[tid + i*BLK_HEIGHT];
__syncthreads();
}
__global__ void hh_update_dense_reverse(real_t * a, real_t * b, int lda_panel, int ldq)
{
__shared__ real_t av[NUM_THREADS];
__shared__ real_t u[BLK_HEIGHT * BLK_WIDTH];
real_t col[THREAD_STORAGE];
real_t ub[THREAD_STORAGE];
real_t * u_sh;
int tid = threadIdx.x;
int tid_l = tid & TID_L_MASK;
int tid_u = tid >> TID_U_SHIFT;
// Pretend we are in panel-transpose form
a += blockIdx.x * BLK_SIZE + blockIdx.y * lda_panel;
// Pretend we are in column major form
b += blockIdx.x * BLK_SIZE;
// load in the block
load_a(a, col, tid);
// Load u
u_sh = &u[0];
load_u(u_sh, b, tid);
u_sh = &u[(BLK_WIDTH-1) * BLK_HEIGHT];
// For each Householder vector
for(int j = 0 ; j < BLK_WIDTH; j++) {
// Matrix-vector multiply
real_t res = reduce(u_sh, ub, col, av, tid_u, tid_l, tid);
// Rank-1 update
update(u_sh, ub, col, res, tid_u);
// Go to the next Householder vector
u_sh -= BLK_HEIGHT;
}
// write out the block
write_a(a, col, tid);
}
__device__ void compute_u(real_t * u_sh, real_t col[], real_t norms[], int tid, int tid_u, int tid_l, int j, int row, int m)
{
__shared__ real_t mulby_sh;
__syncthreads();
if(j + row >= m) {
u_sh[tid] = (real_t) 0;
}
else {
if(tid_l == j) {
real_t local = 0.0;
#pragma unroll
for(int i = 0 ; i < THREAD_STORAGE ; i++) {
u_sh[tid_u + i*BLK_ROWS] = col[i];
if(tid_u + i*BLK_ROWS > j) local += col[i] * col[i];
}
norms[tid_u] = local;
}
__syncthreads();
if(tid == j)
{
real_t nm2_nminus1 = (real_t)0.0;
#pragma unroll
for(int i = 0 ; i < BLK_ROWS ; i++)
nm2_nminus1 += norms[i];
real_t top_element = u_sh[j];
#ifdef DOUBLE
real_t nm = sqrt(nm2_nminus1 + top_element*top_element);
#endif
#ifdef FLOAT
real_t nm = sqrtf(nm2_nminus1 + top_element*top_element);
#endif
u_sh[j] = top_element = (top_element >= (real_t)0) ? top_element + nm : top_element - nm;
#ifdef DOUBLE
real_t divby = sqrt(nm2_nminus1 + top_element*top_element);
#endif
#ifdef FLOAT
real_t divby = sqrtf(nm2_nminus1 + top_element*top_element);
#endif
mulby_sh = (divby != (real_t) 0) ? ((real_t) 1.0) / divby : (real_t)0;
}
if(tid < j) u_sh[tid] = (real_t) 0;
__syncthreads();
u_sh[tid] *= mulby_sh;
}
__syncthreads();
}
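/* In equations, compute_u builds the (unit) Householder vector for column j of
the current tile: with x the column entries in rows j..end,
u = x + sign(x_j) * ||x||_2 * e_j, then u := u / ||u||_2, with u_i = 0 for i < j.
The sign choice avoids cancellation when forming x_j + ||x||_2. */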
// factor a small matrix block, householder vector is saved on b
__global__ void hh_factor_dense(real_t * a, real_t * b, int m, int lda_panel, int ldq)
{
__shared__ real_t av[NUM_THREADS];
__shared__ real_t u[BLK_HEIGHT];
__shared__ real_t norms[BLK_WIDTH];
real_t col[THREAD_STORAGE];
real_t ub[THREAD_STORAGE];
real_t * u_sh = &u[0];
int tid = threadIdx.x;
int tid_l = tid & TID_L_MASK;
int tid_u = tid >> TID_U_SHIFT;
int row = blockIdx.x * BLK_HEIGHT;
// Pretend we are in panel-transpose form
a += blockIdx.x * BLK_SIZE + blockIdx.y * lda_panel;
// Pretend we are in column major form
b += blockIdx.x * BLK_SIZE;
// load in the block
load_a(a, col, tid);
// For each column of a
for(int j = 0 ; j < BLK_WIDTH ; j++) {
// Form (transpose) the u vector
compute_u(u_sh, col, norms, tid, tid_u, tid_l, j, row, m);
// Matrix-vector multiply: res = v' * A(i:m, :);
real_t res = reduce(u_sh, ub, col, av, tid_u, tid_l, tid);
// Rank-1 update: A(j:m, :) -= 2 * v * res;
update(u_sh, ub, col, res, tid_u);
// Write out u
b[tid] = u_sh[tid];
// Go to the next Householder vector
b += BLK_HEIGHT;
}
}
//
__global__ void hh_update_dense(real_t * a, real_t * b, int lda_panel, int ldq, int max_y)
{
__shared__ real_t av[NUM_THREADS];
__shared__ real_t u[BLK_HEIGHT * BLK_WIDTH];
real_t col[THREAD_STORAGE];
real_t ub[THREAD_STORAGE];
real_t * u_sh;
int tid = threadIdx.x;
int tid_l = tid & TID_L_MASK;
int tid_u = tid >> TID_U_SHIFT;
// Pretend we are in panel-transpose form
a += blockIdx.x * BLK_SIZE + blockIdx.y * lda_panel;
// Pretend we are in column major form
b += blockIdx.x * BLK_SIZE;
// Load u
u_sh = &u[0];
load_u(u_sh, b, tid);
for(int p = blockIdx.y ; p < max_y ; p += gridDim.y)
{
// load in the block
load_a(a, col, tid);
// For each Householder vector
for(int j = 0 ; j < BLK_WIDTH; j++) {
// Matrix-vector multiply
real_t res = reduce(u_sh, ub, col, av, tid_u, tid_l, tid);
// Rank-1 update
update(u_sh, ub, col, res, tid_u);
// Go to the next Householder vector
u_sh += BLK_HEIGHT;
}
// write out the block
write_a(a, col, tid);
u_sh = &u[0];
a += gridDim.y * lda_panel;
}
}
__global__ void hh_factor_triangle(real_t * a, real_t * b, int m, int lda_panel, int ldq, int offset_blocks, real_t * A_max)
{
__shared__ real_t av[NUM_THREADS];
__shared__ real_t u[BLK_HEIGHT];
__shared__ real_t norms[BLK_WIDTH];
real_t col[THREAD_STORAGE] = ZERO_INIT;
real_t ub[THREAD_STORAGE];
real_t * u_sh = &u[0];
int tid = threadIdx.x;
int tid_l = tid & TID_L_MASK;
int tid_u = tid >> TID_U_SHIFT;
int row = blockIdx.x * (BLK_HEIGHT / BLK_WIDTH) * offset_blocks * BLK_HEIGHT;
// Pretend we are in panel-transpose form
a += row * BLK_WIDTH + blockIdx.y * lda_panel;
// Pretend we are in column major form
b += blockIdx.x * BLK_SIZE;
// load in the block
load_a_triangles(a, col, tid, offset_blocks, A_max);
// For each column of a
for(int j = 0 ; j < BLK_WIDTH ; j++) {
// Form (transpose) the u vector
compute_u(u_sh, col, norms, tid, tid_u, tid_l, j, row, m);
// Matrix-vector multiply
real_t res = reduce(u_sh, ub, col, av, tid_u, tid_l, tid);
// Rank-1 update
update(u_sh, ub, col, res, tid_u);
// Write out u
b[tid] = u_sh[tid];
// Go to the next Householder vector
b += BLK_HEIGHT;
}
}
__global__ void hh_update_triangle_reverse(real_t * a, real_t * b, int lda_panel, int ldq, int offset_blocks, real_t * A_max)
{
__shared__ real_t av[NUM_THREADS];
__shared__ real_t u[BLK_HEIGHT * BLK_WIDTH];
real_t col[THREAD_STORAGE] = ZERO_INIT;
real_t ub[THREAD_STORAGE];
real_t * u_sh;
int tid = threadIdx.x;
int tid_l = tid & TID_L_MASK;
int tid_u = tid >> TID_U_SHIFT;
// Pretend we are in panel-transpose form
a += blockIdx.x * BLK_ROWS * offset_blocks * BLK_SIZE + blockIdx.y * lda_panel;
// Update A_max in case we are working on a different column
A_max += blockIdx.y * lda_panel;
// Pretend we are in column major form
b += blockIdx.x * BLK_SIZE;
// load in the block
load_a_triangles(a, col, tid, offset_blocks, A_max);
// Load u
u_sh = &u[0];
load_u(u_sh, b, tid);
u_sh = &u[(BLK_WIDTH-1)*BLK_HEIGHT];
// For each Householder vector
for(int j = 0 ; j < BLK_WIDTH; j++) {
// Matrix-vector multiply
real_t res = reduce(u_sh, ub, col, av, tid_u, tid_l, tid);
// Rank-1 update
update(u_sh, ub, col, res, tid_u);
// Go to the next Householder vector
u_sh -= BLK_HEIGHT;
}
// write out the block
write_a_triangles(a, col, tid, offset_blocks, A_max);
}
__global__ void hh_update_triangle(real_t * a, real_t * b, int lda_panel, int ldq, int offset_blocks, real_t * A_max)
{
__shared__ real_t av[NUM_THREADS];
__shared__ real_t u[BLK_HEIGHT * BLK_WIDTH];
real_t col[THREAD_STORAGE] = ZERO_INIT;
real_t ub[THREAD_STORAGE];
real_t * u_sh;
int tid = threadIdx.x;
int tid_l = tid & TID_L_MASK;
int tid_u = tid >> TID_U_SHIFT;
// Pretend we are in panel-transpose form
a += blockIdx.x * BLK_ROWS * offset_blocks * BLK_SIZE + blockIdx.y * lda_panel;
// Update the max in case we are on a different panel
A_max += blockIdx.y * lda_panel;
// Pretend we are in column major form
b += blockIdx.x * BLK_SIZE;
// load in the block
load_a_triangles(a, col, tid, offset_blocks, A_max);
// Load u
u_sh = &u[0];
load_u(u_sh, b, tid);
// For each Householder vector
for(int j = 0 ; j < BLK_WIDTH; j++) {
// Matrix-vector multiply
real_t res = reduce(u_sh, ub, col, av, tid_u, tid_l, tid);
// Rank-1 update
update(u_sh, ub, col, res, tid_u);
// Go to the next Householder vector
u_sh += BLK_HEIGHT;
}
// write out the block
write_a_triangles(a, col, tid, offset_blocks, A_max);
}
/* Get the base address of the reflector storage for level l in the Q matrix */
real_t * QR_matrix::blockQ(const int l) {
assert(l < (int)levels.size());
int agg_blocks = levels[l]->aggregate_blocks;
// Get pointer to the next level
int offset_blocks = agg_blocks * BLK_SIZE;
return Q + offset_blocks;
}
/* Panel transpose of a block
<<< dim3(blks_tall_total, blks_wide_total), BLK_HEIGHT >>> */
__global__ void blockTranspose(real_t * out, const real_t * in,
int ld_panel_size, int m, int n, int ld_col)
{
// Shared memory
__shared__ real_t sh[(BLK_HEIGHT + 1) * BLK_WIDTH]; // padded by one element per column to avoid shared-memory bank conflicts
int tid = threadIdx.x;
int tid_l = tid & TID_L_MASK;
int tid_u = tid >> TID_U_SHIFT;
// Offset the input vector address
in += BLK_HEIGHT * blockIdx.x + BLK_WIDTH * ld_col * blockIdx.y;
out+= BLK_SIZE * blockIdx.x + ld_panel_size * blockIdx.y;
// If we are close to the border then this will be < BLK_WIDTH
int n_it = n - BLK_WIDTH * blockIdx.y;
// Load whole block into shared memory into column major
if(tid + BLK_HEIGHT * blockIdx.x < m) {
#pragma unroll
for(int i = 0 ; i < BLK_WIDTH; i++)
sh[tid + i*BLK_HEIGHT+i] = (i < n_it) ? in[tid + i * ld_col] : (real_t) 0;
} else {
#pragma unroll
for(int i = 0 ; i < BLK_WIDTH ; i++)
sh[tid + i*BLK_HEIGHT+i] = (real_t) 0;
}
__syncthreads();
// Load block out of shared memory in transposed form
int off = tid_l * (BLK_HEIGHT+1) + tid_u;
#pragma unroll
for(int i = 0 ; i < BLK_WIDTH ; i++)
{ out[tid + i * BLK_HEIGHT] = sh[off + i * BLK_ROWS]; }
}
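/* Storage sketch (added for clarity): each BLK_HEIGHT x BLK_WIDTH tile of the
column-major input is written out transposed and contiguous (BLK_SIZE elements
per tile), so input element (r, c) of tile (bi, bj) lands at
out[bj * ld_panel_size + bi * BLK_SIZE + r * BLK_WIDTH + c]. */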
/* Panel transpose of the entire matrix (inverse) */
__global__ void trans_inv(real_t * out, const real_t * in, int ld_panel_size, int m, int n, int ld_col)
{
__shared__ real_t sh[(BLK_HEIGHT+1) * BLK_WIDTH];
int tid = threadIdx.x;
int tid_l = tid & TID_L_MASK;
int tid_u = tid >> TID_U_SHIFT;
// Offset the output matrix
in+= BLK_SIZE * blockIdx.x + ld_panel_size * blockIdx.y;
out+= BLK_HEIGHT * blockIdx.x + BLK_WIDTH * ld_col * blockIdx.y;
// In case we run off the end in the n direction
int n_it = n - BLK_WIDTH * blockIdx.y;
n_it = (n_it < BLK_WIDTH) ? n_it : BLK_WIDTH;
// Load block into shared memory in column major
int off = tid_l * (BLK_HEIGHT+1) + tid_u;
#pragma unroll
for(int i = 0 ; i < BLK_WIDTH ; i++)
sh[off + i * BLK_ROWS] = in[tid + i * BLK_HEIGHT];
__syncthreads();
// Write back out
if(tid + BLK_HEIGHT * blockIdx.x >= m) return;
for(int i = 0 ; i < n_it; i++)
out[tid + i * ld_col] = sh[tid + i*BLK_HEIGHT+i];
}
/* Set matrix to identity */
__global__ void set_ident(real_t * A, int ld_panel_size, int m, int n)
{
int tid = threadIdx.x;
int bid = blockIdx.x;
// Offset the input vector
A += (BLK_WIDTH * BLK_WIDTH + ld_panel_size) * bid;
if(tid + BLK_WIDTH * bid >= n) return;
// Set diagonal
int index = tid + tid * BLK_WIDTH;
A[index] = 1.0;
}
/*************************************** Host functions *******************************************/
int orth_CAQR_size(const int m, const int n){
int blks_tall_cur = ((m + BLK_HEIGHT - 1) / BLK_HEIGHT);
//int blks_wide_cur = (n + BLK_WIDTH - 1) / BLK_WIDTH;
int num_blocks = blks_tall_cur;
int tb = num_blocks;
// Add the first block
level_t *lev = new level_t(num_blocks, 0, 0, 0);
vector<level_t*> levels;
levels.push_back(lev);
int block_offset = 1;
int levnum = 1;
while(num_blocks > 1) {
num_blocks = (num_blocks + (BLK_HEIGHT/BLK_WIDTH) - 1) / (BLK_HEIGHT / BLK_WIDTH);
lev = new level_t(num_blocks, tb, block_offset, levnum);
levels.push_back(lev);
block_offset *= (BLK_HEIGHT/BLK_WIDTH);
levnum++;
tb += num_blocks;
}
//int blks_tall_total = ((m + BLK_HEIGHT - 1) / BLK_HEIGHT) + 1; // + 1 is necessary! find out why!
//int ld_panel_size = blks_tall_total * BLK_WIDTH * BLK_HEIGHT;
int ldq = tb * BLK_HEIGHT;
int blks_wide_total = (n + BLK_WIDTH - 1) / BLK_WIDTH;
int ldq_panel = ldq * BLK_WIDTH;
return (ldq_panel * blks_wide_total);
}
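/* A minimal usage sketch (hypothetical caller; d_Q is an assumed name): the
return value is the element count of the Q workspace that orth_CAQR builds in
place of its input buffer, so a caller would size the allocation like
real_t *d_Q;
CHECK_CUDA( cudaMalloc((void**)&d_Q, orth_CAQR_size(m, n) * sizeof(real_t)) );
*/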
// The matrix is stored transposed inside every matrix block ("panel-transpose" form)
void QR_matrix::panelTranspose(const real_t * mat_in, const int m, const int n, const int lda) {
assert(lda >= m);
//CHECK_CUDA( cudaMemset(mat_base, 0, internal_matrix_size * sizeof(real_t)));
//calculate_dimensions(m, n);
// grid is sized to the total block counts;
// one thread block transposes one matrix block
blockTranspose <<< dim3(blks_tall_total, blks_wide_total), BLK_HEIGHT >>>
(mat_base, mat_in, ld_panel_size, m, n, lda);
CHECK_CUDA( cudaDeviceSynchronize() );
CHECK_CUDA( cudaGetLastError() );
}
void QR_matrix::calculate_dimensions(const int m, const int n) {
this->m = m;
this->n = n;
m_current = m;
n_current = n;
blks_wide_total = (n + BLK_WIDTH - 1) / BLK_WIDTH;
blks_tall_total = ((m + BLK_HEIGHT - 1) / BLK_HEIGHT) + 1; // + 1 is necessary! find out why!
total_blocks = set_levels();
ld_panel_size = blks_tall_total * BLK_WIDTH * BLK_HEIGHT; // the size of a panel, (which is transposed)
ldq = total_blocks * BLK_HEIGHT;
ldq_panel = ldq * BLK_WIDTH;
internal_matrix_size = ld_panel_size * blks_wide_total;
}
QR_matrix::QR_matrix() {}
QR_matrix::QR_matrix(real_t * d_A, const int m, const int n, const int lda) {
// Build the data structures
calculate_dimensions(m, n);
// Allocate the data matrix
CHECK_CUDA( cudaMalloc((real_t**) &mat_base, internal_matrix_size * sizeof(real_t)));
Q_base = d_A;
// CHECK_CUDA( cudaMalloc( (real_t**) &Q_base, ldq_panel * blks_wide_total * sizeof(real_t) ) );
// CHECK_CUDA( cudaMemset( Q_base, 0, ldq_panel * blks_wide_total * sizeof(real_t) ) );
// Transpose
panelTranspose(d_A, m, n, lda);
// Allocate the Q matrix
// printf("A size = %d.\n", lda * n);
// printf("A' size = %d.\n", internal_matrix_size);
// printf("Q szie = %d.\n", ldq_panel * blks_wide_total);
// Set "current" pointers
mat_cur = mat_base;
Q = Q_base;
this->lda = lda;
}
#define SIMD_WIDTH 16
void QR_matrix::factor()
{
for(int i = 0; i < blks_wide_total; i++) {
// Factor two blocks on the left
hh_factor_dense <<< blks_tall_cur, NUM_THREADS >>> (mat_cur, blockQ(0), m_current, ld_panel_size, ldq);
// Update two blocks on the left and right
int y_wid = 1;
if(blks_tall_cur * blks_wide_cur < 2000) { y_wid = blks_wide_cur; }
else { y_wid = (blks_wide_cur - 1) / SIMD_WIDTH + 1;}
// **** Added from version 1.2 to get performance on large square **
hh_update_dense <<< dim3(blks_tall_cur, y_wid), NUM_THREADS >>> (mat_cur, blockQ(0), ld_panel_size, ldq, blks_wide_cur);
for (int lev = 1; lev < levels.size(); lev++) {
level_t * cur_lev = levels[lev];
// TODO
hh_factor_triangle <<< dim3(cur_lev->num_blocks, 1), NUM_THREADS>>> (mat_cur, blockQ(lev), m_current, ld_panel_size, ldq, cur_lev->block_offset, mat_cur + m_current * BLK_WIDTH);
// TODO
hh_update_triangle <<< dim3(cur_lev->num_blocks, blks_wide_cur), NUM_THREADS>>> (mat_cur, blockQ(lev), ld_panel_size, ldq, cur_lev->block_offset, mat_cur + m_current * BLK_WIDTH);
}
// Next panel
increment(false);
}
CHECK_CUDA( cudaDeviceSynchronize() );
CHECK_CUDA( cudaGetLastError() );
}
void QR_matrix::retrieveQ()
{
/* set block Q to identity matrix
Q = |1 0 .. 0 |
|0 1 .. 0 |
|0 ..... |
*/
CHECK_CUDA( cudaMemset(mat_base, 0, internal_matrix_size * sizeof(real_t)));
calculate_dimensions(m, n);
set_ident <<< dim3(blks_wide_total), BLK_WIDTH >>> (mat_base, ld_panel_size, m, n);
// Set "current" pointers
mat_cur = mat_base;
Q = Q_base;
int k_blks = (n + BLK_WIDTH - 1) / BLK_WIDTH;
// A bit of a hack, but it's probably fine.
for(int panel = 0 ; panel < blks_wide_total - 1 ; panel++) increment(true);
for(int panel = blks_wide_total - 1 ; panel >= 0 ; panel--) {
// Probably want to iterate through "levels" here. That's why you used STL right?
for (int lev = levels.size() - 1; lev > 0; lev--) {
level_t * cur_lev = levels[lev];
hh_update_triangle_reverse <<< dim3(cur_lev->num_blocks, k_blks), NUM_THREADS >>>
(mat_cur, blockQ(lev), ld_panel_size, ldq, cur_lev->block_offset, mat_cur + m_current * BLK_WIDTH);
}
// Update two blocks on the left and right
hh_update_dense_reverse <<< dim3(blks_tall_cur, k_blks), NUM_THREADS >>> (mat_cur, blockQ(0), ld_panel_size, ldq);
// Next panel
decrement(true);
}
}
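/* Note (added for clarity): retrieveQ forms Q explicitly by applying the stored
Householder reflectors to an identity matrix in the reverse of the factorization
order, Q = H_1 H_2 ... H_k * I, which is why the panel loop and the level loop
above run backwards relative to factor(). */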
int QR_matrix::set_levels() {
levels.clear();
blks_tall_cur = ((m_current + BLK_HEIGHT - 1) / BLK_HEIGHT);
blks_wide_cur = (n_current + BLK_WIDTH - 1) / BLK_WIDTH;
int num_blocks = blks_tall_cur;
int tb = num_blocks;
// Add the first block
level_t *lev = new level_t(num_blocks, 0, 0, 0);
levels.push_back(lev);
int block_offset = 1;
int levnum = 1;
while(num_blocks > 1) {
num_blocks = (num_blocks + (BLK_HEIGHT/BLK_WIDTH) - 1) / (BLK_HEIGHT / BLK_WIDTH);
lev = new level_t(num_blocks, tb, block_offset, levnum);
levels.push_back(lev);
block_offset *= (BLK_HEIGHT/BLK_WIDTH);
levnum++;
tb += num_blocks;
}
return tb;
}
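/* Worked example (added for clarity, using the DOUBLE settings above,
BLK_HEIGHT = 64 and BLK_WIDTH = 8, so each reduction step merges
BLK_HEIGHT/BLK_WIDTH = 8 blocks): for m_current = 4096 the per-level block
counts are 64 -> 8 -> 1, giving
levels (num_blocks, aggregate_blocks, block_offset, lev) =
(64, 0, 0, 0), (8, 64, 1, 1), (1, 72, 8, 2),
and the returned total of reflector-storage blocks is 64 + 8 + 1 = 73. */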
void QR_matrix::panelTransInv(real_t * mat_out, const int m, const int n, const int lda) {
dim3 gridDim(blks_tall_total, blks_wide_total);
dim3 blockDim(BLK_HEIGHT);
trans_inv <<< gridDim, blockDim >>> (mat_out, mat_base, ld_panel_size, m, n, lda);
}
void QR_matrix::retrieveR(real_t * mat_out, const int m, const int n, const int lda) {
dim3 gridDim((blks_wide_total + BLK_ROWS - 1) / BLK_ROWS , blks_wide_total);
dim3 blockDim(BLK_HEIGHT);
trans_inv <<< gridDim, blockDim >>> (mat_out, mat_base, ld_panel_size, m, n, lda);
}
// Add to "current" pointers by one panel
void QR_matrix::increment(bool levelChagneFlag) {
if(!levelChagneFlag) mat_cur = mat_cur+ ld_panel_size;
mat_cur = mat_cur+ BLK_WIDTH * BLK_WIDTH;
Q = Q + ldq_panel;
m_current -= BLK_WIDTH;
n_current -= BLK_WIDTH;
set_levels();
}
// Add to "current" pointers by one panel
void QR_matrix::decrement(bool levelChagneFlag) {
if(!levelChagneFlag) mat_cur= mat_cur- ld_panel_size;
mat_cur = mat_cur - BLK_WIDTH * BLK_WIDTH;
Q = Q - ldq_panel;
m_current += BLK_WIDTH;
n_current += BLK_WIDTH;
set_levels();
}
/* destructor */
QR_matrix::~QR_matrix() {
CHECK_CUDA( cudaFree(mat_base));
// CHECK_CUDA( cudaFree(Q_base));
}
level_t::level_t() {}
level_t::level_t(int nb, int ab, int bo, int l) {
num_blocks = nb;
aggregate_blocks = ab;
block_offset = bo;
lev = l;
}
void orth_CAQR(real_t *d_A, const uint64_t m, const uint64_t n){
const int lda = roundup_to_32X( m );
QR_matrix *QRobj = new QR_matrix(d_A, m, n, lda);
// QR factorization
QRobj->factor();
//QRobj->retrieveR(d_A, m, n, lda);
// Retrieve Q
QRobj->retrieveQ();
QRobj->panelTransInv(d_A, m, n, lda);
CHECK_CUDA( cudaDeviceSynchronize() );
CHECK_CUDA( cudaGetLastError() );
delete QRobj;
}
|
5079dbb539a9e3135ae9ac0cde7f9588b72c8ed6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Sobel edge detector: for each interior pixel, computes the horizontal (Gx)
// and vertical (Gy) gradients with 3x3 Sobel stencils and writes (|Gx| + |Gy|) / 2.
__global__ void sobelInCuda(float *dataIn, float *dataOut, int imgHeight, int imgWidth)
{
int xIndex = threadIdx.x + blockIdx.x * blockDim.x;
int yIndex = threadIdx.y + blockIdx.y * blockDim.y;
int index = yIndex * imgWidth + xIndex;
int Gx = 0;
int Gy = 0;
if (xIndex > 0 && xIndex < imgWidth - 1 && yIndex > 0 && yIndex < imgHeight - 1)
{
Gx = dataIn[(yIndex - 1) * imgWidth + xIndex + 1] + 2 * dataIn[yIndex * imgWidth + xIndex + 1] + dataIn[(yIndex + 1) * imgWidth + xIndex + 1]
- (dataIn[(yIndex - 1) * imgWidth + xIndex - 1] + 2 * dataIn[yIndex * imgWidth + xIndex - 1] + dataIn[(yIndex + 1) * imgWidth + xIndex - 1]);
Gy = dataIn[(yIndex - 1) * imgWidth + xIndex - 1] + 2 * dataIn[(yIndex - 1) * imgWidth + xIndex] + dataIn[(yIndex - 1) * imgWidth + xIndex + 1]
- (dataIn[(yIndex + 1) * imgWidth + xIndex - 1] + 2 * dataIn[(yIndex + 1) * imgWidth + xIndex] + dataIn[(yIndex + 1) * imgWidth + xIndex + 1]);
Gx = Gx > 0?Gx:(-Gx);
Gy = Gy > 0?Gy:(-Gy);
dataOut[index] = (Gx + Gy) / 2;
}
}
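// A minimal host-side launch sketch (assumed setup; d_in/d_out are device
// buffers of imgHeight * imgWidth floats):
// dim3 block(16, 16);
// dim3 grid((imgWidth + block.x - 1) / block.x, (imgHeight + block.y - 1) / block.y);
// hipLaunchKernelGGL(sobelInCuda, grid, block, 0, 0, d_in, d_out, imgHeight, imgWidth);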
| 5079dbb539a9e3135ae9ac0cde7f9588b72c8ed6.cu |
// Sobel edge detector: for each interior pixel, computes the horizontal (Gx)
// and vertical (Gy) gradients with 3x3 Sobel stencils and writes (|Gx| + |Gy|) / 2.
__global__ void sobelInCuda(float *dataIn, float *dataOut, int imgHeight, int imgWidth)
{
int xIndex = threadIdx.x + blockIdx.x * blockDim.x;
int yIndex = threadIdx.y + blockIdx.y * blockDim.y;
int index = yIndex * imgWidth + xIndex;
int Gx = 0;
int Gy = 0;
if (xIndex > 0 && xIndex < imgWidth - 1 && yIndex > 0 && yIndex < imgHeight - 1)
{
Gx = dataIn[(yIndex - 1) * imgWidth + xIndex + 1] + 2 * dataIn[yIndex * imgWidth + xIndex + 1] + dataIn[(yIndex + 1) * imgWidth + xIndex + 1]
- (dataIn[(yIndex - 1) * imgWidth + xIndex - 1] + 2 * dataIn[yIndex * imgWidth + xIndex - 1] + dataIn[(yIndex + 1) * imgWidth + xIndex - 1]);
Gy = dataIn[(yIndex - 1) * imgWidth + xIndex - 1] + 2 * dataIn[(yIndex - 1) * imgWidth + xIndex] + dataIn[(yIndex - 1) * imgWidth + xIndex + 1]
- (dataIn[(yIndex + 1) * imgWidth + xIndex - 1] + 2 * dataIn[(yIndex + 1) * imgWidth + xIndex] + dataIn[(yIndex + 1) * imgWidth + xIndex + 1]);
Gx = Gx > 0?Gx:(-Gx);
Gy = Gy > 0?Gy:(-Gy);
dataOut[index] = (Gx + Gy) / 2;
}
}
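// A minimal host-side launch sketch (assumed setup; d_in/d_out are device
// buffers of imgHeight * imgWidth floats):
// dim3 block(16, 16);
// dim3 grid((imgWidth + block.x - 1) / block.x, (imgHeight + block.y - 1) / block.y);
// sobelInCuda<<<grid, block>>>(d_in, d_out, imgHeight, imgWidth);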
|
9d08df0e95297861634dc7911b7ddac2f3ae7452.hip | // !!! This is a file automatically generated by hipify!!!
#include <cfloat>
#include <stdio.h>
#include "assert.h"
#include "hip/hip_runtime.h"
#include "utility/src/utils.cuh"
#include "weighted_average_wirelength/src/functional_cuda.h"
DREAMPLACE_BEGIN_NAMESPACE
template <typename T, typename V>
int computeWeightedAverageWirelengthCudaLauncher(
const T *x, const T *y,
const int *pin2net_map,
const int *flat_netpin,
const int *netpin_start,
const unsigned char *net_mask,
int num_nets,
int num_pins,
const T *inv_gamma,
T *exp_xy, T *exp_nxy,
T *exp_xy_sum, T *exp_nxy_sum,
T *xyexp_xy_sum, T *xyexp_nxy_sum,
V *xy_max, V *xy_min,
T *partial_wl,
const T *grad_tensor,
T *grad_x_tensor, T *grad_y_tensor)
{
int thread_count = 64;
int block_count_pins = (num_pins - 1 + thread_count) / thread_count;
int block_count_nets = (num_nets - 1 + thread_count) / thread_count;
dim3 block_size(thread_count, 2, 1);
if (grad_tensor)
{
// computeWeightedAverageWirelengthGradInterleaveNetByNet<<<block_count_pins, block_size>>>(
hipLaunchKernelGGL(( computeWeightedAverageWirelengthGradNetByNet), dim3(block_count_pins), dim3(thread_count), 0, 0,
x, y,
exp_xy, exp_nxy,
exp_xy_sum, exp_nxy_sum,
xyexp_xy_sum, xyexp_nxy_sum,
flat_netpin,
netpin_start,
net_mask,
num_nets,
num_pins,
inv_gamma,
grad_tensor,
grad_x_tensor, grad_y_tensor);
}
else
{
// compute max and min in one kernel (net by net)
// computeMaxMinInterleaveNetByNet<<<block_count_nets, block_size>>>(
hipLaunchKernelGGL(( computeMaxMinNetByNet), dim3(block_count_nets), dim3(thread_count), 0, 0,
x, y,
flat_netpin,
netpin_start,
net_mask,
num_nets,
xy_max,
xy_min);
// compute plus-minus exp, sum of plus-minus exp, and sum of x*exp in one CUDA kernel (net by net)
// corresponding to the plus and minus a b c kernels in the DREAMPlace paper
// compute partial wirelength at the same time
hipLaunchKernelGGL(( computeABCKernelsInterleaveAndWLNetByNet), dim3(block_count_nets), dim3(block_size), 0, 0,
// computeABCKernelsAndWLNetByNet<<<block_count_nets, thread_count>>>(
x,
flat_netpin,
netpin_start,
net_mask,
num_nets,
num_pins,
inv_gamma,
xy_max, xy_min,
exp_xy, exp_nxy,
exp_xy_sum, exp_nxy_sum,
xyexp_xy_sum, xyexp_nxy_sum,
partial_wl);
}
return 0;
}
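/* For reference, the weighted-average (WA) wirelength evaluated per net is
WL_x = (sum_i x_i * e^{x_i/gamma}) / (sum_i e^{x_i/gamma})
     - (sum_i x_i * e^{-x_i/gamma}) / (sum_i e^{-x_i/gamma})
(and likewise for y); as the names suggest, xyexp_xy_sum / xyexp_nxy_sum
accumulate the numerators and exp_xy_sum / exp_nxy_sum the denominators. */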
#define REGISTER_KERNEL_LAUNCHER(T, V) \
int instantiateComputeWeightedAverageWirelengthLauncher( \
const T *x, const T *y, \
const int *pin2net_map, \
const int *flat_netpin, \
const int *netpin_start, \
const unsigned char *net_mask, \
int num_nets, \
int num_pins, \
const T *inv_gamma, \
T *exp_xy, T *exp_nxy, \
T *exp_xy_sum, T *exp_nxy_sum, \
T *xyexp_xy_sum, T *xyexp_nxy_sum, \
V *xy_max, V *xy_min, \
T *partial_wl, \
const T *grad_tensor, \
T *grad_x_tensor, T *grad_y_tensor) \
{ \
return computeWeightedAverageWirelengthCudaLauncher( \
x, y, \
pin2net_map, \
flat_netpin, \
netpin_start, \
net_mask, \
num_nets, \
num_pins, \
inv_gamma, \
exp_xy, exp_nxy, \
exp_xy_sum, exp_nxy_sum, \
xyexp_xy_sum, xyexp_nxy_sum, \
xy_max, xy_min, \
partial_wl, \
grad_tensor, \
grad_x_tensor, grad_y_tensor); \
}
REGISTER_KERNEL_LAUNCHER(float, int);
REGISTER_KERNEL_LAUNCHER(double, int);
DREAMPLACE_END_NAMESPACE
| 9d08df0e95297861634dc7911b7ddac2f3ae7452.cu | #include <cfloat>
#include <stdio.h>
#include "assert.h"
#include "cuda_runtime.h"
#include "utility/src/utils.cuh"
#include "weighted_average_wirelength/src/functional_cuda.h"
DREAMPLACE_BEGIN_NAMESPACE
template <typename T, typename V>
int computeWeightedAverageWirelengthCudaLauncher(
const T *x, const T *y,
const int *pin2net_map,
const int *flat_netpin,
const int *netpin_start,
const unsigned char *net_mask,
int num_nets,
int num_pins,
const T *inv_gamma,
T *exp_xy, T *exp_nxy,
T *exp_xy_sum, T *exp_nxy_sum,
T *xyexp_xy_sum, T *xyexp_nxy_sum,
V *xy_max, V *xy_min,
T *partial_wl,
const T *grad_tensor,
T *grad_x_tensor, T *grad_y_tensor)
{
int thread_count = 64;
int block_count_pins = (num_pins - 1 + thread_count) / thread_count;
int block_count_nets = (num_nets - 1 + thread_count) / thread_count;
dim3 block_size(thread_count, 2, 1);
if (grad_tensor)
{
// computeWeightedAverageWirelengthGradInterleaveNetByNet<<<block_count_pins, block_size>>>(
computeWeightedAverageWirelengthGradNetByNet<<<block_count_pins, thread_count>>>(
x, y,
exp_xy, exp_nxy,
exp_xy_sum, exp_nxy_sum,
xyexp_xy_sum, xyexp_nxy_sum,
flat_netpin,
netpin_start,
net_mask,
num_nets,
num_pins,
inv_gamma,
grad_tensor,
grad_x_tensor, grad_y_tensor);
}
else
{
// compute max and min in one kernel (net by net)
// computeMaxMinInterleaveNetByNet<<<block_count_nets, block_size>>>(
computeMaxMinNetByNet<<<block_count_nets, thread_count>>>(
x, y,
flat_netpin,
netpin_start,
net_mask,
num_nets,
xy_max,
xy_min);
// compute plus-minus exp, sum of plus-minus exp, and sum of x*exp in one CUDA kernel (net by net)
// corresponding to the plus and minus a b c kernels in the DREAMPlace paper
// compute partial wirelength at the same time
computeABCKernelsInterleaveAndWLNetByNet<<<block_count_nets, block_size>>>(
// computeABCKernelsAndWLNetByNet<<<block_count_nets, thread_count>>>(
x,
flat_netpin,
netpin_start,
net_mask,
num_nets,
num_pins,
inv_gamma,
xy_max, xy_min,
exp_xy, exp_nxy,
exp_xy_sum, exp_nxy_sum,
xyexp_xy_sum, xyexp_nxy_sum,
partial_wl);
}
return 0;
}
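/* For reference, the weighted-average (WA) wirelength evaluated per net is
WL_x = (sum_i x_i * e^{x_i/gamma}) / (sum_i e^{x_i/gamma})
     - (sum_i x_i * e^{-x_i/gamma}) / (sum_i e^{-x_i/gamma})
(and likewise for y); as the names suggest, xyexp_xy_sum / xyexp_nxy_sum
accumulate the numerators and exp_xy_sum / exp_nxy_sum the denominators. */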
#define REGISTER_KERNEL_LAUNCHER(T, V) \
int instantiateComputeWeightedAverageWirelengthLauncher( \
const T *x, const T *y, \
const int *pin2net_map, \
const int *flat_netpin, \
const int *netpin_start, \
const unsigned char *net_mask, \
int num_nets, \
int num_pins, \
const T *inv_gamma, \
T *exp_xy, T *exp_nxy, \
T *exp_xy_sum, T *exp_nxy_sum, \
T *xyexp_xy_sum, T *xyexp_nxy_sum, \
V *xy_max, V *xy_min, \
T *partial_wl, \
const T *grad_tensor, \
T *grad_x_tensor, T *grad_y_tensor) \
{ \
return computeWeightedAverageWirelengthCudaLauncher( \
x, y, \
pin2net_map, \
flat_netpin, \
netpin_start, \
net_mask, \
num_nets, \
num_pins, \
inv_gamma, \
exp_xy, exp_nxy, \
exp_xy_sum, exp_nxy_sum, \
xyexp_xy_sum, xyexp_nxy_sum, \
xy_max, xy_min, \
partial_wl, \
grad_tensor, \
grad_x_tensor, grad_y_tensor); \
}
REGISTER_KERNEL_LAUNCHER(float, int);
REGISTER_KERNEL_LAUNCHER(double, int);
DREAMPLACE_END_NAMESPACE
|
fdc7fea894bf167ad19fafae9e987498dd1d8edb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*! \file hydro_cuda.cu
* \brief Definitions of functions used in all cuda integration algorithms. */
#ifdef CUDA
#include<stdio.h>
#include<math.h>
#include<cuda.h>
#include"global.h"
#include"global_cuda.h"
#include"hydro_cuda.h"
#include"gravity_cuda.h"
__global__ void Update_Conserved_Variables_1D(Real *dev_conserved, Real *dev_F, int n_cells, int x_off, int n_ghost, Real dx, Real xbound, Real dt, Real gamma, int n_fields)
{
int id;
#if defined(DE) || defined(STATIC_GRAV)
Real d, d_inv, vx;
#endif
#ifdef DE
Real vx_imo, vx_ipo, vy, vz, P;
#endif
#ifdef STATIC_GRAV
Real gx, d_n, d_inv_n, vx_n;
gx = 0.0;
#endif
Real dtodx = dt/dx;
// get a global thread ID
id = threadIdx.x + blockIdx.x * blockDim.x;
// threads corresponding to real cells do the calculation
if (id > n_ghost - 1 && id < n_cells-n_ghost)
{
#if defined(DE) || defined(STATIC_GRAV)
d = dev_conserved[ id];
d_inv = 1.0 / d;
vx = dev_conserved[1*n_cells + id] * d_inv;
#endif
#ifdef DE
vy = dev_conserved[2*n_cells + id] * d_inv;
vz = dev_conserved[3*n_cells + id] * d_inv;
P = (dev_conserved[4*n_cells + id] - 0.5*d*(vx*vx + vy*vy + vz*vz)) * (gamma - 1.0);
vx_imo = dev_conserved[1*n_cells + id-1]/dev_conserved[id-1];
vx_ipo = dev_conserved[1*n_cells + id+1]/dev_conserved[id+1];
#endif
// update the conserved variable array
dev_conserved[ id] += dtodx * (dev_F[ id-1] - dev_F[ id]);
dev_conserved[ n_cells + id] += dtodx * (dev_F[ n_cells + id-1] - dev_F[ n_cells + id]);
dev_conserved[2*n_cells + id] += dtodx * (dev_F[2*n_cells + id-1] - dev_F[2*n_cells + id]);
dev_conserved[3*n_cells + id] += dtodx * (dev_F[3*n_cells + id-1] - dev_F[3*n_cells + id]);
dev_conserved[4*n_cells + id] += dtodx * (dev_F[4*n_cells + id-1] - dev_F[4*n_cells + id]);
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
dev_conserved[(5+i)*n_cells + id] += dtodx * (dev_F[(5+i)*n_cells + id-1] - dev_F[(5+i)*n_cells + id]);
}
#endif
#ifdef DE
dev_conserved[(n_fields-1)*n_cells + id] += dtodx * (dev_F[(n_fields-1)*n_cells + id-1] - dev_F[(n_fields-1)*n_cells + id])
+ dtodx * P * 0.5 * (vx_imo - vx_ipo);
#endif
#ifdef STATIC_GRAV // add gravitational source terms, time averaged from n to n+1
calc_g_1D(id, x_off, n_ghost, dx, xbound, &gx);
d_n = dev_conserved[ id];
d_inv_n = 1.0 / d_n;
vx_n = dev_conserved[1*n_cells + id] * d_inv_n;
dev_conserved[ n_cells + id] += 0.5*dt*gx*(d + d_n);
dev_conserved[4*n_cells + id] += 0.25*dt*gx*(d + d_n)*(vx + vx_n);
#endif
if (dev_conserved[id] != dev_conserved[id]) printf("%3d Thread crashed in final update. %f\n", id, dev_conserved[id]);
/*
d = dev_conserved[ id];
d_inv = 1.0 / d;
vx = dev_conserved[1*n_cells + id] * d_inv;
vy = dev_conserved[2*n_cells + id] * d_inv;
vz = dev_conserved[3*n_cells + id] * d_inv;
P = (dev_conserved[4*n_cells + id] - 0.5*d*(vx*vx + vy*vy + vz*vz)) * (gamma - 1.0);
if (P < 0.0) printf("%d Negative pressure after final update.\n", id);
*/
}
}
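/* For reference, the core update above is the conservative finite-volume step
u_i^{n+1} = u_i^n + (dt/dx) * (F_{i-1/2} - F_{i+1/2})
applied to each conserved field, with dev_F[id] holding the flux through the
right interface of cell id. */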
__global__ void Update_Conserved_Variables_2D(Real *dev_conserved, Real *dev_F_x, Real *dev_F_y, int nx, int ny, int x_off, int y_off, int n_ghost, Real dx, Real dy, Real xbound, Real ybound, Real dt, Real gamma, int n_fields)
{
int id, xid, yid, n_cells;
int imo, jmo;
#if defined (DE) || defined(STATIC_GRAV)
Real d, d_inv, vx, vy;
#endif
#ifdef DE
Real vx_imo, vx_ipo, vy_jmo, vy_jpo, vz, P;
int ipo, jpo;
#endif
#ifdef STATIC_GRAV
Real gx, gy, d_n, d_inv_n, vx_n, vy_n;
gx = 0.0;
gy = 0.0;
#endif
Real dtodx = dt/dx;
Real dtody = dt/dy;
n_cells = nx*ny;
// get a global thread ID
int blockId = blockIdx.x + blockIdx.y*gridDim.x;
id = threadIdx.x + blockId * blockDim.x;
yid = id / nx;
xid = id - yid*nx;
imo = xid-1 + yid*nx;
jmo = xid + (yid-1)*nx;
// threads corresponding to real cells do the calculation
if (xid > n_ghost-1 && xid < nx-n_ghost && yid > n_ghost-1 && yid < ny-n_ghost)
{
#if defined (DE) || defined (STATIC_GRAV)
d = dev_conserved[ id];
d_inv = 1.0 / d;
vx = dev_conserved[1*n_cells + id] * d_inv;
vy = dev_conserved[2*n_cells + id] * d_inv;
#endif
#ifdef DE
vz = dev_conserved[3*n_cells + id] * d_inv;
P = (dev_conserved[4*n_cells + id] - 0.5*d*(vx*vx + vy*vy + vz*vz)) * (gamma - 1.0);
ipo = xid+1 + yid*nx;
jpo = xid + (yid+1)*nx;
vx_imo = dev_conserved[1*n_cells + imo] / dev_conserved[imo];
vx_ipo = dev_conserved[1*n_cells + ipo] / dev_conserved[ipo];
vy_jmo = dev_conserved[2*n_cells + jmo] / dev_conserved[jmo];
vy_jpo = dev_conserved[2*n_cells + jpo] / dev_conserved[jpo];
#endif
// update the conserved variable array
dev_conserved[ id] += dtodx * (dev_F_x[ imo] - dev_F_x[ id])
+ dtody * (dev_F_y[ jmo] - dev_F_y[ id]);
dev_conserved[ n_cells + id] += dtodx * (dev_F_x[ n_cells + imo] - dev_F_x[ n_cells + id])
+ dtody * (dev_F_y[ n_cells + jmo] - dev_F_y[ n_cells + id]);
dev_conserved[2*n_cells + id] += dtodx * (dev_F_x[2*n_cells + imo] - dev_F_x[2*n_cells + id])
+ dtody * (dev_F_y[2*n_cells + jmo] - dev_F_y[2*n_cells + id]);
dev_conserved[3*n_cells + id] += dtodx * (dev_F_x[3*n_cells + imo] - dev_F_x[3*n_cells + id])
+ dtody * (dev_F_y[3*n_cells + jmo] - dev_F_y[3*n_cells + id]);
dev_conserved[4*n_cells + id] += dtodx * (dev_F_x[4*n_cells + imo] - dev_F_x[4*n_cells + id])
+ dtody * (dev_F_y[4*n_cells + jmo] - dev_F_y[4*n_cells + id]);
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
dev_conserved[(5+i)*n_cells + id] += dtodx * (dev_F_x[(5+i)*n_cells + imo] - dev_F_x[(5+i)*n_cells + id])
+ dtody * (dev_F_y[(5+i)*n_cells + jmo] - dev_F_y[(5+i)*n_cells + id]);
}
#endif
#ifdef DE
dev_conserved[(n_fields-1)*n_cells + id] += dtodx * (dev_F_x[(n_fields-1)*n_cells + imo] - dev_F_x[(n_fields-1)*n_cells + id])
+ dtody * (dev_F_y[(n_fields-1)*n_cells + jmo] - dev_F_y[(n_fields-1)*n_cells + id])
+ 0.5*P*(dtodx*(vx_imo-vx_ipo) + dtody*(vy_jmo-vy_jpo));
#endif
#ifdef STATIC_GRAV
// calculate the gravitational acceleration as a function of x & y position
calc_g_2D(xid, yid, x_off, y_off, n_ghost, dx, dy, xbound, ybound, &gx, &gy);
// add gravitational source terms, time averaged from n to n+1
d_n = dev_conserved[ id];
d_inv_n = 1.0 / d_n;
vx_n = dev_conserved[1*n_cells + id] * d_inv_n;
vy_n = dev_conserved[2*n_cells + id] * d_inv_n;
dev_conserved[ n_cells + id] += 0.5*dt*gx*(d + d_n);
dev_conserved[2*n_cells + id] += 0.5*dt*gy*(d + d_n);
dev_conserved[4*n_cells + id] += 0.25*dt*gx*(d + d_n)*(vx + vx_n)
+ 0.25*dt*gy*(d + d_n)*(vy + vy_n);
#endif
if (dev_conserved[id] < 0.0 || dev_conserved[id] != dev_conserved[id]) {
printf("%3d %3d Thread crashed in final update. %f %f %f\n", xid, yid, dtodx*(dev_F_x[imo]-dev_F_x[id]), dtody*(dev_F_y[jmo]-dev_F_y[id]), dev_conserved[id]);
}
/*
d = dev_conserved[ id];
d_inv = 1.0 / d;
vx = dev_conserved[1*n_cells + id] * d_inv;
vy = dev_conserved[2*n_cells + id] * d_inv;
vz = dev_conserved[3*n_cells + id] * d_inv;
P = (dev_conserved[4*n_cells + id] - 0.5*d*(vx*vx + vy*vy + vz*vz)) * (gamma - 1.0);
if (P < 0.0)
printf("%3d %3d Negative pressure after final update. %f %f %f %f\n", xid, yid, dev_conserved[4*n_cells + id], 0.5*d*vx*vx, 0.5*d*vy*vy, P);
*/
}
}
__global__ void Update_Conserved_Variables_3D(Real *dev_conserved, Real *dev_F_x, Real *dev_F_y, Real *dev_F_z,
int nx, int ny, int nz, int x_off, int y_off, int z_off, int n_ghost,
Real dx, Real dy, Real dz, Real xbound, Real ybound, Real zbound, Real dt,
Real gamma, int n_fields, Real dens_floor )
{
int id, xid, yid, zid, n_cells;
int imo, jmo, kmo;
#if defined (DE) || defined(STATIC_GRAV) || defined(GRAVITY)
Real d, d_inv, vx, vy, vz;
#endif
#ifdef DE
Real vx_imo, vx_ipo, vy_jmo, vy_jpo, vz_kmo, vz_kpo, P, E, GE, E_kin;
int ipo, jpo, kpo;
#endif
#ifdef STATIC_GRAV
Real gx, gy, gz, d_n, d_inv_n, vx_n, vy_n, vz_n;
gx = 0.0;
gy = 0.0;
gz = 0.0;
#endif
#ifdef GRAVITY
#ifndef GRAVITY_CPU
Real gx, gy, gz, d_n, d_inv_n, vx_n, vy_n, vz_n;
Real pot_l, pot_r;
int id_l, id_r;
gx = 0.0;
gy = 0.0;
gz = 0.0;
int field_pot;
#ifdef DE
field_pot = n_fields - 2;
#endif //DE
#ifndef DE
field_pot = n_fields - 1;
#endif //DE
#endif //GRAVITY_CPU
#endif //GRAVITY
Real dtodx = dt/dx;
Real dtody = dt/dy;
Real dtodz = dt/dz;
n_cells = nx*ny*nz;
// get a global thread ID
id = threadIdx.x + blockIdx.x * blockDim.x;
zid = id / (nx*ny);
yid = (id - zid*nx*ny) / nx;
xid = id - zid*nx*ny - yid*nx;
imo = xid-1 + yid*nx + zid*nx*ny;
jmo = xid + (yid-1)*nx + zid*nx*ny;
kmo = xid + yid*nx + (zid-1)*nx*ny;
// threads corresponding to real cells do the calculation
if (xid > n_ghost-1 && xid < nx-n_ghost && yid > n_ghost-1 && yid < ny-n_ghost && zid > n_ghost-1 && zid < nz-n_ghost)
{
#if defined (DE) || defined(STATIC_GRAV) || defined(GRAVITY)
d = dev_conserved[ id];
d_inv = 1.0 / d;
vx = dev_conserved[1*n_cells + id] * d_inv;
vy = dev_conserved[2*n_cells + id] * d_inv;
vz = dev_conserved[3*n_cells + id] * d_inv;
#endif
#ifdef DE
// E and GE are declared only under DE, so read them inside this block
E = dev_conserved[4*n_cells + id];
GE = fmin(dev_conserved[(n_fields-1)*n_cells + id], 1e-6);
// P = (dev_conserved[4*n_cells + id] - 0.5*d*(vx*vx + vy*vy + vz*vz)) * (gamma - 1.0);
// if (P < 0.0) P = dev_conserved[(n_fields-1)*n_cells + id] * (gamma - 1.0);
E_kin = 0.5 * d * ( vx*vx + vy*vy + vz*vz );
P = Get_Pressure_Dual_Energy( E, E - E_kin, GE, gamma );
if (d < 0.0 || d != d) printf("Negative density before final update.\n");
if (P < 0.0) printf("%d Negative pressure before final update.\n", id);
ipo = xid+1 + yid*nx + zid*nx*ny;
jpo = xid + (yid+1)*nx + zid*nx*ny;
kpo = xid + yid*nx + (zid+1)*nx*ny;
vx_imo = dev_conserved[1*n_cells + imo] / dev_conserved[imo];
vx_ipo = dev_conserved[1*n_cells + ipo] / dev_conserved[ipo];
vy_jmo = dev_conserved[2*n_cells + jmo] / dev_conserved[jmo];
vy_jpo = dev_conserved[2*n_cells + jpo] / dev_conserved[jpo];
vz_kmo = dev_conserved[3*n_cells + kmo] / dev_conserved[kmo];
vz_kpo = dev_conserved[3*n_cells + kpo] / dev_conserved[kpo];
#endif
// // update the conserved variable array
dev_conserved[ id] += dtodx * (dev_F_x[ imo] - dev_F_x[ id])
+ dtody * (dev_F_y[ jmo] - dev_F_y[ id])
+ dtodz * (dev_F_z[ kmo] - dev_F_z[ id]);
dev_conserved[ n_cells + id] += dtodx * (dev_F_x[ n_cells + imo] - dev_F_x[ n_cells + id])
+ dtody * (dev_F_y[ n_cells + jmo] - dev_F_y[ n_cells + id])
+ dtodz * (dev_F_z[ n_cells + kmo] - dev_F_z[ n_cells + id]);
dev_conserved[2*n_cells + id] += dtodx * (dev_F_x[2*n_cells + imo] - dev_F_x[2*n_cells + id])
+ dtody * (dev_F_y[2*n_cells + jmo] - dev_F_y[2*n_cells + id])
+ dtodz * (dev_F_z[2*n_cells + kmo] - dev_F_z[2*n_cells + id]);
dev_conserved[3*n_cells + id] += dtodx * (dev_F_x[3*n_cells + imo] - dev_F_x[3*n_cells + id])
+ dtody * (dev_F_y[3*n_cells + jmo] - dev_F_y[3*n_cells + id])
+ dtodz * (dev_F_z[3*n_cells + kmo] - dev_F_z[3*n_cells + id]);
dev_conserved[4*n_cells + id] += dtodx * (dev_F_x[4*n_cells + imo] - dev_F_x[4*n_cells + id])
+ dtody * (dev_F_y[4*n_cells + jmo] - dev_F_y[4*n_cells + id])
+ dtodz * (dev_F_z[4*n_cells + kmo] - dev_F_z[4*n_cells + id]);
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
dev_conserved[(5+i)*n_cells + id] += dtodx * (dev_F_x[(5+i)*n_cells + imo] - dev_F_x[(5+i)*n_cells + id])
+ dtody * (dev_F_y[(5+i)*n_cells + jmo] - dev_F_y[(5+i)*n_cells + id])
+ dtodz * (dev_F_z[(5+i)*n_cells + kmo] - dev_F_z[(5+i)*n_cells + id]);
}
#endif
#ifdef DE
dev_conserved[(n_fields-1)*n_cells + id] += dtodx * (dev_F_x[(n_fields-1)*n_cells + imo] - dev_F_x[(n_fields-1)*n_cells + id])
+ dtody * (dev_F_y[(n_fields-1)*n_cells + jmo] - dev_F_y[(n_fields-1)*n_cells + id])
+ dtodz * (dev_F_z[(n_fields-1)*n_cells + kmo] - dev_F_z[(n_fields-1)*n_cells + id])
+ 0.5*P*(dtodx*(vx_imo-vx_ipo) + dtody*(vy_jmo-vy_jpo) + dtodz*(vz_kmo-vz_kpo));
#endif
#ifdef STATIC_GRAV
calc_g_3D(xid, yid, zid, x_off, y_off, z_off, n_ghost, dx, dy, dz, xbound, ybound, zbound, &gx, &gy, &gz);
d_n = dev_conserved[ id];
d_inv_n = 1.0 / d_n;
vx_n = dev_conserved[1*n_cells + id] * d_inv_n;
vy_n = dev_conserved[2*n_cells + id] * d_inv_n;
vz_n = dev_conserved[3*n_cells + id] * d_inv_n;
dev_conserved[ n_cells + id] += 0.5*dt*gx*(d + d_n);
dev_conserved[2*n_cells + id] += 0.5*dt*gy*(d + d_n);
dev_conserved[3*n_cells + id] += 0.5*dt*gz*(d + d_n);
dev_conserved[4*n_cells + id] += 0.25*dt*gx*(d + d_n)*(vx + vx_n)
+ 0.25*dt*gy*(d + d_n)*(vy + vy_n)
+ 0.25*dt*gz*(d + d_n)*(vz + vz_n);
#endif
#ifdef DENSITY_FLOOR
if ( dev_conserved[ id] < dens_floor ){
printf("###Thread density change %f -> %f \n", dev_conserved[ id], dens_floor );
dev_conserved[ id] = dens_floor;
}
#endif
#ifdef GRAVITY
#ifndef GRAVITY_CPU
d_n = dev_conserved[ id];
d_inv_n = 1.0 / d_n;
vx_n = dev_conserved[1*n_cells + id] * d_inv_n;
vy_n = dev_conserved[2*n_cells + id] * d_inv_n;
vz_n = dev_conserved[3*n_cells + id] * d_inv_n;
// Calculate the negative gradient of the potential
// Get X component of the gravity field
id_l = (xid-1) + (yid)*nx + (zid)*nx*ny;
id_r = (xid+1) + (yid)*nx + (zid)*nx*ny;
pot_l = dev_conserved[field_pot*n_cells + id_l];
pot_r = dev_conserved[field_pot*n_cells + id_r];
gx = -0.5*( pot_r - pot_l ) / dx;
// Get Y component of the gravity field
id_l = (xid) + (yid-1)*nx + (zid)*nx*ny;
id_r = (xid) + (yid+1)*nx + (zid)*nx*ny;
pot_l = dev_conserved[field_pot*n_cells + id_l];
pot_r = dev_conserved[field_pot*n_cells + id_r];
gy = -0.5*( pot_r - pot_l ) / dy;
// Get Z component of the gravity field
id_l = (xid) + (yid)*nx + (zid-1)*nx*ny;
id_r = (xid) + (yid)*nx + (zid+1)*nx*ny;
pot_l = dev_conserved[field_pot*n_cells + id_l];
pot_r = dev_conserved[field_pot*n_cells + id_r];
gz = -0.5*( pot_r - pot_l ) / dz;
dev_conserved[ n_cells + id] += 0.5*dt*gx*(d + d_n);
dev_conserved[2*n_cells + id] += 0.5*dt*gy*(d + d_n);
dev_conserved[3*n_cells + id] += 0.5*dt*gz*(d + d_n);
dev_conserved[4*n_cells + id] += 0.5*dt*gx*(d*vx + d_n*vx_n) + 0.5*dt*gy*(d*vy + d_n*vy_n) + 0.5*dt*gz*(d*vz + d_n*vz_n);
#endif //GRAVITY_CPU
#endif //GRAVITY
#ifndef TEMPERATURE_FLOOR
if (dev_conserved[id] < 0.0 || dev_conserved[id] != dev_conserved[id] || dev_conserved[4*n_cells + id] < 0.0 || dev_conserved[4*n_cells+id] != dev_conserved[4*n_cells+id]) {
printf("%3d %3d %3d Thread crashed in final update. %e %e %e %e %e\n", xid+x_off, yid+y_off, zid+z_off, dev_conserved[id], dtodx*(dev_F_x[imo]-dev_F_x[id]), dtody*(dev_F_y[jmo]-dev_F_y[id]), dtodz*(dev_F_z[kmo]-dev_F_z[id]), dev_conserved[4*n_cells+id]);
}
#endif
/*
d = dev_conserved[ id];
d_inv = 1.0 / d;
vx = dev_conserved[1*n_cells + id] * d_inv;
vy = dev_conserved[2*n_cells + id] * d_inv;
vz = dev_conserved[3*n_cells + id] * d_inv;
P = (dev_conserved[4*n_cells + id] - 0.5*d*(vx*vx + vy*vy + vz*vz)) * (gamma - 1.0);
if (P < 0.0) printf("%3d %3d %3d Negative pressure after final update. %f %f %f %f %f\n", xid, yid, zid, dev_conserved[4*n_cells + id], 0.5*d*vx*vx, 0.5*d*vy*vy, 0.5*d*vz*vz, P);
*/
}
}
__global__ void Sync_Energies_1D(Real *dev_conserved, int n_cells, int n_ghost, Real gamma, int n_fields)
{
int id;
Real d, d_inv, vx, vy, vz, E;
Real ge1, ge2, Emax;
int im1, ip1;
// get a global thread ID
id = threadIdx.x + blockIdx.x * blockDim.x;
im1 = max(id-1, n_ghost);
ip1 = min(id+1, n_cells-n_ghost-1);
// threads corresponding to real cells do the calculation
if (id > n_ghost - 1 && id < n_cells-n_ghost)
{
// every thread collects the conserved variables it needs from global memory
d = dev_conserved[ id];
d_inv = 1.0 / d;
vx = dev_conserved[1*n_cells + id] * d_inv;
vy = dev_conserved[2*n_cells + id] * d_inv;
vz = dev_conserved[3*n_cells + id] * d_inv;
E = dev_conserved[4*n_cells + id];
// separately tracked internal energy
ge1 = dev_conserved[(n_fields-1)*n_cells + id];
// internal energy calculated from total energy
ge2 = dev_conserved[4*n_cells + id] - 0.5*d*(vx*vx + vy*vy + vz*vz);
// if the ratio of conservatively calculated internal energy to total energy
// is greater than 1/1000, use the conservatively calculated internal energy
// to do the internal energy update
if (ge2/E > 0.001) {
dev_conserved[(n_fields-1)*n_cells + id] = ge2;
ge1 = ge2;
}
// find the max nearby total energy
Emax = fmax(dev_conserved[4*n_cells + im1], E);
Emax = fmax(dev_conserved[4*n_cells + ip1], Emax);
// if the ratio of conservatively calculated internal energy to max nearby total energy
// is greater than 1/10, continue to use the conservatively calculated internal energy
if (ge2/Emax > 0.1) {
dev_conserved[(n_fields-1)*n_cells + id] = ge2;
}
// sync the total energy with the internal energy
else {
dev_conserved[4*n_cells + id] += ge1 - ge2;
}
/*
// if the conservatively calculated internal energy is greater than the estimate of the truncation error,
// use the internal energy computed from the total energy to do the update
//find the max nearby velocity difference (estimate of truncation error)
vmax = fmax(fabs(vx-dev_conserved[1*n_cells + im1]/dev_conserved[im1]), fabs(dev_conserved[1*n_cells + ip1]/dev_conserved[ip1]-vx));
//printf("%3d %f %f %f %f\n", id, ge1, ge2, vmax, 0.25*d*vmax*vmax);
if (ge2 > 0.25*d*vmax*vmax) {
dev_conserved[5*n_cells + id] = ge2;
ge1 = ge2;
}
//else printf("%d Using ge1 %f %f %f %f\n", id, ge1, ge2, vmax, 0.25*d*vmax*vmax);
*/
// calculate the pressure
//P = (dev_conserved[4*n_cells + id] - 0.5*d*(vx*vx + vy*vy + vz*vz)) * (gamma - 1.0);
//if (P < 0.0) printf("%d Negative pressure after internal energy sync. %f %f \n", id, ge1, ge2);
}
}
__global__ void Sync_Energies_2D(Real *dev_conserved, int nx, int ny, int n_ghost, Real gamma, int n_fields)
{
int id, xid, yid, n_cells;
Real d, d_inv, vx, vy, vz, E;
Real ge1, ge2, Emax;
int imo, ipo, jmo, jpo;
n_cells = nx*ny;
// get a global thread ID
int blockId = blockIdx.x + blockIdx.y*gridDim.x;
id = threadIdx.x + blockId * blockDim.x;
yid = id / nx;
xid = id - yid*nx;
imo = max(xid-1, n_ghost);
imo = imo + yid*nx;
ipo = min(xid+1, nx-n_ghost-1);
ipo = ipo + yid*nx;
jmo = max(yid-1, n_ghost);
jmo = xid + jmo*nx;
jpo = min(yid+1, ny-n_ghost-1);
jpo = xid + jpo*nx;
// threads corresponding to real cells do the calculation
if (xid > n_ghost-1 && xid < nx-n_ghost && yid > n_ghost-1 && yid < ny-n_ghost)
{
// every thread collects the conserved variables it needs from global memory
d = dev_conserved[ id];
d_inv = 1.0 / d;
vx = dev_conserved[1*n_cells + id] * d_inv;
vy = dev_conserved[2*n_cells + id] * d_inv;
vz = dev_conserved[3*n_cells + id] * d_inv;
E = dev_conserved[4*n_cells + id];
// separately tracked internal energy
ge1 = dev_conserved[(n_fields-1)*n_cells + id];
// internal energy calculated from total energy
ge2 = dev_conserved[4*n_cells + id] - 0.5*d*(vx*vx + vy*vy + vz*vz);
// if the ratio of conservatively calculated internal energy to total energy
// is greater than 1/1000, use the conservatively calculated internal energy
// to do the internal energy update
if (ge2/E > 0.001) {
dev_conserved[(n_fields-1)*n_cells + id] = ge2;
ge1 = ge2;
}
//find the max nearby total energy
Emax = fmax(dev_conserved[4*n_cells + imo], E);
Emax = fmax(Emax, dev_conserved[4*n_cells + ipo]);
Emax = fmax(Emax, dev_conserved[4*n_cells + jmo]);
Emax = fmax(Emax, dev_conserved[4*n_cells + jpo]);
// if the ratio of conservatively calculated internal energy to max nearby total energy
// is greater than 1/10, continue to use the conservatively calculated internal energy
if (ge2/Emax > 0.1) {
dev_conserved[(n_fields-1)*n_cells + id] = ge2;
}
// sync the total energy with the internal energy
else {
dev_conserved[4*n_cells + id] += ge1 - ge2;
}
// calculate the pressure
//Real P = (dev_conserved[4*n_cells + id] - 0.5*d*(vx*vx + vy*vy + vz*vz)) * (gamma - 1.0);
//if (P < 0.0) printf("%d Negative pressure after internal energy sync. %f %f \n", id, ge1, ge2);
}
}
__global__ void Sync_Energies_3D(Real *dev_conserved, int nx, int ny, int nz, int n_ghost, Real gamma, int n_fields)
{
int id, xid, yid, zid, n_cells;
Real d, d_inv, vx, vy, vz, E;
Real ge1, ge2, Emax;
int imo, ipo, jmo, jpo, kmo, kpo;
n_cells = nx*ny*nz;
// get a global thread ID
id = threadIdx.x + blockIdx.x * blockDim.x;
zid = id / (nx*ny);
yid = (id - zid*nx*ny) / nx;
xid = id - zid*nx*ny - yid*nx;
imo = max(xid-1, n_ghost);
imo = imo + yid*nx + zid*nx*ny;
ipo = min(xid+1, nx-n_ghost-1);
ipo = ipo + yid*nx + zid*nx*ny;
jmo = max(yid-1, n_ghost);
jmo = xid + jmo*nx + zid*nx*ny;
jpo = min(yid+1, ny-n_ghost-1);
jpo = xid + jpo*nx + zid*nx*ny;
kmo = max(zid-1, n_ghost);
kmo = xid + yid*nx + kmo*nx*ny;
kpo = min(zid+1, nz-n_ghost-1);
kpo = xid + yid*nx + kpo*nx*ny;
// threads corresponding to real cells do the calculation
if (xid > n_ghost-1 && xid < nx-n_ghost && yid > n_ghost-1 && yid < ny-n_ghost && zid > n_ghost-1 && zid < nz-n_ghost)
{
// every thread collects the conserved variables it needs from global memory
d = dev_conserved[ id];
d_inv = 1.0 / d;
vx = dev_conserved[1*n_cells + id] * d_inv;
vy = dev_conserved[2*n_cells + id] * d_inv;
vz = dev_conserved[3*n_cells + id] * d_inv;
E = dev_conserved[4*n_cells + id];
// don't do the energy sync if this thread has crashed
if (E < 0.0 || E != E) return;
// separately tracked internal energy
ge1 = dev_conserved[(n_fields-1)*n_cells + id];
// internal energy calculated from total energy
ge2 = dev_conserved[4*n_cells + id] - 0.5*d*(vx*vx + vy*vy + vz*vz);
// if the ratio of conservatively calculated internal energy to total energy
// is greater than 1/1000, use the conservatively calculated internal energy
// to do the internal energy update
if (ge2 > 0.0 && E > 0.0 && ge2/E > 0.001) {
dev_conserved[(n_fields-1)*n_cells + id] = ge2;
ge1 = ge2;
}
//find the max nearby total energy
Emax = fmax(dev_conserved[4*n_cells + imo], E);
Emax = fmax(Emax, dev_conserved[4*n_cells + ipo]);
Emax = fmax(Emax, dev_conserved[4*n_cells + jmo]);
Emax = fmax(Emax, dev_conserved[4*n_cells + jpo]);
Emax = fmax(Emax, dev_conserved[4*n_cells + kmo]);
Emax = fmax(Emax, dev_conserved[4*n_cells + kpo]);
// if the ratio of conservatively calculated internal energy to max nearby total energy
// is greater than 1/10, continue to use the conservatively calculated internal energy
if (ge2/Emax > 0.1 && ge2 > 0.0 && Emax > 0.0) {
dev_conserved[(n_fields-1)*n_cells + id] = ge2;
}
// sync the total energy with the internal energy
else {
if (ge1 > 0.0) dev_conserved[4*n_cells + id] += ge1 - ge2;
else dev_conserved[(n_fields-1)*n_cells+id] = ge2;
}
// calculate the pressure
//Real P = (dev_conserved[4*n_cells + id] - 0.5*d*(vx*vx + vy*vy + vz*vz)) * (gamma - 1.0);
//if (P < 0.0) printf("%3d %3d %3d Negative pressure after internal energy sync. %f %f %f\n", xid, yid, zid, P/(gamma-1.0), ge1, ge2);
}
}
#ifdef COSMOLOGY
__global__ void Apply_Temperature_Floor(Real *dev_conserved, int nx, int ny, int nz, int n_ghost, int n_fields, Real temp_floor )
{
int id, xid, yid, zid, n_cells;
// Real d, d_inv, vx, vy, vz, P, E;
// Real ge1, ge2, Emax;
// int imo, ipo, jmo, jpo, kmo, kpo;
n_cells = nx*ny*nz;
// get a global thread ID
id = threadIdx.x + blockIdx.x * blockDim.x;
zid = id / (nx*ny);
yid = (id - zid*nx*ny) / nx;
xid = id - zid*nx*ny - yid*nx;
// threads corresponding to real cells do the calculation
if (xid > n_ghost-1 && xid < nx-n_ghost && yid > n_ghost-1 && yid < ny-n_ghost && zid > n_ghost-1 && zid < nz-n_ghost)
{
// every thread collects the conserved variables it needs from global memory
Real dens, u;
dens = dev_conserved[ id];
u = dev_conserved[(n_fields-1)*n_cells + id];
Real temp = u / dens; // specific internal energy, used here as a temperature proxy in code units
Real u_new, delta_u;
if ( temp < temp_floor ){
temp = temp_floor;
u_new = temp * dens ;
delta_u = u_new - u;
// delta_u = delta_u / vel_0 / vel_0 * current_a * current_a;
dev_conserved[(n_fields-1)*n_cells + id] += delta_u;
dev_conserved[4*n_cells + id] += delta_u;
}
}
}
#endif
__global__ void Calc_dt_1D(Real *dev_conserved, int n_cells, int n_ghost, Real dx, Real *dti_array, Real gamma)
{
__shared__ Real max_dti[TPB];
Real d, d_inv, vx, vy, vz, P, cs;
int id, tid;
// get a global thread ID
id = threadIdx.x + blockIdx.x * blockDim.x;
// and a thread id within the block
tid = threadIdx.x;
// set shared memory to 0
max_dti[tid] = 0;
__syncthreads();
// threads corresponding to real cells do the calculation
if (id > n_ghost - 1 && id < n_cells-n_ghost)
{
// start timestep calculation here
// every thread collects the conserved variables it needs from global memory
d = dev_conserved[ id];
d_inv = 1.0 / d;
vx = dev_conserved[1*n_cells + id] * d_inv;
vy = dev_conserved[2*n_cells + id] * d_inv;
vz = dev_conserved[3*n_cells + id] * d_inv;
P = (dev_conserved[4*n_cells + id] - 0.5*d*(vx*vx + vy*vy + vz*vz)) * (gamma - 1.0);
P = fmax(P, (Real) TINY_NUMBER);
// find the max wavespeed in that cell, use it to calculate the inverse timestep
cs = sqrt(d_inv * gamma * P);
max_dti[tid] = (fabs(vx)+cs)/dx;
}
__syncthreads();
// do the reduction in shared memory (find the max inverse timestep in the block)
for (unsigned int s=1; s<blockDim.x; s*=2) {
if (tid % (2*s) == 0) {
max_dti[tid] = fmax(max_dti[tid], max_dti[tid + s]);
}
__syncthreads();
}
// write the result for this block to global memory
if (tid == 0) dti_array[blockIdx.x] = max_dti[0];
}
__global__ void Calc_dt_2D(Real *dev_conserved, int nx, int ny, int n_ghost, Real dx, Real dy, Real *dti_array, Real gamma)
{
__shared__ Real max_dti[TPB];
Real d, d_inv, vx, vy, vz, P, cs;
int id, tid, xid, yid, n_cells;
n_cells = nx*ny;
// get a global thread ID
int blockId = blockIdx.x + blockIdx.y*gridDim.x;
id = threadIdx.x + blockId * blockDim.x;
yid = id / nx;
xid = id - yid*nx;
// and a thread id within the block
tid = threadIdx.x;
// set shared memory to 0
max_dti[tid] = 0;
__syncthreads();
// threads corresponding to real cells do the calculation
if (xid > n_ghost-1 && xid < nx-n_ghost && yid > n_ghost-1 && yid < ny-n_ghost)
{
// every thread collects the conserved variables it needs from global memory
d = dev_conserved[ id];
d_inv = 1.0 / d;
vx = dev_conserved[1*n_cells + id] * d_inv;
vy = dev_conserved[2*n_cells + id] * d_inv;
vz = dev_conserved[3*n_cells + id] * d_inv;
P = (dev_conserved[4*n_cells + id] - 0.5*d*(vx*vx + vy*vy + vz*vz)) * (gamma - 1.0);
P = fmax(P, (Real) 1.0e-20);
// find the max wavespeed in that cell, use it to calculate the inverse timestep
cs = sqrt(d_inv * gamma * P);
max_dti[tid] = fmax((fabs(vx)+cs)/dx, (fabs(vy)+cs)/dy);
}
__syncthreads();
// do the reduction in shared memory (find the max inverse timestep in the block)
for (unsigned int s=1; s<blockDim.x; s*=2) {
if (tid % (2*s) == 0) {
max_dti[tid] = fmax(max_dti[tid], max_dti[tid + s]);
}
__syncthreads();
}
// write the result for this block to global memory
if (tid == 0) dti_array[blockId] = max_dti[0];
}
__global__ void Calc_dt_3D(Real *dev_conserved, int nx, int ny, int nz, int n_ghost, Real dx, Real dy, Real dz, Real *dti_array, Real gamma)
{
__shared__ Real max_dti[TPB];
Real d, d_inv, vx, vy, vz, E, P, cs;
int id, xid, yid, zid, n_cells;
int tid;
n_cells = nx*ny*nz;
// get a global thread ID
id = threadIdx.x + blockIdx.x * blockDim.x;
zid = id / (nx*ny);
yid = (id - zid*nx*ny) / nx;
xid = id - zid*nx*ny - yid*nx;
// and a thread id within the block
tid = threadIdx.x;
// set shared memory to 0
max_dti[tid] = 0;
__syncthreads();
// threads corresponding to real cells do the calculation
if (xid > n_ghost-1 && xid < nx-n_ghost && yid > n_ghost-1 && yid < ny-n_ghost && zid > n_ghost-1 && zid < nz-n_ghost)
{
// every thread collects the conserved variables it needs from global memory
d = dev_conserved[ id];
d_inv = 1.0 / d;
vx = dev_conserved[1*n_cells + id] * d_inv;
vy = dev_conserved[2*n_cells + id] * d_inv;
vz = dev_conserved[3*n_cells + id] * d_inv;
E = dev_conserved[4*n_cells + id];
P = (E - 0.5*d*(vx*vx + vy*vy + vz*vz)) * (gamma - 1.0);
cs = sqrt(d_inv * gamma * P);
max_dti[tid] = fmax((fabs(vx)+cs)/dx, (fabs(vy)+cs)/dy);
max_dti[tid] = fmax(max_dti[tid], (fabs(vz)+cs)/dz);
max_dti[tid] = fmax(max_dti[tid], 0);
}
__syncthreads();
// do the reduction in shared memory (find the max inverse timestep in the block)
for (unsigned int s=1; s<blockDim.x; s*=2) {
if (tid % (2*s) == 0) {
max_dti[tid] = fmax(max_dti[tid], max_dti[tid + s]);
}
__syncthreads();
}
// write the result for this block to global memory
if (tid == 0) dti_array[blockIdx.x] = max_dti[0];
}
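// Round-trip sketch of the flat-index decomposition used throughout these 3D
// kernels (hypothetical helper, not from the original file); it inverts
// id = xid + yid*nx + zid*nx*ny.
__device__ void Decompose_Index(int id, int nx, int ny, int *xid, int *yid, int *zid)
{
  *zid = id / (nx*ny);
  *yid = (id - (*zid)*nx*ny) / nx;
  *xid = id - (*zid)*nx*ny - (*yid)*nx;
}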
#ifdef DE
__device__ Real Get_Pressure_Dual_Energy( Real E, Real U_total, Real U_advected, Real gamma ){
Real U, P;
Real eta = 0.001;
if( U_total / E > eta ) U = U_total;
else U = U_advected;
P = U * (gamma - 1.0);
return P;
}
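// Worked example (illustrative): with eta = 0.001, a cell where E = 1.0 and
// U_total = E - E_kin = 5.0e-4 has U_total/E = 5.0e-4 < eta, so the advected
// value U_advected is used instead; had U_total been 2.0e-3, the conservatively
// derived value would win and P = 2.0e-3 * (gamma - 1.0).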
#endif //DE
#endif //CUDA
| fdc7fea894bf167ad19fafae9e987498dd1d8edb.cu | /*! \file hydro_cuda.cu
* \brief Definitions of functions used in all cuda integration algorithms. */
#ifdef CUDA
#include<stdio.h>
#include<math.h>
#include<cuda.h>
#include"global.h"
#include"global_cuda.h"
#include"hydro_cuda.h"
#include"gravity_cuda.h"
__global__ void Update_Conserved_Variables_1D(Real *dev_conserved, Real *dev_F, int n_cells, int x_off, int n_ghost, Real dx, Real xbound, Real dt, Real gamma, int n_fields)
{
int id;
#if defined(DE) || defined(STATIC_GRAV)
Real d, d_inv, vx;
#endif
#ifdef DE
Real vx_imo, vx_ipo, vy, vz, P;
#endif
#ifdef STATIC_GRAV
Real gx, d_n, d_inv_n, vx_n;
gx = 0.0;
#endif
Real dtodx = dt/dx;
// get a global thread ID
id = threadIdx.x + blockIdx.x * blockDim.x;
// threads corresponding to real cells do the calculation
if (id > n_ghost - 1 && id < n_cells-n_ghost)
{
#if defined(DE) || defined(STATIC_GRAV)
d = dev_conserved[ id];
d_inv = 1.0 / d;
vx = dev_conserved[1*n_cells + id] * d_inv;
#endif
#ifdef DE
vy = dev_conserved[2*n_cells + id] * d_inv;
vz = dev_conserved[3*n_cells + id] * d_inv;
P = (dev_conserved[4*n_cells + id] - 0.5*d*(vx*vx + vy*vy + vz*vz)) * (gamma - 1.0);
vx_imo = dev_conserved[1*n_cells + id-1]/dev_conserved[id-1];
vx_ipo = dev_conserved[1*n_cells + id+1]/dev_conserved[id+1];
#endif
// update the conserved variable array
dev_conserved[ id] += dtodx * (dev_F[ id-1] - dev_F[ id]);
dev_conserved[ n_cells + id] += dtodx * (dev_F[ n_cells + id-1] - dev_F[ n_cells + id]);
dev_conserved[2*n_cells + id] += dtodx * (dev_F[2*n_cells + id-1] - dev_F[2*n_cells + id]);
dev_conserved[3*n_cells + id] += dtodx * (dev_F[3*n_cells + id-1] - dev_F[3*n_cells + id]);
dev_conserved[4*n_cells + id] += dtodx * (dev_F[4*n_cells + id-1] - dev_F[4*n_cells + id]);
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
dev_conserved[(5+i)*n_cells + id] += dtodx * (dev_F[(5+i)*n_cells + id-1] - dev_F[(5+i)*n_cells + id]);
}
#endif
#ifdef DE
dev_conserved[(n_fields-1)*n_cells + id] += dtodx * (dev_F[(n_fields-1)*n_cells + id-1] - dev_F[(n_fields-1)*n_cells + id])
+ dtodx * P * 0.5 * (vx_imo - vx_ipo);
#endif
#ifdef STATIC_GRAV // add gravitational source terms, time averaged from n to n+1
calc_g_1D(id, x_off, n_ghost, dx, xbound, &gx);
d_n = dev_conserved[ id];
d_inv_n = 1.0 / d_n;
vx_n = dev_conserved[1*n_cells + id] * d_inv_n;
dev_conserved[ n_cells + id] += 0.5*dt*gx*(d + d_n);
dev_conserved[4*n_cells + id] += 0.25*dt*gx*(d + d_n)*(vx + vx_n);
#endif
if (dev_conserved[id] != dev_conserved[id]) printf("%3d Thread crashed in final update. %f\n", id, dev_conserved[id]);
/*
d = dev_conserved[ id];
d_inv = 1.0 / d;
vx = dev_conserved[1*n_cells + id] * d_inv;
vy = dev_conserved[2*n_cells + id] * d_inv;
vz = dev_conserved[3*n_cells + id] * d_inv;
P = (dev_conserved[4*n_cells + id] - 0.5*d*(vx*vx + vy*vy + vz*vz)) * (gamma - 1.0);
if (P < 0.0) printf("%d Negative pressure after final update.\n", id);
*/
}
}
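// Launch sketch (illustrative; grid sizing follows the usual one-thread-per-cell
// convention, and TPB is the block size used elsewhere in this file):
//   int n_blocks = (n_cells + TPB - 1) / TPB;
//   Update_Conserved_Variables_1D<<<n_blocks, TPB>>>(dev_conserved, dev_F, n_cells,
//       x_off, n_ghost, dx, xbound, dt, gamma, n_fields);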
__global__ void Update_Conserved_Variables_2D(Real *dev_conserved, Real *dev_F_x, Real *dev_F_y, int nx, int ny, int x_off, int y_off, int n_ghost, Real dx, Real dy, Real xbound, Real ybound, Real dt, Real gamma, int n_fields)
{
int id, xid, yid, n_cells;
int imo, jmo;
#if defined (DE) || defined(STATIC_GRAV)
Real d, d_inv, vx, vy;
#endif
#ifdef DE
Real vx_imo, vx_ipo, vy_jmo, vy_jpo, vz, P;
int ipo, jpo;
#endif
#ifdef STATIC_GRAV
Real gx, gy, d_n, d_inv_n, vx_n, vy_n;
gx = 0.0;
gy = 0.0;
#endif
Real dtodx = dt/dx;
Real dtody = dt/dy;
n_cells = nx*ny;
// get a global thread ID
int blockId = blockIdx.x + blockIdx.y*gridDim.x;
id = threadIdx.x + blockId * blockDim.x;
yid = id / nx;
xid = id - yid*nx;
imo = xid-1 + yid*nx;
jmo = xid + (yid-1)*nx;
// threads corresponding to real cells do the calculation
if (xid > n_ghost-1 && xid < nx-n_ghost && yid > n_ghost-1 && yid < ny-n_ghost)
{
#if defined (DE) || defined (STATIC_GRAV)
d = dev_conserved[ id];
d_inv = 1.0 / d;
vx = dev_conserved[1*n_cells + id] * d_inv;
vy = dev_conserved[2*n_cells + id] * d_inv;
#endif
#ifdef DE
vz = dev_conserved[3*n_cells + id] * d_inv;
P = (dev_conserved[4*n_cells + id] - 0.5*d*(vx*vx + vy*vy + vz*vz)) * (gamma - 1.0);
ipo = xid+1 + yid*nx;
jpo = xid + (yid+1)*nx;
vx_imo = dev_conserved[1*n_cells + imo] / dev_conserved[imo];
vx_ipo = dev_conserved[1*n_cells + ipo] / dev_conserved[ipo];
vy_jmo = dev_conserved[2*n_cells + jmo] / dev_conserved[jmo];
vy_jpo = dev_conserved[2*n_cells + jpo] / dev_conserved[jpo];
#endif
// update the conserved variable array
dev_conserved[ id] += dtodx * (dev_F_x[ imo] - dev_F_x[ id])
+ dtody * (dev_F_y[ jmo] - dev_F_y[ id]);
dev_conserved[ n_cells + id] += dtodx * (dev_F_x[ n_cells + imo] - dev_F_x[ n_cells + id])
+ dtody * (dev_F_y[ n_cells + jmo] - dev_F_y[ n_cells + id]);
dev_conserved[2*n_cells + id] += dtodx * (dev_F_x[2*n_cells + imo] - dev_F_x[2*n_cells + id])
+ dtody * (dev_F_y[2*n_cells + jmo] - dev_F_y[2*n_cells + id]);
dev_conserved[3*n_cells + id] += dtodx * (dev_F_x[3*n_cells + imo] - dev_F_x[3*n_cells + id])
+ dtody * (dev_F_y[3*n_cells + jmo] - dev_F_y[3*n_cells + id]);
dev_conserved[4*n_cells + id] += dtodx * (dev_F_x[4*n_cells + imo] - dev_F_x[4*n_cells + id])
+ dtody * (dev_F_y[4*n_cells + jmo] - dev_F_y[4*n_cells + id]);
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
dev_conserved[(5+i)*n_cells + id] += dtodx * (dev_F_x[(5+i)*n_cells + imo] - dev_F_x[(5+i)*n_cells + id])
+ dtody * (dev_F_y[(5+i)*n_cells + jmo] - dev_F_y[(5+i)*n_cells + id]);
}
#endif
#ifdef DE
dev_conserved[(n_fields-1)*n_cells + id] += dtodx * (dev_F_x[(n_fields-1)*n_cells + imo] - dev_F_x[(n_fields-1)*n_cells + id])
+ dtody * (dev_F_y[(n_fields-1)*n_cells + jmo] - dev_F_y[(n_fields-1)*n_cells + id])
+ 0.5*P*(dtodx*(vx_imo-vx_ipo) + dtody*(vy_jmo-vy_jpo));
#endif
#ifdef STATIC_GRAV
// calculate the gravitational acceleration as a function of x & y position
calc_g_2D(xid, yid, x_off, y_off, n_ghost, dx, dy, xbound, ybound, &gx, &gy);
// add gravitational source terms, time averaged from n to n+1
d_n = dev_conserved[ id];
d_inv_n = 1.0 / d_n;
vx_n = dev_conserved[1*n_cells + id] * d_inv_n;
vy_n = dev_conserved[2*n_cells + id] * d_inv_n;
dev_conserved[ n_cells + id] += 0.5*dt*gx*(d + d_n);
dev_conserved[2*n_cells + id] += 0.5*dt*gy*(d + d_n);
dev_conserved[4*n_cells + id] += 0.25*dt*gx*(d + d_n)*(vx + vx_n)
+ 0.25*dt*gy*(d + d_n)*(vy + vy_n);
#endif
if (dev_conserved[id] < 0.0 || dev_conserved[id] != dev_conserved[id]) {
printf("%3d %3d Thread crashed in final update. %f %f %f\n", xid, yid, dtodx*(dev_F_x[imo]-dev_F_x[id]), dtody*(dev_F_y[jmo]-dev_F_y[id]), dev_conserved[id]);
}
/*
d = dev_conserved[ id];
d_inv = 1.0 / d;
vx = dev_conserved[1*n_cells + id] * d_inv;
vy = dev_conserved[2*n_cells + id] * d_inv;
vz = dev_conserved[3*n_cells + id] * d_inv;
P = (dev_conserved[4*n_cells + id] - 0.5*d*(vx*vx + vy*vy + vz*vz)) * (gamma - 1.0);
if (P < 0.0)
printf("%3d %3d Negative pressure after final update. %f %f %f %f\n", xid, yid, dev_conserved[4*n_cells + id], 0.5*d*vx*vx, 0.5*d*vy*vy, P);
*/
}
}
__global__ void Update_Conserved_Variables_3D(Real *dev_conserved, Real *dev_F_x, Real *dev_F_y, Real *dev_F_z,
int nx, int ny, int nz, int x_off, int y_off, int z_off, int n_ghost,
Real dx, Real dy, Real dz, Real xbound, Real ybound, Real zbound, Real dt,
Real gamma, int n_fields, Real dens_floor )
{
int id, xid, yid, zid, n_cells;
int imo, jmo, kmo;
#if defined (DE) || defined(STATIC_GRAV) || defined(GRAVITY)
Real d, d_inv, vx, vy, vz;
#endif
#ifdef DE
Real vx_imo, vx_ipo, vy_jmo, vy_jpo, vz_kmo, vz_kpo, P, E, GE, E_kin;
int ipo, jpo, kpo;
#endif
#ifdef STATIC_GRAV
Real gx, gy, gz, d_n, d_inv_n, vx_n, vy_n, vz_n;
gx = 0.0;
gy = 0.0;
gz = 0.0;
#endif
#ifdef GRAVITY
#ifndef GRAVITY_CPU
Real gx, gy, gz, d_n, d_inv_n, vx_n, vy_n, vz_n;
Real pot_l, pot_r;
int id_l, id_r;
gx = 0.0;
gy = 0.0;
gz = 0.0;
int field_pot;
#ifdef DE
field_pot = n_fields - 2;
#endif //DE
#ifndef DE
field_pot = n_fields - 1;
#endif //DE
#endif //GRAVITY_CPU
#endif //GRAVITY
Real dtodx = dt/dx;
Real dtody = dt/dy;
Real dtodz = dt/dz;
n_cells = nx*ny*nz;
// get a global thread ID
id = threadIdx.x + blockIdx.x * blockDim.x;
zid = id / (nx*ny);
yid = (id - zid*nx*ny) / nx;
xid = id - zid*nx*ny - yid*nx;
imo = xid-1 + yid*nx + zid*nx*ny;
jmo = xid + (yid-1)*nx + zid*nx*ny;
kmo = xid + yid*nx + (zid-1)*nx*ny;
// threads corresponding to real cells do the calculation
if (xid > n_ghost-1 && xid < nx-n_ghost && yid > n_ghost-1 && yid < ny-n_ghost && zid > n_ghost-1 && zid < nz-n_ghost)
{
#if defined (DE) || defined(STATIC_GRAV) || defined(GRAVITY)
d = dev_conserved[ id];
d_inv = 1.0 / d;
vx = dev_conserved[1*n_cells + id] * d_inv;
vy = dev_conserved[2*n_cells + id] * d_inv;
vz = dev_conserved[3*n_cells + id] * d_inv;
#endif
#ifdef DE
E = dev_conserved[4*n_cells + id];
GE = fmin(dev_conserved[(n_fields-1)*n_cells + id], 1e-6);
// P = (dev_conserved[4*n_cells + id] - 0.5*d*(vx*vx + vy*vy + vz*vz)) * (gamma - 1.0);
// if (P < 0.0) P = dev_conserved[(n_fields-1)*n_cells + id] * (gamma - 1.0);
E_kin = 0.5 * d * ( vx*vx + vy*vy + vz*vz );
P = Get_Pressure_Dual_Energy( E, E - E_kin, GE, gamma );
if (d < 0.0 || d != d) printf("Negative density before final update.\n");
if (P < 0.0) printf("%d Negative pressure before final update.\n", id);
ipo = xid+1 + yid*nx + zid*nx*ny;
jpo = xid + (yid+1)*nx + zid*nx*ny;
kpo = xid + yid*nx + (zid+1)*nx*ny;
vx_imo = dev_conserved[1*n_cells + imo] / dev_conserved[imo];
vx_ipo = dev_conserved[1*n_cells + ipo] / dev_conserved[ipo];
vy_jmo = dev_conserved[2*n_cells + jmo] / dev_conserved[jmo];
vy_jpo = dev_conserved[2*n_cells + jpo] / dev_conserved[jpo];
vz_kmo = dev_conserved[3*n_cells + kmo] / dev_conserved[kmo];
vz_kpo = dev_conserved[3*n_cells + kpo] / dev_conserved[kpo];
#endif
// // update the conserved variable array
dev_conserved[ id] += dtodx * (dev_F_x[ imo] - dev_F_x[ id])
+ dtody * (dev_F_y[ jmo] - dev_F_y[ id])
+ dtodz * (dev_F_z[ kmo] - dev_F_z[ id]);
dev_conserved[ n_cells + id] += dtodx * (dev_F_x[ n_cells + imo] - dev_F_x[ n_cells + id])
+ dtody * (dev_F_y[ n_cells + jmo] - dev_F_y[ n_cells + id])
+ dtodz * (dev_F_z[ n_cells + kmo] - dev_F_z[ n_cells + id]);
dev_conserved[2*n_cells + id] += dtodx * (dev_F_x[2*n_cells + imo] - dev_F_x[2*n_cells + id])
+ dtody * (dev_F_y[2*n_cells + jmo] - dev_F_y[2*n_cells + id])
+ dtodz * (dev_F_z[2*n_cells + kmo] - dev_F_z[2*n_cells + id]);
dev_conserved[3*n_cells + id] += dtodx * (dev_F_x[3*n_cells + imo] - dev_F_x[3*n_cells + id])
+ dtody * (dev_F_y[3*n_cells + jmo] - dev_F_y[3*n_cells + id])
+ dtodz * (dev_F_z[3*n_cells + kmo] - dev_F_z[3*n_cells + id]);
dev_conserved[4*n_cells + id] += dtodx * (dev_F_x[4*n_cells + imo] - dev_F_x[4*n_cells + id])
+ dtody * (dev_F_y[4*n_cells + jmo] - dev_F_y[4*n_cells + id])
+ dtodz * (dev_F_z[4*n_cells + kmo] - dev_F_z[4*n_cells + id]);
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
dev_conserved[(5+i)*n_cells + id] += dtodx * (dev_F_x[(5+i)*n_cells + imo] - dev_F_x[(5+i)*n_cells + id])
+ dtody * (dev_F_y[(5+i)*n_cells + jmo] - dev_F_y[(5+i)*n_cells + id])
+ dtodz * (dev_F_z[(5+i)*n_cells + kmo] - dev_F_z[(5+i)*n_cells + id]);
}
#endif
#ifdef DE
dev_conserved[(n_fields-1)*n_cells + id] += dtodx * (dev_F_x[(n_fields-1)*n_cells + imo] - dev_F_x[(n_fields-1)*n_cells + id])
+ dtody * (dev_F_y[(n_fields-1)*n_cells + jmo] - dev_F_y[(n_fields-1)*n_cells + id])
+ dtodz * (dev_F_z[(n_fields-1)*n_cells + kmo] - dev_F_z[(n_fields-1)*n_cells + id])
+ 0.5*P*(dtodx*(vx_imo-vx_ipo) + dtody*(vy_jmo-vy_jpo) + dtodz*(vz_kmo-vz_kpo));
#endif
#ifdef STATIC_GRAV
calc_g_3D(xid, yid, zid, x_off, y_off, z_off, n_ghost, dx, dy, dz, xbound, ybound, zbound, &gx, &gy, &gz);
d_n = dev_conserved[ id];
d_inv_n = 1.0 / d_n;
vx_n = dev_conserved[1*n_cells + id] * d_inv_n;
vy_n = dev_conserved[2*n_cells + id] * d_inv_n;
vz_n = dev_conserved[3*n_cells + id] * d_inv_n;
dev_conserved[ n_cells + id] += 0.5*dt*gx*(d + d_n);
dev_conserved[2*n_cells + id] += 0.5*dt*gy*(d + d_n);
dev_conserved[3*n_cells + id] += 0.5*dt*gz*(d + d_n);
dev_conserved[4*n_cells + id] += 0.25*dt*gx*(d + d_n)*(vx + vx_n)
+ 0.25*dt*gy*(d + d_n)*(vy + vy_n)
+ 0.25*dt*gz*(d + d_n)*(vz + vz_n);
#endif
#ifdef DENSITY_FLOOR
if ( dev_conserved[ id] < dens_floor ){
printf("###Thread density change %f -> %f \n", dev_conserved[ id], dens_floor );
dev_conserved[ id] = dens_floor;
}
#endif
#ifdef GRAVITY
#ifndef GRAVITY_CPU
d_n = dev_conserved[ id];
d_inv_n = 1.0 / d_n;
vx_n = dev_conserved[1*n_cells + id] * d_inv_n;
vy_n = dev_conserved[2*n_cells + id] * d_inv_n;
vz_n = dev_conserved[3*n_cells + id] * d_inv_n;
// Calculate the -gradient of potential
// Get X component of the gravity field
id_l = (xid-1) + (yid)*nx + (zid)*nx*ny;
id_r = (xid+1) + (yid)*nx + (zid)*nx*ny;
pot_l = dev_conserved[field_pot*n_cells + id_l];
pot_r = dev_conserved[field_pot*n_cells + id_r];
gx = -0.5*( pot_r - pot_l ) / dx;
// Get Y component of the gravity field
id_l = (xid) + (yid-1)*nx + (zid)*nx*ny;
id_r = (xid) + (yid+1)*nx + (zid)*nx*ny;
pot_l = dev_conserved[field_pot*n_cells + id_l];
pot_r = dev_conserved[field_pot*n_cells + id_r];
gy = -0.5*( pot_r - pot_l ) / dy;
// Get Z component of the gravity field
id_l = (xid) + (yid)*nx + (zid-1)*nx*ny;
id_r = (xid) + (yid)*nx + (zid+1)*nx*ny;
pot_l = dev_conserved[field_pot*n_cells + id_l];
pot_r = dev_conserved[field_pot*n_cells + id_r];
gz = -0.5*( pot_r - pot_l ) / dz;
dev_conserved[ n_cells + id] += 0.5*dt*gx*(d + d_n);
dev_conserved[2*n_cells + id] += 0.5*dt*gy*(d + d_n);
dev_conserved[3*n_cells + id] += 0.5*dt*gz*(d + d_n);
dev_conserved[4*n_cells + id] += 0.5*dt*gx*(d*vx + d_n*vx_n) + 0.5*dt*gy*(d*vy + d_n*vy_n) + 0.5*dt*gz*(d*vz + d_n*vz_n);
#endif //GRAVITY_CPU
#endif //GRAVITY
#ifndef TEMPERATURE_FLOOR
if (dev_conserved[id] < 0.0 || dev_conserved[id] != dev_conserved[id] || dev_conserved[4*n_cells + id] < 0.0 || dev_conserved[4*n_cells+id] != dev_conserved[4*n_cells+id]) {
printf("%3d %3d %3d Thread crashed in final update. %e %e %e %e %e\n", xid+x_off, yid+y_off, zid+z_off, dev_conserved[id], dtodx*(dev_F_x[imo]-dev_F_x[id]), dtody*(dev_F_y[jmo]-dev_F_y[id]), dtodz*(dev_F_z[kmo]-dev_F_z[id]), dev_conserved[4*n_cells+id]);
}
#endif
/*
d = dev_conserved[ id];
d_inv = 1.0 / d;
vx = dev_conserved[1*n_cells + id] * d_inv;
vy = dev_conserved[2*n_cells + id] * d_inv;
vz = dev_conserved[3*n_cells + id] * d_inv;
P = (dev_conserved[4*n_cells + id] - 0.5*d*(vx*vx + vy*vy + vz*vz)) * (gamma - 1.0);
if (P < 0.0) printf("%3d %3d %3d Negative pressure after final update. %f %f %f %f %f\n", xid, yid, zid, dev_conserved[4*n_cells + id], 0.5*d*vx*vx, 0.5*d*vy*vy, 0.5*d*vz*vz, P);
*/
}
}
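// Minimal sketch of the centered difference used by the GRAVITY branch above
// (hypothetical helper, not from the original file): the acceleration along an
// axis is g = -(pot_r - pot_l) / (2*dx) for potentials two cells apart.
__device__ Real Potential_Gradient(Real pot_l, Real pot_r, Real dx)
{
  return -0.5 * (pot_r - pot_l) / dx;
}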
__global__ void Sync_Energies_1D(Real *dev_conserved, int n_cells, int n_ghost, Real gamma, int n_fields)
{
int id;
Real d, d_inv, vx, vy, vz, E;
Real ge1, ge2, Emax;
int im1, ip1;
// get a global thread ID
id = threadIdx.x + blockIdx.x * blockDim.x;
im1 = max(id-1, n_ghost);
ip1 = min(id+1, n_cells-n_ghost-1);
// threads corresponding to real cells do the calculation
if (id > n_ghost - 1 && id < n_cells-n_ghost)
{
// every thread collects the conserved variables it needs from global memory
d = dev_conserved[ id];
d_inv = 1.0 / d;
vx = dev_conserved[1*n_cells + id] * d_inv;
vy = dev_conserved[2*n_cells + id] * d_inv;
vz = dev_conserved[3*n_cells + id] * d_inv;
E = dev_conserved[4*n_cells + id];
// separately tracked internal energy
ge1 = dev_conserved[(n_fields-1)*n_cells + id];
// internal energy calculated from total energy
ge2 = dev_conserved[4*n_cells + id] - 0.5*d*(vx*vx + vy*vy + vz*vz);
// if the ratio of conservatively calculated internal energy to total energy
// is greater than 1/1000, use the conservatively calculated internal energy
// to do the internal energy update
if (ge2/E > 0.001) {
dev_conserved[(n_fields-1)*n_cells + id] = ge2;
ge1 = ge2;
}
// find the max nearby total energy
Emax = fmax(dev_conserved[4*n_cells + im1], E);
Emax = fmax(dev_conserved[4*n_cells + ip1], Emax);
// if the ratio of conservatively calculated internal energy to max nearby total energy
// is greater than 1/10, continue to use the conservatively calculated internal energy
if (ge2/Emax > 0.1) {
dev_conserved[(n_fields-1)*n_cells + id] = ge2;
}
// sync the total energy with the internal energy
else {
dev_conserved[4*n_cells + id] += ge1 - ge2;
}
/*
// if the conservatively calculated internal energy is greater than the estimate of the truncation error,
// use the internal energy computed from the total energy to do the update
//find the max nearby velocity difference (estimate of truncation error)
vmax = fmax(fabs(vx-dev_conserved[1*n_cells + im1]/dev_conserved[im1]), fabs(dev_conserved[1*n_cells + ip1]/dev_conserved[ip1]-vx));
//printf("%3d %f %f %f %f\n", id, ge1, ge2, vmax, 0.25*d*vmax*vmax);
if (ge2 > 0.25*d*vmax*vmax) {
dev_conserved[5*n_cells + id] = ge2;
ge1 = ge2;
}
//else printf("%d Using ge1 %f %f %f %f\n", id, ge1, ge2, vmax, 0.25*d*vmax*vmax);
*/
// calculate the pressure
//P = (dev_conserved[4*n_cells + id] - 0.5*d*(vx*vx + vy*vy + vz*vz)) * (gamma - 1.0);
//if (P < 0.0) printf("%d Negative pressure after internal energy sync. %f %f \n", id, ge1, ge2);
}
}
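// Worked example (illustrative) of the two thresholds above: with E = 1.0 and
// ge2 = 0.005, ge2/E = 0.005 > 0.001 so ge2 replaces the advected value; if the
// largest neighboring total energy is Emax = 0.02, then ge2/Emax = 0.25 > 0.1
// and ge2 is kept, otherwise the total energy is re-synced via E += ge1 - ge2.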
__global__ void Sync_Energies_2D(Real *dev_conserved, int nx, int ny, int n_ghost, Real gamma, int n_fields)
{
int id, xid, yid, n_cells;
Real d, d_inv, vx, vy, vz, E;
Real ge1, ge2, Emax;
int imo, ipo, jmo, jpo;
n_cells = nx*ny;
// get a global thread ID
int blockId = blockIdx.x + blockIdx.y*gridDim.x;
id = threadIdx.x + blockId * blockDim.x;
yid = id / nx;
xid = id - yid*nx;
imo = max(xid-1, n_ghost);
imo = imo + yid*nx;
ipo = min(xid+1, nx-n_ghost-1);
ipo = ipo + yid*nx;
jmo = max(yid-1, n_ghost);
jmo = xid + jmo*nx;
jpo = min(yid+1, ny-n_ghost-1);
jpo = xid + jpo*nx;
// threads corresponding to real cells do the calculation
if (xid > n_ghost-1 && xid < nx-n_ghost && yid > n_ghost-1 && yid < ny-n_ghost)
{
// every thread collects the conserved variables it needs from global memory
d = dev_conserved[ id];
d_inv = 1.0 / d;
vx = dev_conserved[1*n_cells + id] * d_inv;
vy = dev_conserved[2*n_cells + id] * d_inv;
vz = dev_conserved[3*n_cells + id] * d_inv;
E = dev_conserved[4*n_cells + id];
// separately tracked internal energy
ge1 = dev_conserved[(n_fields-1)*n_cells + id];
// internal energy calculated from total energy
ge2 = dev_conserved[4*n_cells + id] - 0.5*d*(vx*vx + vy*vy + vz*vz);
// if the ratio of conservatively calculated internal energy to total energy
// is greater than 1/1000, use the conservatively calculated internal energy
// to do the internal energy update
if (ge2/E > 0.001) {
dev_conserved[(n_fields-1)*n_cells + id] = ge2;
ge1 = ge2;
}
//find the max nearby total energy
Emax = fmax(dev_conserved[4*n_cells + imo], E);
Emax = fmax(Emax, dev_conserved[4*n_cells + ipo]);
Emax = fmax(Emax, dev_conserved[4*n_cells + jmo]);
Emax = fmax(Emax, dev_conserved[4*n_cells + jpo]);
// if the ratio of conservatively calculated internal energy to max nearby total energy
// is greater than 1/10, continue to use the conservatively calculated internal energy
if (ge2/Emax > 0.1) {
dev_conserved[(n_fields-1)*n_cells + id] = ge2;
}
// sync the total energy with the internal energy
else {
dev_conserved[4*n_cells + id] += ge1 - ge2;
}
// calculate the pressure
//Real P = (dev_conserved[4*n_cells + id] - 0.5*d*(vx*vx + vy*vy + vz*vz)) * (gamma - 1.0);
//if (P < 0.0) printf("%d Negative pressure after internal energy sync. %f %f \n", id, ge1, ge2);
}
}
__global__ void Sync_Energies_3D(Real *dev_conserved, int nx, int ny, int nz, int n_ghost, Real gamma, int n_fields)
{
int id, xid, yid, zid, n_cells;
Real d, d_inv, vx, vy, vz, E;
Real ge1, ge2, Emax;
int imo, ipo, jmo, jpo, kmo, kpo;
n_cells = nx*ny*nz;
// get a global thread ID
id = threadIdx.x + blockIdx.x * blockDim.x;
zid = id / (nx*ny);
yid = (id - zid*nx*ny) / nx;
xid = id - zid*nx*ny - yid*nx;
imo = max(xid-1, n_ghost);
imo = imo + yid*nx + zid*nx*ny;
ipo = min(xid+1, nx-n_ghost-1);
ipo = ipo + yid*nx + zid*nx*ny;
jmo = max(yid-1, n_ghost);
jmo = xid + jmo*nx + zid*nx*ny;
jpo = min(yid+1, ny-n_ghost-1);
jpo = xid + jpo*nx + zid*nx*ny;
kmo = max(zid-1, n_ghost);
kmo = xid + yid*nx + kmo*nx*ny;
kpo = min(zid+1, nz-n_ghost-1);
kpo = xid + yid*nx + kpo*nx*ny;
// threads corresponding to real cells do the calculation
if (xid > n_ghost-1 && xid < nx-n_ghost && yid > n_ghost-1 && yid < ny-n_ghost && zid > n_ghost-1 && zid < nz-n_ghost)
{
// every thread collects the conserved variables it needs from global memory
d = dev_conserved[ id];
d_inv = 1.0 / d;
vx = dev_conserved[1*n_cells + id] * d_inv;
vy = dev_conserved[2*n_cells + id] * d_inv;
vz = dev_conserved[3*n_cells + id] * d_inv;
E = dev_conserved[4*n_cells + id];
// don't do the energy sync if this thread has crashed
if (E < 0.0 || E != E) return;
// separately tracked internal energy
ge1 = dev_conserved[(n_fields-1)*n_cells + id];
// internal energy calculated from total energy
ge2 = dev_conserved[4*n_cells + id] - 0.5*d*(vx*vx + vy*vy + vz*vz);
// if the ratio of conservatively calculated internal energy to total energy
// is greater than 1/1000, use the conservatively calculated internal energy
// to do the internal energy update
if (ge2 > 0.0 && E > 0.0 && ge2/E > 0.001) {
dev_conserved[(n_fields-1)*n_cells + id] = ge2;
ge1 = ge2;
}
//find the max nearby total energy
Emax = fmax(dev_conserved[4*n_cells + imo], E);
Emax = fmax(Emax, dev_conserved[4*n_cells + ipo]);
Emax = fmax(Emax, dev_conserved[4*n_cells + jmo]);
Emax = fmax(Emax, dev_conserved[4*n_cells + jpo]);
Emax = fmax(Emax, dev_conserved[4*n_cells + kmo]);
Emax = fmax(Emax, dev_conserved[4*n_cells + kpo]);
// if the ratio of conservatively calculated internal energy to max nearby total energy
// is greater than 1/10, continue to use the conservatively calculated internal energy
if (ge2/Emax > 0.1 && ge2 > 0.0 && Emax > 0.0) {
dev_conserved[(n_fields-1)*n_cells + id] = ge2;
}
// sync the total energy with the internal energy
else {
if (ge1 > 0.0) dev_conserved[4*n_cells + id] += ge1 - ge2;
else dev_conserved[(n_fields-1)*n_cells+id] = ge2;
}
// calculate the pressure
//Real P = (dev_conserved[4*n_cells + id] - 0.5*d*(vx*vx + vy*vy + vz*vz)) * (gamma - 1.0);
//if (P < 0.0) printf("%3d %3d %3d Negative pressure after internal energy sync. %f %f %f\n", xid, yid, zid, P/(gamma-1.0), ge1, ge2);
}
}
#ifdef COSMOLOGY
__global__ void Apply_Temperature_Floor(Real *dev_conserved, int nx, int ny, int nz, int n_ghost, int n_fields, Real temp_floor )
{
int id, xid, yid, zid, n_cells;
// Real d, d_inv, vx, vy, vz, P, E;
// Real ge1, ge2, Emax;
// int imo, ipo, jmo, jpo, kmo, kpo;
n_cells = nx*ny*nz;
// get a global thread ID
id = threadIdx.x + blockIdx.x * blockDim.x;
zid = id / (nx*ny);
yid = (id - zid*nx*ny) / nx;
xid = id - zid*nx*ny - yid*nx;
// threads corresponding to real cells do the calculation
if (xid > n_ghost-1 && xid < nx-n_ghost && yid > n_ghost-1 && yid < ny-n_ghost && zid > n_ghost-1 && zid < nz-n_ghost)
{
// every thread collects the conserved variables it needs from global memory
Real dens, u;
dens = dev_conserved[ id];
u = dev_conserved[(n_fields-1)*n_cells + id];
Real temp = u / dens;
Real u_new, delta_u;
if ( temp < temp_floor ){
temp = temp_floor;
u_new = temp * dens ;
delta_u = u_new - u;
// delta_u = delta_u / vel_0 / vel_0 * current_a * current_a;
dev_conserved[(n_fields-1)*n_cells + id] += delta_u;
dev_conserved[4*n_cells + id] += delta_u;
}
}
}
#endif
__global__ void Calc_dt_1D(Real *dev_conserved, int n_cells, int n_ghost, Real dx, Real *dti_array, Real gamma)
{
__shared__ Real max_dti[TPB];
Real d, d_inv, vx, vy, vz, P, cs;
int id, tid;
// get a global thread ID
id = threadIdx.x + blockIdx.x * blockDim.x;
// and a thread id within the block
tid = threadIdx.x;
// set shared memory to 0
max_dti[tid] = 0;
__syncthreads();
// threads corresponding to real cells do the calculation
if (id > n_ghost - 1 && id < n_cells-n_ghost)
{
// start timestep calculation here
// every thread collects the conserved variables it needs from global memory
d = dev_conserved[ id];
d_inv = 1.0 / d;
vx = dev_conserved[1*n_cells + id] * d_inv;
vy = dev_conserved[2*n_cells + id] * d_inv;
vz = dev_conserved[3*n_cells + id] * d_inv;
P = (dev_conserved[4*n_cells + id] - 0.5*d*(vx*vx + vy*vy + vz*vz)) * (gamma - 1.0);
P = fmax(P, (Real) TINY_NUMBER);
// find the max wavespeed in that cell, use it to calculate the inverse timestep
cs = sqrt(d_inv * gamma * P);
max_dti[tid] = (fabs(vx)+cs)/dx;
}
__syncthreads();
// do the reduction in shared memory (find the max inverse timestep in the block)
for (unsigned int s=1; s<blockDim.x; s*=2) {
if (tid % (2*s) == 0) {
max_dti[tid] = fmax(max_dti[tid], max_dti[tid + s]);
}
__syncthreads();
}
// write the result for this block to global memory
if (tid == 0) dti_array[blockIdx.x] = max_dti[0];
}
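// Warp-level maximum sketch using shuffles (illustrative; a different reduction
// technique than the strided shared-memory loop above, assuming a block size
// that is a multiple of 32 — a full block reduction would further combine the
// per-warp results through shared memory):
__device__ Real Warp_Max_dti(Real val)
{
  for (int offset = 16; offset > 0; offset >>= 1)
    val = fmax(val, __shfl_down_sync(0xffffffff, val, offset));
  return val;  // lane 0 of each warp holds the warp-wide maximum
}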
__global__ void Calc_dt_2D(Real *dev_conserved, int nx, int ny, int n_ghost, Real dx, Real dy, Real *dti_array, Real gamma)
{
__shared__ Real max_dti[TPB];
Real d, d_inv, vx, vy, vz, P, cs;
int id, tid, xid, yid, n_cells;
n_cells = nx*ny;
// get a global thread ID
int blockId = blockIdx.x + blockIdx.y*gridDim.x;
id = threadIdx.x + blockId * blockDim.x;
yid = id / nx;
xid = id - yid*nx;
// and a thread id within the block
tid = threadIdx.x;
// set shared memory to 0
max_dti[tid] = 0;
__syncthreads();
// threads corresponding to real cells do the calculation
if (xid > n_ghost-1 && xid < nx-n_ghost && yid > n_ghost-1 && yid < ny-n_ghost)
{
// every thread collects the conserved variables it needs from global memory
d = dev_conserved[ id];
d_inv = 1.0 / d;
vx = dev_conserved[1*n_cells + id] * d_inv;
vy = dev_conserved[2*n_cells + id] * d_inv;
vz = dev_conserved[3*n_cells + id] * d_inv;
P = (dev_conserved[4*n_cells + id] - 0.5*d*(vx*vx + vy*vy + vz*vz)) * (gamma - 1.0);
P = fmax(P, (Real) 1.0e-20);
// find the max wavespeed in that cell, use it to calculate the inverse timestep
cs = sqrt(d_inv * gamma * P);
max_dti[tid] = fmax((fabs(vx)+cs)/dx, (fabs(vy)+cs)/dy);
}
__syncthreads();
// do the reduction in shared memory (find the max inverse timestep in the block)
for (unsigned int s=1; s<blockDim.x; s*=2) {
if (tid % (2*s) == 0) {
max_dti[tid] = fmax(max_dti[tid], max_dti[tid + s]);
}
__syncthreads();
}
// write the result for this block to global memory
if (tid == 0) dti_array[blockId] = max_dti[0];
}
__global__ void Calc_dt_3D(Real *dev_conserved, int nx, int ny, int nz, int n_ghost, Real dx, Real dy, Real dz, Real *dti_array, Real gamma)
{
__shared__ Real max_dti[TPB];
Real d, d_inv, vx, vy, vz, E, P, cs;
int id, xid, yid, zid, n_cells;
int tid;
n_cells = nx*ny*nz;
// get a global thread ID
id = threadIdx.x + blockIdx.x * blockDim.x;
zid = id / (nx*ny);
yid = (id - zid*nx*ny) / nx;
xid = id - zid*nx*ny - yid*nx;
// and a thread id within the block
tid = threadIdx.x;
// set shared memory to 0
max_dti[tid] = 0;
__syncthreads();
// threads corresponding to real cells do the calculation
if (xid > n_ghost-1 && xid < nx-n_ghost && yid > n_ghost-1 && yid < ny-n_ghost && zid > n_ghost-1 && zid < nz-n_ghost)
{
// every thread collects the conserved variables it needs from global memory
d = dev_conserved[ id];
d_inv = 1.0 / d;
vx = dev_conserved[1*n_cells + id] * d_inv;
vy = dev_conserved[2*n_cells + id] * d_inv;
vz = dev_conserved[3*n_cells + id] * d_inv;
E = dev_conserved[4*n_cells + id];
P = (E - 0.5*d*(vx*vx + vy*vy + vz*vz)) * (gamma - 1.0);
cs = sqrt(d_inv * gamma * P);
max_dti[tid] = fmax((fabs(vx)+cs)/dx, (fabs(vy)+cs)/dy);
max_dti[tid] = fmax(max_dti[tid], (fabs(vz)+cs)/dz);
max_dti[tid] = fmax(max_dti[tid], 0);
}
__syncthreads();
// do the reduction in shared memory (find the max inverse timestep in the block)
for (unsigned int s=1; s<blockDim.x; s*=2) {
if (tid % (2*s) == 0) {
max_dti[tid] = fmax(max_dti[tid], max_dti[tid + s]);
}
__syncthreads();
}
// write the result for this block to global memory
if (tid == 0) dti_array[blockIdx.x] = max_dti[0];
}
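// Host-side sketch (illustrative; dev_dti and host_dti are hypothetical buffers
// of at least n_blocks entries) showing how the per-block results are reduced:
Real Calc_dt_3D_Host(Real *dev_conserved, Real *dev_dti, Real *host_dti,
                     int nx, int ny, int nz, int n_ghost,
                     Real dx, Real dy, Real dz, Real gamma)
{
  int n_cells = nx*ny*nz;
  int n_blocks = (n_cells + TPB - 1) / TPB;
  Calc_dt_3D<<<n_blocks, TPB>>>(dev_conserved, nx, ny, nz, n_ghost, dx, dy, dz, dev_dti, gamma);
  cudaMemcpy(host_dti, dev_dti, n_blocks*sizeof(Real), cudaMemcpyDeviceToHost);
  Real max_dti = 0.0;
  for (int i = 0; i < n_blocks; i++) max_dti = fmax(max_dti, host_dti[i]);
  return max_dti;  // caller converts to dt via a CFL factor
}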
#ifdef DE
__device__ Real Get_Pressure_Dual_Energy( Real E, Real U_total, Real U_advected, Real gamma ){
Real U, P;
Real eta = 0.001;
if( U_total / E > eta ) U = U_total;
else U = U_advected;
P = U * (gamma - 1.0);
return P;
}
#endif //DE
#endif //CUDA
|
5f613f7079da034f6495e3ee55f78fe44aeb1ddb.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "batcherBitonicMergesort64.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *d_out = NULL;
hipMalloc(&d_out, XSIZE*YSIZE*sizeof(float));
const float *d_in = NULL;
hipMalloc(&d_in, XSIZE*YSIZE*sizeof(float));
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(batcherBitonicMergesort64, dim3(gridBlock), dim3(threadBlock), 0, 0, d_out, d_in);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(batcherBitonicMergesort64, dim3(gridBlock), dim3(threadBlock), 0, 0, d_out, d_in);
}
// drain the warm-up launches before starting the timer
hipDeviceSynchronize();
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(batcherBitonicMergesort64, dim3(gridBlock), dim3(threadBlock), 0, 0, d_out, d_in);
}
// wait for the queued kernels to finish so the timer measures execution, not just launch overhead
hipDeviceSynchronize();
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
// free the per-configuration buffers so the sweep does not leak device memory
hipFree(d_out);
hipFree((void*)d_in);
}
}} | 5f613f7079da034f6495e3ee55f78fe44aeb1ddb.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "batcherBitonicMergesort64.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *d_out = NULL;
cudaMalloc(&d_out, XSIZE*YSIZE*sizeof(float));
const float *d_in = NULL;
cudaMalloc(&d_in, XSIZE*YSIZE*sizeof(float));
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
batcherBitonicMergesort64<<<gridBlock,threadBlock>>>(d_out,d_in);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
batcherBitonicMergesort64<<<gridBlock,threadBlock>>>(d_out,d_in);
}
// drain the warm-up launches before starting the timer
cudaDeviceSynchronize();
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
batcherBitonicMergesort64<<<gridBlock,threadBlock>>>(d_out,d_in);
}
// wait for the queued kernels to finish so the timer measures execution, not just launch overhead
cudaDeviceSynchronize();
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
// free the per-configuration buffers so the sweep does not leak device memory
cudaFree(d_out);
cudaFree((void*)d_in);
}
}} |
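// Alternative timing sketch with CUDA events (illustrative; time_kernel_ms is a
// hypothetical helper, not part of the original benchmark). Events bracket the
// work on the device itself, so no host-side clock/synchronize pairing is needed.
float time_kernel_ms(dim3 grid, dim3 block, float *d_out, const float *d_in, int iters)
{
  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);
  cudaEventRecord(start);
  for (int i = 0; i < iters; i++)
    batcherBitonicMergesort64<<<grid, block>>>(d_out, d_in);
  cudaEventRecord(stop);
  cudaEventSynchronize(stop);  // blocks until the stop event (and all prior work) completes
  float ms = 0.0f;
  cudaEventElapsedTime(&ms, start, stop);
  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  return ms;
}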
4f6edf48c4aa47f2aea1ae1c4cb3f875557ec735.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "star2d2r-512-8-512_kernel.hu"
__device__ double __sbref_wrap(double *sb, size_t index) { return sb[index]; }
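// Reference (untiled) form of the same star-shaped 2D stencil of radius 2
// (illustrative only, with a hypothetical kernel name; the generated kernel
// below instead fuses 8 timesteps and streams rows through rotating registers
// with a shared-memory line buffer):
__global__ void star2d2r_naive(double *out, const double *in, int dimsize)
{
  int c2 = blockIdx.x * blockDim.x + threadIdx.x;
  int c1 = blockIdx.y * blockDim.y + threadIdx.y;
  if (c1 < 2 || c1 >= dimsize - 2 || c2 < 2 || c2 >= dimsize - 2) return;
  out[c1*dimsize + c2] =
      0.09371f * in[(c1-2)*dimsize + c2] + 0.09374f * in[(c1-1)*dimsize + c2]
    + 0.09376f * in[c1*dimsize + (c2-2)] + 0.09372f * in[c1*dimsize + (c2-1)]
    + 0.25001f * in[c1*dimsize + c2]
    + 0.09377f * in[c1*dimsize + (c2+1)] + 0.09373f * in[c1*dimsize + (c2+2)]
    + 0.09375f * in[(c1+1)*dimsize + c2] + 0.09378f * in[(c1+2)*dimsize + c2];
}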
__global__ void kernel0_8(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 8;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 480;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_3_3;
double __reg_3_4;
double __reg_4_0;
double __reg_4_1;
double __reg_4_2;
double __reg_4_3;
double __reg_4_4;
double __reg_5_0;
double __reg_5_1;
double __reg_5_2;
double __reg_5_3;
double __reg_5_4;
double __reg_6_0;
double __reg_6_1;
double __reg_6_2;
double __reg_6_3;
double __reg_6_4;
double __reg_7_0;
double __reg_7_1;
double __reg_7_2;
double __reg_7_3;
double __reg_7_4;
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __storeValid = __writeValid8;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC4(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC5(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC6(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC7(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
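// The unrolled code below streams the computation down dimension c1: each
// __LOAD pulls one new input row into a rotating window of five registers
// (__reg_t_0 .. __reg_t_4), each __CALCk macro advances that row through one of
// the 8 fused timesteps (guarded by its __writeValidk halo test), and __STORE
// emits a row only after all eight temporal stages have swept over it.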
if (__c1Id == 0)
{
__LOAD(__reg_7_0, 0);
__LOAD(__reg_7_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_7_0, __reg_7_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_7_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_7_0, __reg_7_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_7_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_7_0, __reg_7_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_7_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_7_0, __reg_7_1, __reg_3_2, __reg_3_3, __reg_3_4);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_7_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_7_0, __reg_7_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_7_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_7_0, __reg_7_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_7_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_7_0, __reg_7_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_7_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(5, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(6, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(7, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(8, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__LOAD(__reg_0_0, 25);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_1, 26);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(10, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__LOAD(__reg_0_2, 27);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(11, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__LOAD(__reg_0_3, 28);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(12, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, 29);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(13, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__LOAD(__reg_0_0, 30);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_1, 31);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(15, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__LOAD(__reg_0_2, 32);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(16, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 25);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 26);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_2, 27);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_3, 28);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__LOAD(__reg_0_4, 29);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 30);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__LOAD(__reg_0_1, 31);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, 32);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(16, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
}
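// Warm-up complete: reset the shared-memory pointer to the first buffer for
// the streaming phase. The last tile along c1 (__c1Id == __side1Num - 1)
// also owns the bottom boundary and takes the draining path below; all
// other tiles use the plain streaming loop in the else branch.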
__c_sb = __c_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
for (__h = 33; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 16, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 16, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 16, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 16, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__h++;
}
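// Drain for the last tile: each branch handles one possible remainder
// (__h + k == __c1Len - __side1Len * __c1Id + __halo1 * 2, k = 0..4),
// flushing the 16-row-deep pipeline and feeding the freshly loaded bottom
// rows (__reg_0_*) directly into the deeper stages in place of outputs
// that will never be produced. The leading `if (0) {}` is just the
// generator's trick for keeping every case an `else if`.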
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 15, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 13, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 12, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 11, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 10, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1, __reg_0_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 8, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1, __reg_0_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 7, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_0_1);
__STORE(__h - 6, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_0_1, __reg_0_2);
__STORE(__h - 5, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_0_1);
__STORE(__h - 3, __reg_7_3, __reg_7_4, __reg_7_0, __reg_0_1, __reg_0_2);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 15, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 13, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 12, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 11, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 10, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 8, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 7, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2, __reg_0_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 6, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_0_2);
__STORE(__h - 5, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_0_2, __reg_0_3);
__STORE(__h - 4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 3, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_0_2);
__STORE(__h - 2, __reg_7_4, __reg_7_0, __reg_7_1, __reg_0_2, __reg_0_3);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, __h + 1);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 15, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 13, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 12, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 11, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 10, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 8, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3, __reg_0_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 7, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 6, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3, __reg_0_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 5, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_0_3);
__STORE(__h - 4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_0_3, __reg_0_4);
__STORE(__h - 3, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 2, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_0_3);
__STORE(__h - 1, __reg_7_0, __reg_7_1, __reg_7_2, __reg_0_3, __reg_0_4);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, __h + 1);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 15, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__LOAD(__reg_0_0, __h + 2);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 13, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 12, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 11, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 10, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 8, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 7, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 6, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 5, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4, __reg_0_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_0_4);
__STORE(__h - 3, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_0_4, __reg_0_0);
__STORE(__h - 2, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 1, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_0_4);
__STORE(__h + 0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_0_4, __reg_0_0);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, __h + 1);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 15, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__LOAD(__reg_0_0, __h + 2);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_1, __h + 3);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 13, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 12, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 11, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 10, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 8, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 7, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 6, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 5, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0, __reg_0_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 3, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_0_0);
__STORE(__h - 2, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_0_0, __reg_0_1);
__STORE(__h - 1, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h + 0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_0_0);
__STORE(__h + 1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_0_0, __reg_0_1);
}
}
else
{
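// Interior tile: steady-state streaming loop, unrolled by 5 so the
// register rotation (__reg_*_0 .. __reg_*_4) returns to its starting phase
// each iteration; every row loaded at __h retires one output row at
// __h - 16.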
for (__h = 33; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 16, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 16, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 16, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 16, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__h++;
}
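// Up to 4 rows remain after the unroll-by-5 loop; each guarded step
// returns as soon as the tile's overlapped extent (__side1LenOl) is
// exhausted.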
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 16, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 16, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 16, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 16, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__h++;
}
}
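// kernel0_7: the same 2D star stencil with 7 time steps fused per launch
// (__side0Len = 7). The register pipeline is one stage shorter than in the
// kernel above (__reg_1_* .. __reg_6_*, no __CALC7), so stores trail loads
// by 14 rows instead of 16.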
__global__ void kernel0_7(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 7;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 484;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_3_3;
double __reg_3_4;
double __reg_4_0;
double __reg_4_1;
double __reg_4_2;
double __reg_4_3;
double __reg_4_4;
double __reg_5_0;
double __reg_5_1;
double __reg_5_2;
double __reg_5_3;
double __reg_5_4;
double __reg_6_0;
double __reg_6_1;
double __reg_6_2;
double __reg_6_3;
double __reg_6_4;
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __storeValid = __writeValid7;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
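// Per-stage helpers. __LOAD reads one row of the current time plane of A
// (selected by __c0 % 2) into a register. __CALCEXPR is the 9-point star
// stencil: the c1-direction neighbors come from the register pipeline
// (__REGREF) and the c2-direction neighbors from the shared row buffer
// (__SBREF). Note the coefficients carry `f` suffixes although A is
// double; each literal is widened to double at the multiply, so only the
// constants themselves are stored in single precision. __CALCn evaluates
// stage n only for lanes that still have enough c2 halo (__writeValidn)
// and otherwise forwards the center value unchanged; __STORE applies the
// final stage and writes to the opposite time plane. __DB_SWITCH flips
// between the two halves of __c_sb_double so one stage can read its
// neighbors while the next row is being staged.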
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC4(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC5(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC6(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
if (__c1Id == 0)
{
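// Top-boundary tile: rows 0 and 1 are parked in the as-yet-unused
// __reg_6_0 / __reg_6_1 registers and re-fed to every stage as the fixed
// top halo, which is why the early __CALCn calls below name them
// explicitly.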
__LOAD(__reg_6_0, 0);
__LOAD(__reg_6_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_6_0, __reg_6_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_6_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_6_0, __reg_6_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_6_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_6_0, __reg_6_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_6_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_6_0, __reg_6_1, __reg_3_2, __reg_3_3, __reg_3_4);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_6_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_6_0, __reg_6_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_6_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_6_0, __reg_6_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_6_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
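// Pipeline full after 17 loads (rows 0..16): the first completed output is
// row 2, the first row past the top halo.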
__STORE(2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(5, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(6, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(8, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(9, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(10, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 25);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(11, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__LOAD(__reg_0_1, 26);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, 27);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(13, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__LOAD(__reg_0_3, 28);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(14, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
}
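// Interior blocks (__c1Id != 0): warm up the 7-stage pipeline on rows 0..28
// with plain loads (no boundary-register reuse); the first output lands at
// row 14, i.e. a lag of __halo1 * 7 fused time steps.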
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 25);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 26);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_2, 27);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_3, 28);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(14, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
}
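// Reset __c_sb to the double-buffer half that the steady-state code below
// expects after the warm-up's __DB_SWITCH() flips.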
__c_sb = __c_sb_double + __blockSize * 1;
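// Blocks covering the bottom boundary run the unrolled loop and then drain
// the pipeline with one epilogue per possible remainder (0..4 rows).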
if (__c1Id == __side1Num - 1)
{
for (__h = 29; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 14, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 14, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 14, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 14, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
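// Remainder dispatch: each "else if" handles one leftover-row count; the
// epilogues substitute the already-loaded boundary rows (__reg_0_*) into the
// deeper __CALC* stages instead of issuing further loads.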
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 13, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 11, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 10, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 9, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 8, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2);
__STORE(__h - 6, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2, __reg_0_3);
__STORE(__h - 5, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 4, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_0_2);
__STORE(__h - 3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_0_2, __reg_0_3);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 13, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 11, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 10, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 9, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 8, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3, __reg_0_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 6, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3);
__STORE(__h - 5, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_0_3);
__STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_0_3, __reg_0_4);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 13, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 11, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 10, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 9, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 8, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 6, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 5, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4);
__STORE(__h - 4, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4, __reg_0_0);
__STORE(__h - 3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_0_4);
__STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_6_3, __reg_0_4, __reg_0_0);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 13, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__LOAD(__reg_0_1, __h + 2);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 11, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 10, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 9, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 8, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 6, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 5, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 4, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0);
__STORE(__h - 3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0, __reg_0_1);
__STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_0_0);
__STORE(__h + 0, __reg_6_2, __reg_6_3, __reg_6_4, __reg_0_0, __reg_0_1);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 13, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__LOAD(__reg_0_1, __h + 2);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, __h + 3);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 11, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 10, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 9, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 8, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 6, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 5, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 4, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1, __reg_0_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1);
__STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1, __reg_0_2);
__STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h + 0, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_0_1);
__STORE(__h + 1, __reg_6_3, __reg_6_4, __reg_6_0, __reg_0_1, __reg_0_2);
}
}
else
{
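// Interior blocks: same unrolled steady state; no bottom-boundary epilogue
// is needed, only the per-row early-return drain after the loop.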
for (__h = 29; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 14, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 14, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 14, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 14, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
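// Drain the remaining 0..4 rows, returning as soon as this block's
// overlapped extent (__side1LenOl) is exhausted.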
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 14, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 14, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 14, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 14, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__h++;
}
}
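// kernel0_6: the same radius-2 star stencil with 6 time steps fused per
// sweep (__side0Len = 6); the register pipeline is one stage shallower than
// in the 7-step variant above, so the store lag drops from 14 to 12 rows.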
__global__ void kernel0_6(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 6;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 488;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
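// Six 5-wide rotating register windows, one per fused time level; the
// generated code rotates the names __reg_t_i instead of moving values.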
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_3_3;
double __reg_3_4;
double __reg_4_0;
double __reg_4_1;
double __reg_4_2;
double __reg_4_3;
double __reg_4_4;
double __reg_5_0;
double __reg_5_1;
double __reg_5_2;
double __reg_5_3;
double __reg_5_4;
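// Double-buffered shared-memory row: each __CALC* publishes its thread's
// center value so neighbors can read the +/-1 and +/-2 column offsets.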
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
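// Validity masks: every fused level shrinks the writable interior by one
// halo (2 columns) per side; __storeValid is what survives all six levels.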
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __storeValid = __writeValid6;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0)
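// __CALCEXPR evaluates a 9-point, radius-2 star: row neighbors come from the
// register windows (__a..__e), column neighbors from the shared-memory row
// (__SBREF offsets -2..+2). The float coefficient literals promote to double
// at each multiply, so the arithmetic stays in double precision.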
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC4(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC5(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
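// Top block (__c1Id == 0): boundary rows 0 and 1 are loaded once into the
// deepest level's registers (__reg_5_0/__reg_5_1) and fed to every __CALC*
// stage, so all six fused time steps see the same clamped boundary values.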
if (__c1Id == 0)
{
__LOAD(__reg_5_0, 0);
__LOAD(__reg_5_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_5_0, __reg_5_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_5_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_5_0, __reg_5_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_5_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_5_0, __reg_5_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_5_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_5_0, __reg_5_1, __reg_3_2, __reg_3_3, __reg_3_4);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_5_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_5_0, __reg_5_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_5_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(6, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(7, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(8, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(9, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(11, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(12, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
}
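// Interior/bottom blocks: plain warm-up over rows 0..24; the first output
// lands at row 12 (__halo1 * 6 fused steps).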
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(12, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
}
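// Explicitly re-base the shared-memory pointer onto buffer 0 so the
// steady-state code starts from a known double-buffer parity regardless
// of which prologue branch executed.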
__c_sb = __c_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
for (__h = 25; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 12, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 12, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 12, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 12, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__h++;
}
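// Pipeline drain for the last c1 tile. The empty if (0) arm is a code-
// generation artifact that lets every real remainder case (0..4 planes
// left) be a uniform else-if; each case substitutes already-loaded edge
// registers for planes that would fall outside the tile.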
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 11, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 9, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 8, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 7, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3);
__STORE(__h - 6, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3, __reg_0_4);
__STORE(__h - 5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 4, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3);
__STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3, __reg_0_4);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 11, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 9, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 8, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 7, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 6, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4);
__STORE(__h - 5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4, __reg_0_0);
__STORE(__h - 4, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4);
__STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4, __reg_0_0);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, __h + 1);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 11, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 9, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 8, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 7, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 6, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0);
__STORE(__h - 4, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0, __reg_0_1);
__STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0);
__STORE(__h - 1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0, __reg_0_1);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, __h + 1);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 11, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_2, __h + 2);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 9, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 8, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 7, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 6, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 4, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1);
__STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1, __reg_0_2);
__STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1);
__STORE(__h + 0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1, __reg_0_2);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, __h + 1);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 11, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_2, __h + 2);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_3, __h + 3);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 9, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 8, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 7, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 6, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 4, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2);
__STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3);
__STORE(__h - 1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h + 0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2);
__STORE(__h + 1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2, __reg_0_3);
}
}
else
{
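// Full interior tile: steady state consumes 5 input planes per iteration,
// each producing one output plane 12 rows behind the load front.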
for (__h = 25; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 12, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 12, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 12, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 12, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__h++;
}
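// Drain up to five trailing planes, returning as soon as the overlapped
// tile extent __side1LenOl is exhausted.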
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 12, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 12, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 12, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 12, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__h++;
}
}
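// kernel0_5: the same star stencil at time-tile depth 5 (__side0Len = 5).
// Each kernel0_N variant advances N time steps per launch at the cost of
// __OlLen = __halo * N cells of redundant overlap per tile side; the
// thread-block footprint is held at __side2Len + 2 * __OlLen2 = 512.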
__global__ void kernel0_5(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 492;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
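// blockIdx.x linearizes (c1 tile, c2 tile) pairs; __c2 is this thread's
// global column, shifted left by __OlLen2 so the block also covers its
// overlap/halo columns.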
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_3_3;
double __reg_3_4;
double __reg_4_0;
double __reg_4_1;
double __reg_4_2;
double __reg_4_3;
double __reg_4_4;
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
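// Two shared-memory buffers of one block-row each: __DB_SWITCH flips the
// active half before every publish, so a single __syncthreads per stage
// is enough (reads of the old buffer never race writes to the new one).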
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __storeValid = __writeValid5;
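// Each pipeline stage consumes __halo2 columns of neighborhood on both
// sides, so the writable region shrinks by __halo2 per stage; only
// threads still valid after stage 5 store results to global memory.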
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC4(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
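// Macro roles: __LOAD reads row h of the (c0 % 2) time-parity copy of A;
// __STORE evaluates the final stage and writes the opposite parity copy
// via __DEST. __CALCEXPR is the order-2 star stencil -- for an interior
// point it computes, with c1 taps held in registers and c2 taps read
// from the shared row:
//   out = 0.09371*A[c1-2][c2] + 0.09374*A[c1-1][c2]
//       + 0.09376*A[c1][c2-2] + 0.09372*A[c1][c2-1] + 0.25001*A[c1][c2]
//       + 0.09377*A[c1][c2+1] + 0.09373*A[c1][c2+2]
//       + 0.09375*A[c1+1][c2] + 0.09378*A[c1+2][c2]
// (weights sum to ~1; note the f-suffixed constants are float-rounded
// before promotion to double). __CALCn runs stage n, passing the center
// value through unchanged for threads whose stage-n write guard fails.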
if (__c1Id == 0)
{
__LOAD(__reg_4_0, 0);
__LOAD(__reg_4_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_4_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_4_0, __reg_4_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_4_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_4_0, __reg_4_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_4_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_4_0, __reg_4_1, __reg_3_2, __reg_3_3, __reg_3_4);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_4_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(5, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(6, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(7, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(9, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
}
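// Re-base the shared-memory pointer onto buffer 1 so the steady-state
// loop below starts from a known double-buffer parity.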
__c_sb = __c_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 21; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 10, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 10, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 10, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__h++;
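// The five steps above flip the double buffer an odd number of times
// (once per pipeline stage and store: five flips x five planes = 25),
// so flip once more and barrier to restore the parity the next
// iteration expects.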
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4);
__STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0);
__STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4, __reg_0_0);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0);
__STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0);
__STORE(__h - 2, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0, __reg_0_1);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, __h + 1);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 2, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1);
__STORE(__h - 1, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1, __reg_0_2);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, __h + 1);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, __h + 2);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3);
__STORE(__h - 2, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 1, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2);
__STORE(__h + 0, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, __h + 1);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, __h + 2);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, __h + 3);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3);
__STORE(__h - 2, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4);
__STORE(__h - 1, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h + 0, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3);
__STORE(__h + 1, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3, __reg_0_4);
}
}
else
{
for (__h = 21; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 10, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 10, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 10, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 10, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 10, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 10, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__h++;
}
}
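/* kernel0_4 below repeats the register-pipelined scheme of the kernel above,
   fusing __side0Len = 4 time steps per sweep of a 2D star stencil of radius 2
   (the "star2d2r" in the header name). Each thread owns one c2 lane and
   streams the c1 rows through four register planes (__reg_0_* .. __reg_3_*);
   three __CALC stages advance a value through the first three time steps, and
   __STORE applies the fourth while committing to global memory. The AN5D_TYPE
   guard and overall shape suggest this file was emitted by the AN5D stencil
   code generator. */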
__global__ void kernel0_4(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_3_3;
double __reg_3_4;
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __storeValid = __writeValid4;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
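/* An informal reading of the macros above (descriptive comment, not part of
   the generated source): __LOAD pulls row h of the current time plane into a
   register; each __CALCk advances that value by one fused time step, writing
   through only where __writeValidk holds so that halo threads pass data along
   unchanged; __STORE applies the final step and commits to the opposite,
   (c0 + 1) % 2, time plane of A. __CALCSETUP publishes the centre register to
   the shared-memory line buffer for the +/-1 and +/-2 c2 neighbours, and
   __DB_SWITCH ping-pongs between the two halves of __c_sb_double so a single
   __syncthreads() per stage suffices. */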
if (__c1Id == 0)
{
__LOAD(__reg_3_0, 0);
__LOAD(__reg_3_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_3_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_3_0, __reg_3_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_3_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_3_0, __reg_3_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_3_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(5, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(7, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
}
__c_sb = __c_sb_double + __blockSize * 0;
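/* From here on the pipeline is in steady state. The last c1 tile
   (__c1Id == __side1Num - 1) drains it with the "__h + k == ..." epilogue
   cases below, one per possible residual row count; interior tiles simply
   stream rows until __side1LenOl is reached, bailing out between unrolled
   iterations. */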
if (__c1Id == __side1Num - 1)
{
for (__h = 17; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 8, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 8, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(__h - 8, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__h++;
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0);
__STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1);
__STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0);
__STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1);
__STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1);
__STORE(__h - 2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, __h + 1);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3);
__STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2);
__STORE(__h - 1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, __h + 1);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, __h + 2);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3);
__STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4);
__STORE(__h - 2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3);
__STORE(__h + 0, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, __h + 1);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, __h + 2);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__LOAD(__reg_0_0, __h + 3);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4);
__STORE(__h - 2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0);
__STORE(__h - 1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h + 0, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4);
__STORE(__h + 1, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0);
}
}
else
{
for (__h = 17; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 8, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 8, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(__h - 8, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 8, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 8, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(__h - 8, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__h++;
}
}
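/* kernel0_3: the same scheme at fusion depth __side0Len = 3 -- one fewer
   register plane (__reg_0_* .. __reg_2_*) and one fewer __CALC stage, with
   __STORE performing the third step. */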
__global__ void kernel0_3(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __storeValid = __writeValid3;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
if (__c1Id == 0)
{
__LOAD(__reg_2_0, 0);
__LOAD(__reg_2_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_2_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_2_0, __reg_2_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_2_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__STORE(4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__STORE(5, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__STORE(6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__STORE(6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
}
__c_sb = __c_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 13; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 6, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__STORE(__h - 6, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__STORE(__h - 6, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__STORE(__h - 6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
__STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1);
__STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
__STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2);
__STORE(__h - 2, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, __h + 1);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
__STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 2, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3);
__STORE(__h - 1, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, __h + 1);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, __h + 2);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
__STORE(__h - 2, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 1, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4);
__STORE(__h + 0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, __h + 1);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, __h + 2);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, __h + 3);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__STORE(__h - 2, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
__STORE(__h - 1, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h + 0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0);
__STORE(__h + 1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1);
}
}
else
{
for (__h = 13; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 6, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__STORE(__h - 6, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__STORE(__h - 6, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__STORE(__h - 6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 6, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__STORE(__h - 6, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__STORE(__h - 6, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__STORE(__h - 6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__h++;
}
}
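/* kernel0_2: fusion depth 2. __CALC1 performs the first step and __STORE the
   second; the wider __side2Len (504 vs. 496/500 above) reflects the smaller
   halo overlap a shallower pipeline needs. */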
__global__ void kernel0_2(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __storeValid = __writeValid2;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
if (__c1Id == 0)
{
__LOAD(__reg_1_0, 0);
__LOAD(__reg_1_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_1_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__STORE(2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__STORE(4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__STORE(4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
}
__c_sb = __c_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
for (__h = 9; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__STORE(__h - 4, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__STORE(__h - 4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 4, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__STORE(__h - 4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__h++;
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__STORE(__h - 1, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, __h + 2);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__STORE(__h - 1, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__STORE(__h + 0, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, __h + 2);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, __h + 3);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 1, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__STORE(__h + 0, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__STORE(__h + 1, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
}
}
else
{
for (__h = 9; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__STORE(__h - 4, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__STORE(__h - 4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 4, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__STORE(__h - 4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__STORE(__h - 4, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__STORE(__h - 4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 4, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__STORE(__h - 4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__h++;
}
}
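/* kernel0_1: the degenerate single-step kernel. No intermediate register
   planes or __CALC stages remain; each row is loaded once and __STORE applies
   the one stencil update. Presumably it mops up remaining time steps when the
   time loop is not a multiple of the larger fusion depths. */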
__global__ void kernel0_1(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __storeValid = __writeValid1;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
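/* With a single fused step there is no register pipeline to prime, so the
   __c1Id == 0 and interior branches below are byte-for-byte identical; the
   split is apparently kept only for structural symmetry with the kernels
   above. */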
if (__c1Id == 0)
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__STORE(2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__STORE(2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
}
__c_sb = __c_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 5; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_0, __h);
__STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__h++;
__LOAD(__reg_0_1, __h);
__STORE(__h - 2, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__h++;
__LOAD(__reg_0_2, __h);
__STORE(__h - 2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__h++;
__LOAD(__reg_0_3, __h);
__STORE(__h - 2, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__h++;
__LOAD(__reg_0_4, __h);
__STORE(__h - 2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, __h + 1);
__STORE(__h - 1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, __h + 1);
__STORE(__h - 1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, __h + 2);
__STORE(__h + 0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, __h + 1);
__STORE(__h - 1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, __h + 2);
__STORE(__h + 0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, __h + 3);
__STORE(__h + 1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
}
}
else
{
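// Interior block: stream exactly __side1LenOl rows, five per iteration.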
for (__h = 5; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_0, __h);
__STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__h++;
__LOAD(__reg_0_1, __h);
__STORE(__h - 2, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__h++;
__LOAD(__reg_0_2, __h);
__STORE(__h - 2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__h++;
__LOAD(__reg_0_3, __h);
__STORE(__h - 2, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__h++;
__LOAD(__reg_0_4, __h);
__STORE(__h - 2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__h++;
__DB_SWITCH(); __syncthreads();
}
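// Up to four rows may remain before __side1LenOl; emit them one at a time,
// returning as soon as this block's row budget is exhausted.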
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__STORE(__h - 2, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__STORE(__h - 2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__STORE(__h - 2, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__STORE(__h - 2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__h++;
}
}
| 4f6edf48c4aa47f2aea1ae1c4cb3f875557ec735.cu | #include "star2d2r-512-8-512_kernel.hu"
__device__ double __sbref_wrap(double *sb, size_t index) { return sb[index]; }
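// kernel0_8 advances a radius-2 star-shaped 2D stencil (star2d2r) by 8 fused
// time steps per launch. Each thread owns one column (c2); rows (c1) stream
// through registers while a shared-memory row buffer supplies the +/-1 and
// +/-2 neighbors along c2.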
__global__ void kernel0_8(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 8;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 480;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
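// Seven intermediate time steps plus the raw input occupy eight groups of
// five rotating registers (40 per thread); the eighth step is written
// directly to global memory by __STORE.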
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_3_3;
double __reg_3_4;
double __reg_4_0;
double __reg_4_1;
double __reg_4_2;
double __reg_4_3;
double __reg_4_4;
double __reg_5_0;
double __reg_5_1;
double __reg_5_2;
double __reg_5_3;
double __reg_5_4;
double __reg_6_0;
double __reg_6_1;
double __reg_6_2;
double __reg_6_3;
double __reg_6_4;
double __reg_7_0;
double __reg_7_1;
double __reg_7_2;
double __reg_7_3;
double __reg_7_4;
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
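// Two shared-memory copies of the current center row; __DB_SWITCH() alternates
// between them at every pipeline stage so one stage's neighbor reads cannot
// race the next stage's writes.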
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __storeValid = __writeValid8;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
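// __LOAD reads a row from the current time slab (__c0 % 2); __CALCk applies
// the 9-point star stencil for pipeline stage k, falling back to a
// pass-through (out = reg2) for threads whose column is not yet valid at that
// stage; __STORE writes the final (eighth) step into the other time slab.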
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC4(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC5(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC6(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC7(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
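// First block along c1: rows 0 and 1 sit on the global boundary and are never
// updated, so their raw values (__reg_7_0, __reg_7_1) serve as the top halo
// for every stage while the pipeline fills; the first result row stored is
// row 2.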
if (__c1Id == 0)
{
__LOAD(__reg_7_0, 0);
__LOAD(__reg_7_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_7_0, __reg_7_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_7_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_7_0, __reg_7_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_7_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_7_0, __reg_7_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_7_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_7_0, __reg_7_1, __reg_3_2, __reg_3_3, __reg_3_4);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_7_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_7_0, __reg_7_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_7_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_7_0, __reg_7_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_7_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_7_0, __reg_7_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_7_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(5, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(6, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(7, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(8, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__LOAD(__reg_0_0, 25);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_1, 26);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(10, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__LOAD(__reg_0_2, 27);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(11, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__LOAD(__reg_0_3, 28);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(12, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, 29);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(13, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__LOAD(__reg_0_0, 30);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_1, 31);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(15, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__LOAD(__reg_0_2, 32);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(16, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
}
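// Interior and last blocks have no boundary rows to pin; stage k simply
// starts once enough rows are in flight (its first output needs 4k + 1 loaded
// rows, so the first __STORE lands only after row 32).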
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 25);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 26);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_2, 27);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_3, 28);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__LOAD(__reg_0_4, 29);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 30);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__LOAD(__reg_0_1, 31);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, 32);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(16, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
}
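// The pipeline is now full (33 rows in flight). Steady state streams five
// rows per iteration, storing each result 16 rows behind the newest load.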
__c_sb = __c_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
for (__h = 33; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 16, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 16, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 16, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 16, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__h++;
}
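// Drain for the last block: for each possible remainder (0..4 rows left), the
// chain below flushes the result rows still held in the pipeline, reusing the
// untouched bottom-boundary rows (__reg_0_*) as the lower halo at each stage.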
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 15, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 13, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 12, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 11, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 10, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1, __reg_0_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 8, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1, __reg_0_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 7, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_0_1);
__STORE(__h - 6, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_0_1, __reg_0_2);
__STORE(__h - 5, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_0_1);
__STORE(__h - 3, __reg_7_3, __reg_7_4, __reg_7_0, __reg_0_1, __reg_0_2);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 15, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 13, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 12, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 11, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 10, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 8, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 7, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2, __reg_0_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 6, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_0_2);
__STORE(__h - 5, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_0_2, __reg_0_3);
__STORE(__h - 4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 3, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_0_2);
__STORE(__h - 2, __reg_7_4, __reg_7_0, __reg_7_1, __reg_0_2, __reg_0_3);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, __h + 1);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 15, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 13, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 12, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 11, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 10, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 8, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3, __reg_0_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 7, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 6, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3, __reg_0_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 5, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_0_3);
__STORE(__h - 4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_0_3, __reg_0_4);
__STORE(__h - 3, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 2, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_0_3);
__STORE(__h - 1, __reg_7_0, __reg_7_1, __reg_7_2, __reg_0_3, __reg_0_4);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, __h + 1);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 15, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__LOAD(__reg_0_0, __h + 2);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 13, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 12, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 11, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 10, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 8, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 7, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 6, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 5, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4, __reg_0_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_0_4);
__STORE(__h - 3, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_0_4, __reg_0_0);
__STORE(__h - 2, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 1, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_0_4);
__STORE(__h + 0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_0_4, __reg_0_0);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, __h + 1);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 15, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__LOAD(__reg_0_0, __h + 2);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_1, __h + 3);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 13, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 12, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 11, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 10, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 8, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 7, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 6, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 5, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0, __reg_0_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 3, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_0_0);
__STORE(__h - 2, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_0_0, __reg_0_1);
__STORE(__h - 1, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h + 0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_0_0);
__STORE(__h + 1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_0_0, __reg_0_1);
}
}
else
{
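/* Annotation: interior-tile steady state for this (presumably 8-step-fused)
   kernel. The body is unrolled 5x so the five rotating register slots
   (__reg_*_0..__reg_*_4) return to their starting alignment each iteration.
   Every __STORE writes row (__h - 16): with eight stencil applications per
   sweep (__CALC1..__CALC7 plus the __STORE's own __CALCEXPR) and a halo of 2
   rows per application, the output trails the freshest __LOAD by 8 * 2 = 16. */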
for (__h = 33; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 16, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 16, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 16, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 16, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 16, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 16, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 16, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 16, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__h++;
}
}
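/* kernel0_7: the 7-step temporally fused variant of the same order-2 2D star
   stencil. Each thread block advances a 512 x 484 tile of the c1/c2 plane by
   7 time steps per launch, streaming along c1 with a register pipeline and
   exchanging c2-neighbors through shared memory. Note the coefficients below
   carry 'f' suffixes (single-precision literals) while the data is double;
   this is reproduced as generated.

   Hedged host-side sketch (the launcher is not part of this section; the
   names and shapes below are assumptions inferred from the index math in the
   kernel, not the actual driver code):

     // AN5D_TYPE side1Num = (c1Len + 512 - 1) / 512;
     // AN5D_TYPE side2Num = (c2Len + 484 - 1) / 484;
     // dim3 grid(side1Num * side2Num, 1, 1);
     // dim3 block(484 + 2 * (2 * 7), 1, 1);  // == __side2LenOl == __blockSize
     // hipLaunchKernelGGL(kernel0_7, grid, block, 0, 0, A, dimsize, timestep, c0);
*/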
__global__ void kernel0_7(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 7;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 484;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
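/* Register pipeline state: each of the seven register groups (__reg_0_* for
   raw loads through __reg_6_* for stage-6 results) keeps five rotating row
   registers. Rotation is done by renaming in the unrolled code below, which
   avoids register-to-register copies. */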
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_3_3;
double __reg_3_4;
double __reg_4_0;
double __reg_4_1;
double __reg_4_2;
double __reg_4_3;
double __reg_4_4;
double __reg_5_0;
double __reg_5_1;
double __reg_5_2;
double __reg_5_3;
double __reg_5_4;
double __reg_6_0;
double __reg_6_1;
double __reg_6_2;
double __reg_6_3;
double __reg_6_4;
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
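/* Shared-memory row exchange: __c_sb_double holds two row buffers of
   __blockSize doubles each, and __DB_SWITCH() flips between them so a row
   written this step is not clobbered while neighbors still read it. The
   __writeValidK predicates below shrink the writable interior by one halo
   (2 columns) per fused stage, so only columns whose inputs are fully valid
   at depth K are updated; all other threads pass their center value through
   via the 'out = reg2' fallback in __CALCK. */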
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __storeValid = __writeValid7;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC4(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC5(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC6(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
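/* Macro summary: __LOAD fetches one input row (guarded by __loadValid),
   __CALCK applies one stencil step at pipeline depth K, and __STORE applies
   the final (7th) step and writes to the opposite time-plane of A via __DEST.
   What follows is the usual software-pipeline shape: a prologue that fills
   the pipeline, a steady-state loop, and an epilogue that drains it. */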
if (__c1Id == 0)
{
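/* Top-boundary tile (__c1Id == 0): rows 0 and 1 are the physical boundary,
   so they are loaded once into __reg_6_0/__reg_6_1 and reused as the two
   upper halo rows at every pipeline depth while the stages fill; the first
   real output lands at row 2 (the __STORE(2, ...) below). */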
__LOAD(__reg_6_0, 0);
__LOAD(__reg_6_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_6_0, __reg_6_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_6_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_6_0, __reg_6_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_6_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_6_0, __reg_6_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_6_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_6_0, __reg_6_1, __reg_3_2, __reg_3_3, __reg_3_4);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_6_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_6_0, __reg_6_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_6_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_6_0, __reg_6_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_6_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(5, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(6, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(8, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(9, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(10, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 25);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(11, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__LOAD(__reg_0_1, 26);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, 27);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(13, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__LOAD(__reg_0_3, 28);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(14, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
}
else
{
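/* Interior/bottom tiles: no boundary rows to pin, so the pipeline fills
   purely by streaming: rows 0..28 are loaded while stages 1..6 spin up, and
   the first output of this tile is row 14, i.e. the accumulated halo of the
   seven fused steps (7 * 2 = 14) behind the newest load. */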
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 25);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 26);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_2, 27);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_3, 28);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(14, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
}
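/* Both prologue paths continue with the shared-memory pointer reset
   explicitly to the second buffer, putting the double-buffer phase in a known
   state regardless of which path ran. The last tile along c1
   (__c1Id == __side1Num - 1) may cover fewer than __side1Len rows, so it runs
   a bounded loop followed by one of the drain chains below; all other tiles
   take the full-length steady-state loop in the else branch. */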
__c_sb = __c_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 29; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 14, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 14, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 14, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 14, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
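/* Pipeline drain for the last tile: depending on how many rows remain
   (__h + k == __c1Len - __side1Len * __c1Id + __halo1 * 2 for k = 0..4), one
   of the chains below flushes the in-flight rows, substituting the last two
   loaded rows as the lower halo at each remaining depth. The 'if (0) {}'
   head appears to be a generator idiom that lets every real case be emitted
   as an 'else if' of identical shape. */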
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 13, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 11, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 10, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 9, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 8, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2);
__STORE(__h - 6, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2, __reg_0_3);
__STORE(__h - 5, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 4, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_0_2);
__STORE(__h - 3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_0_2, __reg_0_3);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 13, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 11, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 10, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 9, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 8, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3, __reg_0_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 6, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3);
__STORE(__h - 5, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_0_3);
__STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_0_3, __reg_0_4);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 13, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 11, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 10, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 9, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 8, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 6, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 5, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4);
__STORE(__h - 4, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4, __reg_0_0);
__STORE(__h - 3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_0_4);
__STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_6_3, __reg_0_4, __reg_0_0);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 13, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__LOAD(__reg_0_1, __h + 2);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 11, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 10, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 9, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 8, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 6, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 5, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 4, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0);
__STORE(__h - 3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0, __reg_0_1);
__STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_0_0);
__STORE(__h + 0, __reg_6_2, __reg_6_3, __reg_6_4, __reg_0_0, __reg_0_1);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 13, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__LOAD(__reg_0_1, __h + 2);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, __h + 3);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 11, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 10, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 9, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 8, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 6, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 5, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 4, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1, __reg_0_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1);
__STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1, __reg_0_2);
__STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h + 0, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_0_1);
__STORE(__h + 1, __reg_6_3, __reg_6_4, __reg_6_0, __reg_0_1, __reg_0_2);
}
}
else
{
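/* Interior tiles of the enclosing kernel: stores trail the loads by 14 rows
   (= __halo1 * __side0Len, i.e. a 7-step fused pipeline), drained by the same
   5-way unrolled steady-state loop plus guarded single-row tail used below. */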
for (__h = 29; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 14, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 14, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 14, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 14, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 14, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 14, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 14, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 14, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__h++;
}
}
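/* AN5D-style register-pipelined kernel fusing __side0Len = 6 time steps of the
   2D radius-2 star stencil per sweep. Each thread streams rows along c1 through
   six chained stages (__CALC1..__CALC5 plus __STORE); column neighbours along c2
   come from the double-buffered shared-memory line __c_sb. The useful c2 width
   shrinks by __halo2 per fused step: __side2Len = 488 = 512 - 2*2*6. */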
__global__ void kernel0_6(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 6;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 488;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
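/* Register queues: __reg_k_j is slot j (mod 5) of pipeline stage k; five slots
   per stage cover the radius-2 window of rows along c1. */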
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_3_3;
double __reg_3_4;
double __reg_4_0;
double __reg_4_1;
double __reg_4_2;
double __reg_4_3;
double __reg_4_4;
double __reg_5_0;
double __reg_5_1;
double __reg_5_2;
double __reg_5_3;
double __reg_5_4;
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
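/* Validity masks along c2: loads cover the halo; each deeper pipeline stage is
   valid on a region shrunk by a further __halo2 cells on each side. */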
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __storeValid = __writeValid6;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC4(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC5(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
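/* Each __CALCk advances one fused time step, passing the centre value through
   unchanged outside its valid region; __STORE applies the final (sixth) step and
   writes directly to the other half of the time-toggled buffer A. */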
if (__c1Id == 0)
{
__LOAD(__reg_5_0, 0);
__LOAD(__reg_5_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_5_0, __reg_5_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_5_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_5_0, __reg_5_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_5_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_5_0, __reg_5_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_5_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_5_0, __reg_5_1, __reg_3_2, __reg_3_3, __reg_3_4);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_5_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_5_0, __reg_5_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_5_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(6, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(7, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(8, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(9, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(11, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(12, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
}
else
{
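/* Interior tile along c1: warm all six stages up from plain loads of rows 0..24;
   only row 12 is stored here, earlier rows are owned by the preceding tile. */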
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(12, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
}
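// Reset the shared-memory double buffer to its first half before the main loops.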
__c_sb = __c_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
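/* Last tile along c1: run the steady-state loop while at least five rows remain,
   then drain the pipeline with the matching remainder-specific epilogue below. */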
for (__h = 25; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 12, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 12, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 12, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 12, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__h++;
}
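/* Pipeline drain: the else-if chain picks the epilogue for the exact number of
   rows left (__h + r == remaining rows, r = 0..4); the generated `if (0) {}`
   merely anchors the chain. */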
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 11, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 9, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 8, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 7, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3);
__STORE(__h - 6, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3, __reg_0_4);
__STORE(__h - 5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 4, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3);
__STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3, __reg_0_4);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 11, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 9, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 8, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 7, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 6, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4);
__STORE(__h - 5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4, __reg_0_0);
__STORE(__h - 4, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4);
__STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4, __reg_0_0);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, __h + 1);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 11, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 9, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 8, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 7, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 6, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0);
__STORE(__h - 4, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0, __reg_0_1);
__STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0);
__STORE(__h - 1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0, __reg_0_1);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, __h + 1);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 11, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_2, __h + 2);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 9, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 8, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 7, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 6, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 4, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1);
__STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1, __reg_0_2);
__STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1);
__STORE(__h + 0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1, __reg_0_2);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, __h + 1);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 11, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_2, __h + 2);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_3, __h + 3);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 9, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 8, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 7, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 6, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 4, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2);
__STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3);
__STORE(__h - 1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h + 0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2);
__STORE(__h + 1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2, __reg_0_3);
}
}
else
{
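/* Interior tiles along c1: 5-way unrolled steady-state loop, then up to five
   guarded single-row tail iterations bounded by __side1LenOl. */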
for (__h = 25; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 12, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 12, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 12, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 12, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 12, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 12, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 12, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 12, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__h++;
}
}
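/* Same pipeline as kernel0_6, one stage shorter: __side0Len = 5 fused time steps
   (__CALC1..__CALC4 feeding __STORE), so the per-step shrink along c2 gives
   __side2Len = 492 = 512 - 2*2*5. */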
__global__ void kernel0_5(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 492;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_3_3;
double __reg_3_4;
double __reg_4_0;
double __reg_4_1;
double __reg_4_2;
double __reg_4_3;
double __reg_4_4;
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __storeValid = __writeValid5;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC4(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
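/* First tile along c1 (__c1Id == 0): the two boundary rows are loaded straight
   into the last-stage registers (__reg_4_0/__reg_4_1) and reused as fixed
   boundary neighbours by every stage, mirroring kernel0_6's __reg_5_* usage. */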
if (__c1Id == 0)
{
__LOAD(__reg_4_0, 0);
__LOAD(__reg_4_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_4_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_4_0, __reg_4_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_4_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_4_0, __reg_4_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_4_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_4_0, __reg_4_1, __reg_3_2, __reg_3_3, __reg_3_4);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_4_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(5, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(6, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(7, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(9, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
}
__c_sb = __c_sb_double + __blockSize * 1;
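// The assignment above re-seeds the double-buffer parity, since the two
// warm-up branches executed different numbers of buffer flips. What follows
// distinguishes the last tile along c1 (which must drain the pipeline at the
// bottom boundary) from interior tiles.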
if (__c1Id == __side1Num - 1)
{
for (__h = 21; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 10, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 10, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 10, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__h++;
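// Five rows per iteration, each doing five buffer flips via __CALCSETUP
// (__CALC1..4 plus __STORE), is an odd number of flips; the extra __DB_SWITCH
// below restores the parity expected at the top of the loop.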
__DB_SWITCH(); __syncthreads();
}
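// Tail of the last tile: the ladder below dispatches on how many rows (0 to 4)
// remain past the unrolled loop, finishing the outstanding loads and flushing
// the deeper stages with the last raw rows standing in for the bottom boundary.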
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4);
__STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0);
__STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4, __reg_0_0);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0);
__STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0);
__STORE(__h - 2, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0, __reg_0_1);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, __h + 1);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 2, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1);
__STORE(__h - 1, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1, __reg_0_2);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, __h + 1);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, __h + 2);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3);
__STORE(__h - 2, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 1, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2);
__STORE(__h + 0, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, __h + 1);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, __h + 2);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, __h + 3);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3);
__STORE(__h - 2, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4);
__STORE(__h - 1, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h + 0, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3);
__STORE(__h + 1, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3, __reg_0_4);
}
}
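// Interior tiles: a steady-state loop over the overlapped extent __side1LenOl,
// unrolled five rows per iteration, then a row-at-a-time drain that returns as
// soon as the extent is exhausted.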
else
{
for (__h = 21; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 10, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 10, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 10, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 10, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 10, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 10, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__h++;
}
}
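// kernel0_4: same radius-2 star stencil, but with __side0Len = 4 time steps
// fused per sweep, so the register pipeline is one stage shallower (__reg_0_*
// through __reg_3_*) and the per-block overlap __OlLen1/__OlLen2 shrinks
// accordingly. The kernel0_N family looks like the output of an AN5D-style
// temporal-blocking code generator, which would explain the mechanical
// structure shared by all variants.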
__global__ void kernel0_4(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_3_3;
double __reg_3_4;
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __storeValid = __writeValid4;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
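// Warm-up mirrors kernel0_5 one stage shallower: for the first tile the raw
// boundary rows are parked in __reg_3_0/__reg_3_1 and reused at every stage,
// with the first __STORE landing at row 2.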
if (__c1Id == 0)
{
__LOAD(__reg_3_0, 0);
__LOAD(__reg_3_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_3_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_3_0, __reg_3_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_3_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_3_0, __reg_3_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_3_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(5, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(7, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
}
__c_sb = __c_sb_double + __blockSize * 0;
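// The assignment above pins the buffer parity after warm-up. Each row in the
// loops below performs four buffer flips (__CALC1..3 plus __STORE), an even
// count, so unlike kernel0_5 no trailing __DB_SWITCH is needed per iteration.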
if (__c1Id == __side1Num - 1)
{
for (__h = 17; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 8, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 8, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(__h - 8, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__h++;
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0);
__STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1);
__STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0);
__STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1);
__STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1);
__STORE(__h - 2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, __h + 1);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3);
__STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2);
__STORE(__h - 1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, __h + 1);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, __h + 2);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3);
__STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4);
__STORE(__h - 2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3);
__STORE(__h + 0, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, __h + 1);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, __h + 2);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__LOAD(__reg_0_0, __h + 3);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4);
__STORE(__h - 2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0);
__STORE(__h - 1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h + 0, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4);
__STORE(__h + 1, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0);
}
}
else
{
for (__h = 17; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 8, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 8, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(__h - 8, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 8, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 8, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(__h - 8, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__h++;
}
}
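// kernel0_3: the 3-step variant. With __side0Len = 3 the overlap per side is
// only __halo2 * 3 = 6 columns, so the useful tile width grows to
// __side2Len = 500 while the thread-block extent stays at 512
// (__side2LenOl = 500 + 2 * 6).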
__global__ void kernel0_3(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __storeValid = __writeValid3;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
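// Same warm-up scheme at depth 3: raw boundary rows live in
// __reg_2_0/__reg_2_1 for the first tile, and both branches converge on
// __STORE(6, ...) before the steady-state loop starts at __h = 13.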
if (__c1Id == 0)
{
__LOAD(__reg_2_0, 0);
__LOAD(__reg_2_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_2_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_2_0, __reg_2_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_2_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__STORE(4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__STORE(5, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__STORE(6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__STORE(6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
}
__c_sb = __c_sb_double + __blockSize * 1;
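// Parity re-seed again; at depth 3 each row flips the buffer three times
// (__CALC1, __CALC2, __STORE), an odd count, so the steady-state loop below
// ends each iteration with an explicit __DB_SWITCH.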
if (__c1Id == __side1Num - 1)
{
for (__h = 13; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 6, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__STORE(__h - 6, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__STORE(__h - 6, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__STORE(__h - 6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
__STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1);
__STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
__STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2);
__STORE(__h - 2, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, __h + 1);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
__STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 2, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3);
__STORE(__h - 1, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, __h + 1);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, __h + 2);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
__STORE(__h - 2, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 1, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4);
__STORE(__h + 0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, __h + 1);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, __h + 2);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, __h + 3);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__STORE(__h - 2, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
__STORE(__h - 1, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h + 0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0);
__STORE(__h + 1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1);
}
}
else
{
for (__h = 13; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 6, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__STORE(__h - 6, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__STORE(__h - 6, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__STORE(__h - 6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 6, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__STORE(__h - 6, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__STORE(__h - 6, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__STORE(__h - 6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__h++;
}
}
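// The kernels below look like AN5D-generated code for a 2D star stencil of
// radius 2: each kernel0_N fuses N time steps (__side0Len = N), keeps a 5-row
// sliding window in registers (__reg_*), and republishes the center row through
// a double-buffered shared-memory line (__c_sb_double) so neighboring threads
// can read the horizontal +/-1 and +/-2 taps. kernel0_2 below is the two-step
// variant; these roles are inferred from the code structure, not from any
// accompanying text.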
__global__ void kernel0_2(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __storeValid = __writeValid2;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
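// Inferred macro roles: __LOAD reads row h of the current time plane into a
// register; __CALC1 evaluates the first fused time step where the lateral halo
// is still valid (__writeValid1), otherwise passes the center value through;
// __STORE evaluates the final step and writes row h of the destination plane.
// Each __CALCSETUP flips the shared-memory buffer and posts this thread's
// center value so neighbors can read the horizontal taps.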
if (__c1Id == 0)
{
__LOAD(__reg_1_0, 0);
__LOAD(__reg_1_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_1_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__STORE(2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__STORE(4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__STORE(4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
}
__c_sb = __c_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
for (__h = 9; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__STORE(__h - 4, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__STORE(__h - 4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 4, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__STORE(__h - 4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__h++;
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__STORE(__h - 1, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, __h + 2);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__STORE(__h - 1, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__STORE(__h + 0, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, __h + 2);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, __h + 3);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 1, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__STORE(__h + 0, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__STORE(__h + 1, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
}
}
else
{
for (__h = 9; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__STORE(__h - 4, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__STORE(__h - 4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 4, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__STORE(__h - 4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__STORE(__h - 4, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__STORE(__h - 4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 4, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__STORE(__h - 4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__h++;
}
}
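// kernel0_1 is the single-step variant (__side0Len = 1): with one fused time
// step there is no intermediate __CALC1 stage, so rows go straight from the
// __LOAD registers into __STORE.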
__global__ void kernel0_1(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __storeValid = __writeValid1;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
if (__c1Id == 0)
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__STORE(2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__STORE(2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
}
__c_sb = __c_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 5; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_0, __h);
__STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__h++;
__LOAD(__reg_0_1, __h);
__STORE(__h - 2, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__h++;
__LOAD(__reg_0_2, __h);
__STORE(__h - 2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__h++;
__LOAD(__reg_0_3, __h);
__STORE(__h - 2, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__h++;
__LOAD(__reg_0_4, __h);
__STORE(__h - 2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, __h + 1);
__STORE(__h - 1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, __h + 1);
__STORE(__h - 1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, __h + 2);
__STORE(__h + 0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, __h + 1);
__STORE(__h - 1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, __h + 2);
__STORE(__h + 0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, __h + 3);
__STORE(__h + 1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
}
}
else
{
for (__h = 5; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_0, __h);
__STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__h++;
__LOAD(__reg_0_1, __h);
__STORE(__h - 2, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__h++;
__LOAD(__reg_0_2, __h);
__STORE(__h - 2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__h++;
__LOAD(__reg_0_3, __h);
__STORE(__h - 2, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__h++;
__LOAD(__reg_0_4, __h);
__STORE(__h - 2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__STORE(__h - 2, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__STORE(__h - 2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__STORE(__h - 2, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__STORE(__h - 2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__h++;
}
}
|
33108b200692a1c0afd28a4652a03fd642470809.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "downscale.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
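// The harness sweeps every block shape above against the first argv[1] matrix
// sizes and prints one [usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)] record per combination.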
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;
int matrix_len = strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *gradInput_data = NULL;
hipMalloc(&gradInput_data, XSIZE*YSIZE*sizeof(float)); // size is in bytes, not elements
const float *gradOutput_data = NULL;
hipMalloc(&gradOutput_data, XSIZE*YSIZE*sizeof(float));
long no_elements = 1;
int scale_factor = 2;
int d1 = 2;
int d2 = 2;
int d3 = 2;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
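// Round the launch extents up to the next multiple of the block shape so the
// grid covers the whole matrix; this assumes the kernel bounds-checks threads
// that land outside XSIZE x YSIZE.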
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((downscale), dim3(gridBlock), dim3(threadBlock), 0, 0, gradInput_data, gradOutput_data, no_elements, scale_factor, d1, d2, d3);
hipDeviceSynchronize();
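// Warm-up: a few untimed launches so first-use costs stay out of the timed loop.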
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((downscale), dim3(gridBlock), dim3(threadBlock), 0, 0, gradInput_data, gradOutput_data, no_elements, scale_factor, d1, d2, d3);
}
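// Timed region: 1000 back-to-back asynchronous launches; a device synchronize
// before reading 'end' is needed for the figure to reflect execution rather
// than enqueue time.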
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((downscale), dim3(gridBlock), dim3(threadBlock), 0, 0, gradInput_data, gradOutput_data, no_elements, scale_factor, d1, d2, d3);
}
hipDeviceSynchronize(); // launches are asynchronous; wait so 'end' reflects execution time
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 33108b200692a1c0afd28a4652a03fd642470809.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "downscale.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;
int matrix_len = strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *gradInput_data = NULL;
cudaMalloc(&gradInput_data, XSIZE*YSIZE*sizeof(float)); // size is in bytes, not elements
const float *gradOutput_data = NULL;
cudaMalloc(&gradOutput_data, XSIZE*YSIZE*sizeof(float));
long no_elements = 1;
int scale_factor = 2;
int d1 = 2;
int d2 = 2;
int d3 = 2;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
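// cudaFree(0) below is a common idiom to force lazy CUDA context creation
// before the warm-up and timed loops run.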
cudaFree(0);
downscale<<<gridBlock,threadBlock>>>(gradInput_data,gradOutput_data,no_elements,scale_factor,d1,d2,d3);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
downscale<<<gridBlock,threadBlock>>>(gradInput_data,gradOutput_data,no_elements,scale_factor,d1,d2,d3);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
downscale<<<gridBlock,threadBlock>>>(gradInput_data,gradOutput_data,no_elements,scale_factor,d1,d2,d3);
}
cudaDeviceSynchronize(); // launches are asynchronous; wait so 'end' reflects execution time
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
ff5fe6f333b9bb4336323f8273ad90482ea46406.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <exception>
#include <stdexcept>
#include <vector>
#include "device.h"
using std::runtime_error;
using std::vector;
// Cache number of devices
static int deviceCount = -1;
// Cache device properties
static vector<hipDeviceProp_t> deviceProperties;
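// loadDeviceData() queries the runtime once and memoizes the results; the
// public helpers below trigger it lazily on first use.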
static void loadDeviceData()
{
hipError_t err;
err = hipGetDeviceCount(&deviceCount);
if (err != hipSuccess)
{
throw runtime_error(hipGetErrorString(err));
}
deviceProperties.reserve(deviceCount);
for (int device = 0; device < deviceCount; ++device)
{
hipDeviceProp_t prop;
err = hipGetDeviceProperties(&prop, device);
if (err != hipSuccess)
{
throw runtime_error(hipGetErrorString(err));
}
deviceProperties.push_back(prop);
}
}
bool isDeviceValid(int device)
{
if (deviceCount < 0)
{
loadDeviceData();
}
if (device < 0 || device >= deviceCount)
{
return false;
}
const hipDeviceProp_t& prop = deviceProperties[device];
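// A device in prohibited compute mode cannot accept work from any host
// thread, so report it as unusable.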
if (prop.computeMode == hipComputeModeProhibited)
{
return false;
}
return true;
}
int countDevices()
{
if (deviceCount < 0)
{
loadDeviceData();
}
return deviceCount;
}
void loadDeviceProperties(int device, hipDeviceProp_t& prop)
{
if (deviceCount < 0)
{
loadDeviceData();
}
prop = deviceProperties[device];
}
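// Usage sketch (hypothetical caller; assumes device.h declares these helpers):
//   if (isDeviceValid(0)) {
//       hipDeviceProp_t prop;
//       loadDeviceProperties(0, prop);
//       printf("%s: %d multiprocessors\n", prop.name, prop.multiProcessorCount);
//   }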
| ff5fe6f333b9bb4336323f8273ad90482ea46406.cu | #include <cuda.h>
#include <exception>
#include <stdexcept>
#include <vector>
#include "device.h"
using std::runtime_error;
using std::vector;
// Cache number of devices
static int deviceCount = -1;
// Cache device properties
static vector<cudaDeviceProp> deviceProperties;
static void loadDeviceData()
{
cudaError_t err;
err = cudaGetDeviceCount(&deviceCount);
if (err != cudaSuccess)
{
throw runtime_error(cudaGetErrorString(err));
}
deviceProperties.reserve(deviceCount);
for (int device = 0; device < deviceCount; ++device)
{
cudaDeviceProp prop;
err = cudaGetDeviceProperties(&prop, device);
if (err != cudaSuccess)
{
throw runtime_error(cudaGetErrorString(err));
}
deviceProperties.push_back(prop);
}
}
bool isDeviceValid(int device)
{
if (deviceCount < 0)
{
loadDeviceData();
}
if (device < 0 || device >= deviceCount)
{
return false;
}
const cudaDeviceProp& prop = deviceProperties[device];
if (prop.computeMode == cudaComputeModeProhibited)
{
return false;
}
return true;
}
int countDevices()
{
if (deviceCount < 0)
{
loadDeviceData();
}
return deviceCount;
}
void loadDeviceProperties(int device, cudaDeviceProp& prop)
{
if (deviceCount < 0)
{
loadDeviceData();
}
prop = deviceProperties[device];
}
|