hip_filename stringlengths 5 84 | hip_content stringlengths 79 9.69M | cuda_filename stringlengths 4 83 | cuda_content stringlengths 19 9.69M |
|---|---|---|---|
6bc52933d79ac06cdb3f53fe16124808363f893c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdint>
#include <vector>
#include <chrono>
#include <memory>
#include <hip/hip_cooperative_groups.h>
#include "curves.cu"
// C is the size of the precomputation
// R is the number of points we're handling per thread
template< typename EC, int C = 4, int RR = 8 >
__global__ void
ec_multiexp_straus(var *out, const var *multiples_, const var *scalars_, size_t N)
{
int T = threadIdx.x, B = blockIdx.x, D = blockDim.x;
int elts_per_block = D / BIG_WIDTH;
int tileIdx = T / BIG_WIDTH;
int idx = elts_per_block * B + tileIdx;
size_t n = (N + RR - 1) / RR;
if (idx < n) {
// TODO: Treat remainder separately so R can remain a compile time constant
size_t R = (idx < n - 1) ? RR : (N % RR);
if (R == 0) R = 1;
typedef typename EC::group_type Fr;
static constexpr int JAC_POINT_LIMBS = 3 * EC::field_type::DEGREE * ELT_LIMBS;
static constexpr int AFF_POINT_LIMBS = 2 * EC::field_type::DEGREE * ELT_LIMBS;
int out_off = idx * JAC_POINT_LIMBS;
int m_off = idx * RR * AFF_POINT_LIMBS;
int s_off = idx * RR * ELT_LIMBS;
Fr scalars[RR];
for (int j = 0; j < R; ++j) {
Fr::load(scalars[j], scalars_ + s_off + j*ELT_LIMBS);
Fr::from_monty(scalars[j], scalars[j]);
}
const var *multiples = multiples_ + m_off;
// TODO: Consider loading multiples and/or scalars into shared memory
// i is smallest multiple of C such that i > 254
int i = C * ((254 + C - 1) / C); // C * ceiling(254/C)
assert((i - C * 254) < C);
static constexpr var C_MASK = (1U << C) - 1U;
EC x;
EC::set_zero(x);
size_t set_value = 0;
while (i >= C) {
if (set_value)
EC::mul_2exp<C>(T, x, x);
i -= C;
int q = i / digit::BITS, r = i % digit::BITS;
for (int j = 0; j < R; ++j) {
//(scalars[j][q] >> r) & C_MASK
auto g = fixnum::layout();
var s = g.shfl(scalars[j].a, q);
var win = (s >> r) & C_MASK;
// Handle case where C doesn't divide digit::BITS
int bottom_bits = digit::BITS - r;
// detect when window overlaps digit boundary
if (bottom_bits < C) {
s = g.shfl(scalars[j].a, q + 1);
win |= (s << bottom_bits) & C_MASK;
}
if (win > 0) {
EC m;
set_value = 1;
//EC::add(x, x, multiples[win - 1][j]);
EC::load_affine(m, multiples + ((win-1)*N + j)*AFF_POINT_LIMBS);
EC::mixed_add(T, x, x, m);
}
}
}
EC::store_jac(T, out + out_off, x);
}
}
template< typename EC >
__global__ void
ec_multiexp(var *X, const var *W, size_t n)
{
int T = threadIdx.x, B = blockIdx.x, D = blockDim.x;
int elts_per_block = D / BIG_WIDTH;
int tileIdx = T / BIG_WIDTH;
int idx = elts_per_block * B + tileIdx;
if (idx < n) {
typedef typename EC::group_type Fr;
EC x;
Fr w;
int x_off = idx * EC::NELTS * ELT_LIMBS;
int w_off = idx * ELT_LIMBS;
EC::load_affine(x, X + x_off);
Fr::load(w, W + w_off);
// We're given W in Monty form for some reason, so undo that.
Fr::from_monty(w, w);
EC::mul(x, w.a, x);
EC::store_jac(T, X + x_off, x);
}
}
template< typename EC >
__global__ void
ec_sum_all(var *X, const var *Y, size_t n)
{
int T = threadIdx.x, B = blockIdx.x, D = blockDim.x;
int elts_per_block = D / BIG_WIDTH;
int tileIdx = T / BIG_WIDTH;
int idx = elts_per_block * B + tileIdx;
if (idx < n) {
EC z, x, y;
int off = idx * EC::NELTS * ELT_LIMBS;
EC::load_jac(x, X + off);
EC::load_jac(y, Y + off);
EC::add(T, z, x, y);
EC::store_jac(T, X + off, z);
}
}
static constexpr size_t threads_per_block = 256;
template< typename EC, int C, int R >
void
ec_reduce_straus(hipStream_t &strm, var *out, const var *multiples, const var *scalars, size_t N)
{
hipStreamCreate(&strm);
static constexpr size_t pt_limbs = EC::NELTS * ELT_LIMBS;
size_t n = (N + R - 1) / R;
size_t nblocks = (n * BIG_WIDTH + threads_per_block - 1) / threads_per_block;
hipLaunchKernelGGL(( ec_multiexp_straus<EC, C, R>), dim3(nblocks), dim3(threads_per_block), 0, strm, out, multiples, scalars, N);
size_t r = n & 1, m = n / 2;
for ( ; m != 0; r = m & 1, m >>= 1) {
nblocks = (m * BIG_WIDTH + threads_per_block - 1) / threads_per_block;
hipLaunchKernelGGL(( ec_sum_all<EC>), dim3(nblocks), dim3(threads_per_block), 0, strm, out, out + m*pt_limbs, m);
if (r)
hipLaunchKernelGGL(( ec_sum_all<EC>), dim3(1), dim3(threads_per_block), 0, strm, out, out + 2*m*pt_limbs, 1);
}
}
template< typename EC >
void
ec_reduce(hipStream_t &strm, var *X, const var *w, size_t n)
{
hipStreamCreate(&strm);
size_t nblocks = (n * BIG_WIDTH + threads_per_block - 1) / threads_per_block;
// FIXME: Only works on Pascal and later.
//auto grid = cg::this_grid();
hipLaunchKernelGGL(( ec_multiexp<EC>), dim3(nblocks), dim3(threads_per_block), 0, strm, X, w, n);
static constexpr size_t pt_limbs = EC::NELTS * ELT_LIMBS;
size_t r = n & 1, m = n / 2;
for ( ; m != 0; r = m & 1, m >>= 1) {
nblocks = (m * BIG_WIDTH + threads_per_block - 1) / threads_per_block;
hipLaunchKernelGGL(( ec_sum_all<EC>), dim3(nblocks), dim3(threads_per_block), 0, strm, X, X + m*pt_limbs, m);
if (r)
hipLaunchKernelGGL(( ec_sum_all<EC>), dim3(1), dim3(threads_per_block), 0, strm, X, X + 2*m*pt_limbs, 1);
// TODO: Not sure this is really necessary.
//grid.sync();
}
}
static inline double as_mebibytes(size_t n) {
return n / (long double)(1UL << 20);
}
void print_meminfo(size_t allocated) {
size_t free_mem, dev_mem;
hipMemGetInfo(&free_mem, &dev_mem);
fprintf(stderr, "Allocated %zu bytes; device has %.1f MiB free (%.1f%%).\n",
allocated,
as_mebibytes(free_mem),
100.0 * free_mem / dev_mem);
}
struct CudaFree {
void operator()(var *mem) { hipFree(mem); }
};
typedef std::unique_ptr<var, CudaFree> var_ptr;
var_ptr
allocate_memory(size_t nbytes, int dbg = 0) {
var *mem = nullptr;
hipMallocManaged(&mem, nbytes);
if (mem == nullptr) {
fprintf(stderr, "Failed to allocate enough device memory\n");
abort();
}
if (dbg)
print_meminfo(nbytes);
return var_ptr(mem);
}
var_ptr
load_scalars(size_t n, FILE *inputs)
{
static constexpr size_t scalar_bytes = ELT_BYTES;
size_t total_bytes = n * scalar_bytes;
auto mem = allocate_memory(total_bytes);
if (fread((void *)mem.get(), total_bytes, 1, inputs) < 1) {
fprintf(stderr, "Failed to read scalars\n");
abort();
}
return mem;
}
template< typename EC >
var_ptr
load_points(size_t n, FILE *inputs)
{
typedef typename EC::field_type FF;
static constexpr size_t coord_bytes = FF::DEGREE * ELT_BYTES;
static constexpr size_t aff_pt_bytes = 2 * coord_bytes;
static constexpr size_t jac_pt_bytes = 3 * coord_bytes;
size_t total_aff_bytes = n * aff_pt_bytes;
size_t total_jac_bytes = n * jac_pt_bytes;
auto mem = allocate_memory(total_jac_bytes);
if (fread((void *)mem.get(), total_aff_bytes, 1, inputs) < 1) {
fprintf(stderr, "Failed to read all curve poinst\n");
abort();
}
// insert space for z-coordinates
char *cmem = reinterpret_cast<char *>(mem.get()); //lazy
for (size_t i = n - 1; i > 0; --i) {
char tmp_pt[aff_pt_bytes];
memcpy(tmp_pt, cmem + i * aff_pt_bytes, aff_pt_bytes);
memcpy(cmem + i * jac_pt_bytes, tmp_pt, aff_pt_bytes);
}
return mem;
}
template< typename EC >
var_ptr
load_points_affine(size_t n, FILE *inputs)
{
typedef typename EC::field_type FF;
static constexpr size_t coord_bytes = FF::DEGREE * ELT_BYTES;
static constexpr size_t aff_pt_bytes = 2 * coord_bytes;
size_t total_aff_bytes = n * aff_pt_bytes;
auto mem = allocate_memory(total_aff_bytes);
if (fread((void *)mem.get(), total_aff_bytes, 1, inputs) < 1) {
fprintf(stderr, "Failed to read all curve poinst\n");
abort();
}
return mem;
}
| 6bc52933d79ac06cdb3f53fe16124808363f893c.cu | #include <cstdint>
#include <vector>
#include <chrono>
#include <memory>
#include <cooperative_groups.h>
#include "curves.cu"
// C is the size of the precomputation
// R is the number of points we're handling per thread
template< typename EC, int C = 4, int RR = 8 >
__global__ void
ec_multiexp_straus(var *out, const var *multiples_, const var *scalars_, size_t N)
{
int T = threadIdx.x, B = blockIdx.x, D = blockDim.x;
int elts_per_block = D / BIG_WIDTH;
int tileIdx = T / BIG_WIDTH;
int idx = elts_per_block * B + tileIdx;
size_t n = (N + RR - 1) / RR;
if (idx < n) {
// TODO: Treat remainder separately so R can remain a compile time constant
size_t R = (idx < n - 1) ? RR : (N % RR);
if (R == 0) R = 1;
typedef typename EC::group_type Fr;
static constexpr int JAC_POINT_LIMBS = 3 * EC::field_type::DEGREE * ELT_LIMBS;
static constexpr int AFF_POINT_LIMBS = 2 * EC::field_type::DEGREE * ELT_LIMBS;
int out_off = idx * JAC_POINT_LIMBS;
int m_off = idx * RR * AFF_POINT_LIMBS;
int s_off = idx * RR * ELT_LIMBS;
Fr scalars[RR];
for (int j = 0; j < R; ++j) {
Fr::load(scalars[j], scalars_ + s_off + j*ELT_LIMBS);
Fr::from_monty(scalars[j], scalars[j]);
}
const var *multiples = multiples_ + m_off;
// TODO: Consider loading multiples and/or scalars into shared memory
// i is smallest multiple of C such that i > 254
int i = C * ((254 + C - 1) / C); // C * ceiling(254/C)
assert((i - C * 254) < C);
static constexpr var C_MASK = (1U << C) - 1U;
EC x;
EC::set_zero(x);
size_t set_value = 0;
while (i >= C) {
if (set_value)
EC::mul_2exp<C>(T, x, x);
i -= C;
int q = i / digit::BITS, r = i % digit::BITS;
for (int j = 0; j < R; ++j) {
//(scalars[j][q] >> r) & C_MASK
auto g = fixnum::layout();
var s = g.shfl(scalars[j].a, q);
var win = (s >> r) & C_MASK;
// Handle case where C doesn't divide digit::BITS
int bottom_bits = digit::BITS - r;
// detect when window overlaps digit boundary
if (bottom_bits < C) {
s = g.shfl(scalars[j].a, q + 1);
win |= (s << bottom_bits) & C_MASK;
}
if (win > 0) {
EC m;
set_value = 1;
//EC::add(x, x, multiples[win - 1][j]);
EC::load_affine(m, multiples + ((win-1)*N + j)*AFF_POINT_LIMBS);
EC::mixed_add(T, x, x, m);
}
}
}
EC::store_jac(T, out + out_off, x);
}
}
template< typename EC >
__global__ void
ec_multiexp(var *X, const var *W, size_t n)
{
int T = threadIdx.x, B = blockIdx.x, D = blockDim.x;
int elts_per_block = D / BIG_WIDTH;
int tileIdx = T / BIG_WIDTH;
int idx = elts_per_block * B + tileIdx;
if (idx < n) {
typedef typename EC::group_type Fr;
EC x;
Fr w;
int x_off = idx * EC::NELTS * ELT_LIMBS;
int w_off = idx * ELT_LIMBS;
EC::load_affine(x, X + x_off);
Fr::load(w, W + w_off);
// We're given W in Monty form for some reason, so undo that.
Fr::from_monty(w, w);
EC::mul(x, w.a, x);
EC::store_jac(T, X + x_off, x);
}
}
template< typename EC >
__global__ void
ec_sum_all(var *X, const var *Y, size_t n)
{
int T = threadIdx.x, B = blockIdx.x, D = blockDim.x;
int elts_per_block = D / BIG_WIDTH;
int tileIdx = T / BIG_WIDTH;
int idx = elts_per_block * B + tileIdx;
if (idx < n) {
EC z, x, y;
int off = idx * EC::NELTS * ELT_LIMBS;
EC::load_jac(x, X + off);
EC::load_jac(y, Y + off);
EC::add(T, z, x, y);
EC::store_jac(T, X + off, z);
}
}
static constexpr size_t threads_per_block = 256;
template< typename EC, int C, int R >
void
ec_reduce_straus(cudaStream_t &strm, var *out, const var *multiples, const var *scalars, size_t N)
{
cudaStreamCreate(&strm);
static constexpr size_t pt_limbs = EC::NELTS * ELT_LIMBS;
size_t n = (N + R - 1) / R;
size_t nblocks = (n * BIG_WIDTH + threads_per_block - 1) / threads_per_block;
ec_multiexp_straus<EC, C, R><<< nblocks, threads_per_block, 0, strm>>>(out, multiples, scalars, N);
size_t r = n & 1, m = n / 2;
for ( ; m != 0; r = m & 1, m >>= 1) {
nblocks = (m * BIG_WIDTH + threads_per_block - 1) / threads_per_block;
ec_sum_all<EC><<<nblocks, threads_per_block, 0, strm>>>(out, out + m*pt_limbs, m);
if (r)
ec_sum_all<EC><<<1, threads_per_block, 0, strm>>>(out, out + 2*m*pt_limbs, 1);
}
}
template< typename EC >
void
ec_reduce(cudaStream_t &strm, var *X, const var *w, size_t n)
{
cudaStreamCreate(&strm);
size_t nblocks = (n * BIG_WIDTH + threads_per_block - 1) / threads_per_block;
// FIXME: Only works on Pascal and later.
//auto grid = cg::this_grid();
ec_multiexp<EC><<< nblocks, threads_per_block, 0, strm>>>(X, w, n);
static constexpr size_t pt_limbs = EC::NELTS * ELT_LIMBS;
size_t r = n & 1, m = n / 2;
for ( ; m != 0; r = m & 1, m >>= 1) {
nblocks = (m * BIG_WIDTH + threads_per_block - 1) / threads_per_block;
ec_sum_all<EC><<<nblocks, threads_per_block, 0, strm>>>(X, X + m*pt_limbs, m);
if (r)
ec_sum_all<EC><<<1, threads_per_block, 0, strm>>>(X, X + 2*m*pt_limbs, 1);
// TODO: Not sure this is really necessary.
//grid.sync();
}
}
static inline double as_mebibytes(size_t n) {
return n / (long double)(1UL << 20);
}
void print_meminfo(size_t allocated) {
size_t free_mem, dev_mem;
cudaMemGetInfo(&free_mem, &dev_mem);
fprintf(stderr, "Allocated %zu bytes; device has %.1f MiB free (%.1f%%).\n",
allocated,
as_mebibytes(free_mem),
100.0 * free_mem / dev_mem);
}
struct CudaFree {
void operator()(var *mem) { cudaFree(mem); }
};
typedef std::unique_ptr<var, CudaFree> var_ptr;
var_ptr
allocate_memory(size_t nbytes, int dbg = 0) {
var *mem = nullptr;
cudaMallocManaged(&mem, nbytes);
if (mem == nullptr) {
fprintf(stderr, "Failed to allocate enough device memory\n");
abort();
}
if (dbg)
print_meminfo(nbytes);
return var_ptr(mem);
}
var_ptr
load_scalars(size_t n, FILE *inputs)
{
static constexpr size_t scalar_bytes = ELT_BYTES;
size_t total_bytes = n * scalar_bytes;
auto mem = allocate_memory(total_bytes);
if (fread((void *)mem.get(), total_bytes, 1, inputs) < 1) {
fprintf(stderr, "Failed to read scalars\n");
abort();
}
return mem;
}
template< typename EC >
var_ptr
load_points(size_t n, FILE *inputs)
{
typedef typename EC::field_type FF;
static constexpr size_t coord_bytes = FF::DEGREE * ELT_BYTES;
static constexpr size_t aff_pt_bytes = 2 * coord_bytes;
static constexpr size_t jac_pt_bytes = 3 * coord_bytes;
size_t total_aff_bytes = n * aff_pt_bytes;
size_t total_jac_bytes = n * jac_pt_bytes;
auto mem = allocate_memory(total_jac_bytes);
if (fread((void *)mem.get(), total_aff_bytes, 1, inputs) < 1) {
fprintf(stderr, "Failed to read all curve poinst\n");
abort();
}
// insert space for z-coordinates
char *cmem = reinterpret_cast<char *>(mem.get()); //lazy
for (size_t i = n - 1; i > 0; --i) {
char tmp_pt[aff_pt_bytes];
memcpy(tmp_pt, cmem + i * aff_pt_bytes, aff_pt_bytes);
memcpy(cmem + i * jac_pt_bytes, tmp_pt, aff_pt_bytes);
}
return mem;
}
template< typename EC >
var_ptr
load_points_affine(size_t n, FILE *inputs)
{
typedef typename EC::field_type FF;
static constexpr size_t coord_bytes = FF::DEGREE * ELT_BYTES;
static constexpr size_t aff_pt_bytes = 2 * coord_bytes;
size_t total_aff_bytes = n * aff_pt_bytes;
auto mem = allocate_memory(total_aff_bytes);
if (fread((void *)mem.get(), total_aff_bytes, 1, inputs) < 1) {
fprintf(stderr, "Failed to read all curve poinst\n");
abort();
}
return mem;
}
|
17ed68fb85d4429556348af271e07f98797414da.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author GS <sgazeos@gmail.com>
//
#include <ops/declarable/helpers/segment.h>
#include <ops/declarable/helpers/segment_common.h>
#include <array/NDArrayFactory.h>
#include <helpers/ShapeUtils.h>
#include <helpers/TAD.h>
#include <exceptions/cuda_exception.h>
#include <helpers/PointersManager.h>
#include <helpers/ConstantTadHelper.h>
namespace sd {
namespace ops {
namespace helpers {
// -------------------------------------------------------------------------------------------------------------- //
// Segment ops linear kernels
// -------------------------------------------------------------------------------------------------------------- //
template<typename T, typename I>
static __global__ void
segmentMaxLinearKernel(void *input, Nd4jLong const* inputShape, int *starts, int *lengths, Nd4jLong numOfClasses,
void *output, Nd4jLong const* outputShape) {
__shared__ T *val;
__shared__ Nd4jLong xLen, zLen, zIndex;
__shared__ T *x;
__shared__ T *z;
__shared__ int threadsPerSegment, start, finish;
auto segment = blockIdx.x;
if (threadIdx.x == 0) {
// threadsPerSegment = (gridDim.x + numOfClasses - 1) / numOfClasses;
// segment = blockIdx.x / threadsPerSegment;
x = reinterpret_cast<T *>(input);
z = reinterpret_cast<T *>(output);
extern __shared__ unsigned char shmem[];
val = reinterpret_cast<T *>(shmem);
xLen = shape::length(inputShape);
zLen = shape::length(outputShape);
if (segment < numOfClasses) {
zIndex = shape::getIndexOffset(segment, outputShape);
start = starts[segment];
finish = start + lengths[segment];
z[zIndex] = x[shape::getIndexOffset(start, inputShape)];
val[segment] = z[zIndex];
}
}
__syncthreads();
for (auto e = start + threadIdx.x + 1; e < finish; e += blockDim.x) {
auto xIndex = shape::getIndexOffset(e, inputShape);
sd::math::atomics::nd4j_atomicMax(&z[zIndex], x[xIndex]);
}
}
// -------------------------------------------------------------------------------------------------------------- //
template<typename T, typename I>
static __global__ void
unsortedSegmentMaxLinearKernel(void *input, Nd4jLong const* inputShape, void *indices, Nd4jLong const* indicesShape,
int *starts, int *lengths, Nd4jLong numOfClasses, void *output,
Nd4jLong const* outputShape) {
__shared__ T *val;
__shared__ Nd4jLong xLen, zLen, zIndex;
__shared__ T *x;
__shared__ T *z;
__shared__ I *y; //int threadsPerSegment, start, finish;
auto segment = blockIdx.x;
if (threadIdx.x == 0) {
x = reinterpret_cast<T *>(input);
z = reinterpret_cast<T *>(output);
y = reinterpret_cast<I *>(indices);
xLen = shape::length(inputShape);
zLen = shape::length(outputShape);
zIndex = shape::getIndexOffset(segment, outputShape);
//start = starts[segment];
//finish = start + lengths[segment];
if (lengths[segment] > 0)
z[zIndex] = x[shape::getIndexOffset(starts[segment], inputShape)];
else
z[zIndex] = -DataTypeUtils::max<T>();
}
__syncthreads();
if (lengths[segment] > 0)
for (auto e = threadIdx.x + 1; e < xLen; e += blockDim.x) {
auto xIndex = shape::getIndexOffset(e, inputShape);
auto yIndex = shape::getIndexOffset(e, indicesShape);
if (y[yIndex] == segment) {
sd::math::atomics::nd4j_atomicMax(&z[zIndex], x[xIndex]);
}
}
}
// -------------------------------------------------------------------------------------------------------------- //
template <typename T, typename I>
static __global__ void segmentMaxTadKernel(void* inputBuf, Nd4jLong const* inputShape, Nd4jLong const* inputTads,
Nd4jLong const* inputTadOffsets, I* indices, int* starts, int* lengths, Nd4jLong numOfClasses, void* outputBuf,
Nd4jLong const* outputShape, Nd4jLong const* outputTads, Nd4jLong const* outputTadOffsets, T filler = 0) {
__shared__ T* val;
__shared__ Nd4jLong len, zIndex, total;
__shared__ T* z;
__shared__ int start, finish;
__shared__ I segment;
if (threadIdx.x == 0) {
segment = indices[blockIdx.x]; // / threadsPerSegment;
z = reinterpret_cast<T*>(outputBuf) + outputTadOffsets[segment];
len = shape::length(inputTads);
start = starts[segment];
finish = start + lengths[segment];
total = shape::sizeAt(inputShape, 0);
}
__syncthreads();
auto idx = blockIdx.x;
if (idx <= total) {
auto x = reinterpret_cast<T *>(inputBuf) + inputTadOffsets[idx];
if (blockIdx.x == start) {
for (auto e = threadIdx.x; e < len; e += blockDim.x) {
auto xIndex = shape::getIndexOffset(e, inputTads);
auto zIndex = shape::getIndexOffset(e, outputTads);
sd::math::atomics::nd4j_atomicMax(&z[zIndex], x[xIndex]);
//z[zIndex] = x[xIndex];
}
}
else {
for (auto e = threadIdx.x; e < len; e += blockDim.x) {
auto xIndex = shape::getIndexOffset(e, inputTads);
auto zIndex = shape::getIndexOffset(e, outputTads);
if (lengths[segment])
sd::math::atomics::nd4j_atomicMax(&z[zIndex], x[xIndex]);
}
}
}
}
// -------------------------------------------------------------------------------------------------------------- //
template <typename T, typename I>
static void segmentMaxFunctor_(LaunchContext* context, NDArray* input, NDArray* indices, NDArray* output) {
//int numClasses = output->sizeAt(0);
// if input is a vector: (as if in doc sample)
//Nd4jLong idx = indices->e<Nd4jLong>(0);
output->assign(-DataTypeUtils::infOrMax<T>());
auto stream = context->getCudaStream();
indices->syncToHost();
Nd4jLong numOfClasses = indices->e<Nd4jLong>(indices->lengthOf() - 1) + 1;
NDArray classesRangesLens = NDArrayFactory::create<int>('c', {numOfClasses}, context);
NDArray classesRangesBegs = NDArrayFactory::create<int>('c', {numOfClasses}, context);
classesRangesBegs.assign(indices->lengthOf());
classesRangesLens.assign(0);
dim3 dims(256, 512, 256);
int* begins = reinterpret_cast<int*>(classesRangesBegs.specialBuffer());
int* lengths = reinterpret_cast<int*>(classesRangesLens.specialBuffer());
fillUpSegments(indices, numOfClasses, classesRangesBegs, classesRangesLens);
NDArray::prepareSpecialUse({output}, {input, indices, &classesRangesBegs, &classesRangesLens});
if (input->isVector()) {
hipLaunchKernelGGL(( segmentMaxLinearKernel<T,I>), dim3(numOfClasses), dim3(input->lengthOf()), numOfClasses * 32 + 32, *stream, input->specialBuffer(), input->specialShapeInfo(), begins, lengths, numOfClasses, output->specialBuffer(), output->specialShapeInfo());
}
else {
std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0});
auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), dimensions);
auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), dimensions);
auto inputTads = packX.specialShapeInfo();
auto inputTadOffsets = packX.specialOffsets();
auto outputTads = packZ.specialShapeInfo();
auto outputTadOffsets = packZ.specialOffsets();
hipLaunchKernelGGL(( segmentMaxTadKernel<T,I>), dim3(packX.numberOfTads()), dim3(512), 2048, *stream, input->specialBuffer(), input->specialShapeInfo(), inputTads, inputTadOffsets, reinterpret_cast<I*>(indices->specialBuffer()), begins, lengths, numOfClasses, output->specialBuffer(), output->specialShapeInfo(), outputTads, outputTadOffsets);
}
NDArray::registerSpecialUse({output}, {input, indices, &classesRangesBegs, &classesRangesLens});
}
// -------------------------------------------------------------------------------------------------------------- //
void segmentMaxFunctor(sd::LaunchContext* context , NDArray* input, NDArray* indices, NDArray* output) {
NDArray::prepareSpecialUse({output}, {input, indices});
BUILD_DOUBLE_SELECTOR(input->dataType(), indices->dataType(), segmentMaxFunctor_, (context, input, indices, output), NUMERIC_TYPES, INDEXING_TYPES);
NDArray::registerSpecialUse({output}, {input, indices});
}
// -------------------------------------------------------------------------------------------------------------- //
template <typename T, typename I>
static void unsortedSegmentMaxFunctor_(sd::LaunchContext* context, NDArray* input, NDArray* indices, Nd4jLong numOfClasses, NDArray* output) {
auto stream = context->getCudaStream();
// NDArray classes = NDArrayFactory::create<int>('c', {numOfClasses, 2});
output->assign(DataTypeUtils::infOrMax<T>());
NDArray classesRangesBegs = NDArrayFactory::create<int>('c', {numOfClasses}, context);
NDArray classesRangesLens = NDArrayFactory::create<int>('c', {numOfClasses}, context);
// NDArray row = NDArrayFactory::create<int>('c', {1, 2}, {(int)indices->lengthOf(), (int)0});
// classes.applyTrueBroadcast(sd::BroadcastOpsTuple::Assign(), row, classes);
classesRangesBegs.assign(indices->lengthOf());
classesRangesLens.assign(0);
dim3 dims(numOfClasses, indices->lengthOf(), numOfClasses * 32 + 32);
// int* classesBuf = reinterpret_cast<int*>(classes.specialBuffer());
fillUpSegments(indices, numOfClasses, classesRangesBegs, classesRangesLens);
int* begins = reinterpret_cast<int*>(classesRangesBegs.specialBuffer());
int* lengths = reinterpret_cast<int*>(classesRangesLens.specialBuffer());
if (input->isVector()) {
hipLaunchKernelGGL(( unsortedSegmentMaxLinearKernel<T,I>), dim3(dims.x), dim3(dims.y), dims.z, *stream, input->specialBuffer(), input->specialShapeInfo(), indices->specialBuffer(), indices->specialShapeInfo(), begins, lengths, numOfClasses, output->specialBuffer(), output->specialShapeInfo());
}
else {
std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0});
auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), dimensions);
auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), dimensions);
auto inputTads = packX.specialShapeInfo();
auto inputTadOffsets = packX.specialOffsets();
auto outputTads = packZ.specialShapeInfo();
auto outputTadOffsets = packZ.specialOffsets();
dims.x = input->sizeAt(0);
output->assign(-DataTypeUtils::max<T>());
hipLaunchKernelGGL(( segmentMaxTadKernel<T,I>), dim3(dims.x), dim3(dims.y), dims.z, *stream, input->specialBuffer(), input->specialShapeInfo(), inputTads, inputTadOffsets, reinterpret_cast<I*>(indices->specialBuffer()), begins, lengths, numOfClasses, output->specialBuffer(), output->specialShapeInfo(), outputTads, outputTadOffsets);
}
}
// -------------------------------------------------------------------------------------------------------------- //
void unsortedSegmentMaxFunctor(sd::LaunchContext* context, NDArray* input, NDArray* indices, Nd4jLong numOfClasses, NDArray* output) {
NDArray::prepareSpecialUse({output}, {input, indices});
output->nullify();
BUILD_DOUBLE_SELECTOR(input->dataType(), indices->dataType(), unsortedSegmentMaxFunctor_, (context, input, indices, numOfClasses, output), NUMERIC_TYPES, INDEXING_TYPES);
NDArray::registerSpecialUse({output}, {input, indices});
}
// -------------------------------------------------------------------------------------------------------------- //
// segment max
// -------------------------------------------------------------------------------------------------------------- //
template <typename T, typename I>
static __global__ void segmentMaxBPLinearKernel(void* inputBuf, Nd4jLong const* inputShape, void* forwardOutput,
Nd4jLong const* forwardShape, void* eps, Nd4jLong const* epsShape, void* indicesBuf, Nd4jLong const* indicesShape,
void* outputBuf, Nd4jLong const* outputShape) {
__shared__ T* x;
__shared__ T* gradIn;
__shared__ T* gradOut;
__shared__ I* y;
__shared__ T* z;
__shared__ Nd4jLong xLen, gradLen;
if (threadIdx.x == 0) {
xLen = shape::length(inputShape);
x = reinterpret_cast<T*>(inputBuf);
y = reinterpret_cast<I*>(indicesBuf);
z = reinterpret_cast<T*>(outputBuf);
gradIn = reinterpret_cast<T*>(forwardOutput);
gradOut = reinterpret_cast<T*>(eps);
gradLen = shape::length(epsShape);
}
__syncthreads();
auto start = blockIdx.x * blockDim.x + threadIdx.x;
auto step = gridDim.x * blockDim.x;
for (auto e = start; e < xLen; e += step) {
auto zOffset = shape::getIndexOffset(e, outputShape);
auto xOffset = shape::getIndexOffset(e, inputShape);
auto yOffset = shape::getIndexOffset(e, indicesShape);
auto classIndex = y[yOffset];
auto gradOffsetI = shape::getIndexOffset(classIndex, forwardShape);
auto gradOffsetO = shape::getIndexOffset(classIndex, epsShape);
if (sd::math::nd4j_abs(gradIn[gradOffsetI] - x[xOffset]) <= T(1.e-6)) {
z[zOffset] = gradOut[gradOffsetO];
}
}
}
// -------------------------------------------------------------------------------------------------------------- //
template <typename T, typename I>
static __global__ void segmentMaxBPTadKernel(void* inputBuf, Nd4jLong const* inputShape, void* forwardOutput,
Nd4jLong const* forwardShape, void* eps, Nd4jLong const* epsShape, void* indicesBuf, Nd4jLong const* indicesShape,
void* outputBuf, Nd4jLong const* outputShape,Nd4jLong const* inputTad,
Nd4jLong const* inputOffsets, Nd4jLong const* gradInTad, Nd4jLong const* gradInOffsets,
Nd4jLong const* gradOutTad, Nd4jLong const* gradOutOffsets, Nd4jLong const* outTad,
Nd4jLong const* outOffsets) {
__shared__ T* x;
__shared__ T* gradIn;
__shared__ T* gradOut;
__shared__ I* y;
__shared__ T* z;
__shared__ Nd4jLong xLen, yLen, gradLen, currentLen;
if (threadIdx.x == 0) {
xLen = shape::length(inputShape);
x = reinterpret_cast<T*>(inputBuf);
y = reinterpret_cast<I*>(indicesBuf);
z = reinterpret_cast<T*>(outputBuf);
yLen = shape::length(indicesShape);
gradOut = reinterpret_cast<T*>(eps);
gradIn = reinterpret_cast<T*>(forwardOutput);
gradLen = shape::length(epsShape);
currentLen = shape::length(outTad);
}
__syncthreads();
for (auto i = blockIdx.x; i < yLen; i += gridDim.x) {
auto yIndex = shape::getIndexOffset(i, indicesShape);
auto segment = y[yIndex];
T* current = x + inputOffsets[i];
T* currentOut = z + outOffsets[i];
T* in = gradIn + gradInOffsets[segment];
T* outGrad = gradOut + gradOutOffsets[segment];
for (auto e = threadIdx.x; e < currentLen; e += blockDim.x) {
if (sd::math::nd4j_abs(in[e] - current[e]) <= T(1.e-6))
currentOut[e] = outGrad[e];
}
}
}
// -------------------------------------------------------------------------------------------------------------- //
template <typename T, typename I>
int segmentMaxFunctorBP_(sd::LaunchContext* context , NDArray* input, NDArray* indices, NDArray* gradOut, NDArray* output) {
//int numOfClasses = gradOut->sizeAt(0);
// if input is a vector: (as if in doc sample)
auto stream = context->getCudaStream();
NDArray tempRes(gradOut->ordering(), gradOut->getShapeAsVector(), DataTypeUtils::fromT<T>(), context);//->shapeInfo(), context);
segmentMaxFunctor_<T, I>(context, input, indices, &tempRes);
NDArray::prepareSpecialUse({output}, {input, indices, gradOut, &tempRes});
if (input->isVector()) {
Nd4jLong loop_size = input->lengthOf();
auto numOfClasses = gradOut->lengthOf(); //indices->e<Nd4jLong>(loop_size - 1);
hipLaunchKernelGGL(( segmentMaxBPLinearKernel<T,I>), dim3(1 + gradOut->lengthOf()), dim3(input->lengthOf()), 256, *stream, input->specialBuffer(), input->specialShapeInfo(),
tempRes.specialBuffer(), tempRes.specialShapeInfo(), gradOut->specialBuffer(), gradOut->specialShapeInfo(),
indices->specialBuffer(), indices->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo());
}
else {
std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0});
auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), dimensions);
auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), dimensions);
auto packGradIn = sd::ConstantTadHelper::getInstance().tadForDimensions(tempRes.shapeInfo(), dimensions);
auto packGradOut = sd::ConstantTadHelper::getInstance().tadForDimensions(gradOut->shapeInfo(), dimensions);
Nd4jLong const* inputTads = packX.specialShapeInfo();
Nd4jLong const* inputTadOffsets = packX.specialOffsets();
Nd4jLong const* outputTads = packZ.specialShapeInfo();
Nd4jLong const* outputTadOffsets = packZ.specialOffsets();
Nd4jLong const* gradInTads = packGradIn.specialShapeInfo();
Nd4jLong const* gradInTadOffsets = packGradIn.specialOffsets();
Nd4jLong const* gradOutTads = packGradOut.specialShapeInfo();
Nd4jLong const* gradOutTadOffsets = packGradOut.specialOffsets();
hipLaunchKernelGGL(( segmentMaxBPTadKernel<T,I>), dim3(gradOut->lengthOf()), dim3(input->lengthOf()), 256, *stream, input->specialBuffer(), input->specialShapeInfo(),
tempRes.specialBuffer(), tempRes.specialShapeInfo(), gradOut->specialBuffer(), gradOut->specialShapeInfo(),
indices->specialBuffer(), indices->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo(),
inputTads, inputTadOffsets, gradInTads, gradInTadOffsets, gradOutTads, gradOutTadOffsets,
outputTads, outputTadOffsets);
}
NDArray::registerSpecialUse({output}, {input, indices, gradOut, &tempRes});
return Status::OK();
}
// -------------------------------------------------------------------------------------------------------------- //
// Dispatch wrapper for sorted segment-max backprop: selects the templated
// implementation by output (float) dtype and index dtype.
// NOTE(review): BUILD_DOUBLE_SELECTOR returns from inside the macro on a
// dtype match, so registerSpecialUse below only runs when no case matched and
// the function can then fall off the end without a return value -- TODO
// confirm the selector's default behavior (likely a throw).
int segmentMaxFunctorBP(sd::LaunchContext* context , NDArray* input, NDArray* indices, NDArray* gradOut, NDArray* output) {
NDArray::prepareSpecialUse({output}, {input, indices, gradOut});
BUILD_DOUBLE_SELECTOR(output->dataType(), indices->dataType(), return segmentMaxFunctorBP_, (context, input,
indices, gradOut, output), FLOAT_TYPES, INDEXING_TYPES);
NDArray::registerSpecialUse({output}, {input, indices, gradOut});
}
// -------------------------------------------------------------------------------------------------------------- //
template <typename T, typename I>
// Backward pass for unsorted segment max.
// Recomputes the forward segment-max into tempRes, then the BP kernels copy
// each output-gradient element to the input positions whose value equals the
// forward maximum of their segment (other positions keep their previous
// output value).
//   numOfClasses - segment count used for the forward recomputation.
// Returns Status::OK() unconditionally.
static int unsortedSegmentMaxFunctorBP_(sd::LaunchContext* context, NDArray* input, NDArray* indices, NDArray* gradOut, Nd4jLong numOfClasses, NDArray* output) {
    auto stream = context->getCudaStream();
    // forward result is not provided by the caller, so rebuild it here
    NDArray tempRes(gradOut->ordering(), gradOut->getShapeAsVector(), DataTypeUtils::fromT<T>(), context);
    unsortedSegmentMaxFunctor_<T, I>(context, input, indices, numOfClasses, &tempRes);
    NDArray::prepareSpecialUse({output}, {input, indices, gradOut, &tempRes});
    if (input->isVector()) {
        // flat case: the BP kernel grid-strides over all input elements
        hipLaunchKernelGGL(( segmentMaxBPLinearKernel<T,I>), dim3(gradOut->lengthOf()), dim3(input->lengthOf()), 256, *stream, input->specialBuffer(), input->specialShapeInfo(),
            tempRes.specialBuffer(), tempRes.specialShapeInfo(), gradOut->specialBuffer(), gradOut->specialShapeInfo(),
            indices->specialBuffer(), indices->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo());
    }
    else {
        // rank > 1: operate per-TAD over all dimensions except axis 0
        std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0});
        auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), dimensions);
        auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), dimensions);
        auto packGradIn = sd::ConstantTadHelper::getInstance().tadForDimensions(tempRes.shapeInfo(), dimensions);
        auto packGradOut = sd::ConstantTadHelper::getInstance().tadForDimensions(gradOut->shapeInfo(), dimensions);
        Nd4jLong const* inputTads = packX.specialShapeInfo();
        Nd4jLong const* inputTadOffsets = packX.specialOffsets();
        Nd4jLong const* outputTads = packZ.specialShapeInfo();
        Nd4jLong const* outputTadOffsets = packZ.specialOffsets();
        Nd4jLong const* gradInTads = packGradIn.specialShapeInfo();
        Nd4jLong const* gradInTadOffsets = packGradIn.specialOffsets();
        Nd4jLong const* gradOutTads = packGradOut.specialShapeInfo();
        Nd4jLong const* gradOutTadOffsets = packGradOut.specialOffsets();
        hipLaunchKernelGGL(( segmentMaxBPTadKernel<T,I>), dim3(gradOut->lengthOf()), dim3(input->lengthOf()), 256, *stream, input->specialBuffer(), input->specialShapeInfo(),
            tempRes.specialBuffer(), tempRes.specialShapeInfo(), gradOut->specialBuffer(), gradOut->specialShapeInfo(),
            indices->specialBuffer(), indices->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo(),
            inputTads, inputTadOffsets, gradInTads, gradInTadOffsets, gradOutTads, gradOutTadOffsets,
            outputTads, outputTadOffsets);
    }
    NDArray::registerSpecialUse({output}, {input, indices, gradOut, &tempRes});
    return Status::OK();
}
// -------------------------------------------------------------------------------------------------------------- //
// Dispatch wrapper for unsorted segment-max backprop: selects the templated
// implementation by output (float) dtype and index dtype.
// NOTE(review): BUILD_DOUBLE_SELECTOR returns from inside the macro on a
// dtype match, so the function can fall off the end without a return value
// when no case fires -- TODO confirm the selector's default behavior.
int unsortedSegmentMaxFunctorBP(sd::LaunchContext* context , NDArray* input, NDArray* indices, NDArray* gradOut, Nd4jLong numOfClasses, NDArray* output) {
NDArray::prepareSpecialUse({output}, {input, indices, gradOut});
BUILD_DOUBLE_SELECTOR(output->dataType(), indices->dataType(), return unsortedSegmentMaxFunctorBP_, (context, input, indices, gradOut, numOfClasses, output), FLOAT_TYPES, INDEXING_TYPES);
NDArray::registerSpecialUse({output}, {input, indices, gradOut});
}
}
}
} | 17ed68fb85d4429556348af271e07f98797414da.cu | /* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author GS <sgazeos@gmail.com>
//
#include <ops/declarable/helpers/segment.h>
#include <ops/declarable/helpers/segment_common.h>
#include <array/NDArrayFactory.h>
#include <helpers/ShapeUtils.h>
#include <helpers/TAD.h>
#include <exceptions/cuda_exception.h>
#include <helpers/PointersManager.h>
#include <helpers/ConstantTadHelper.h>
namespace sd {
namespace ops {
namespace helpers {
// -------------------------------------------------------------------------------------------------------------- //
// Segment ops linear kernels
// -------------------------------------------------------------------------------------------------------------- //
// Sorted segment max over a flat (vector) input: one block per segment.
// starts[s] is the first input index of segment s and lengths[s] its extent
// (segments are contiguous because the indices are sorted). Thread 0 seeds
// the output with the segment's first element; all threads then fold the
// remaining elements in with atomicMax.
template<typename T, typename I>
static __global__ void
segmentMaxLinearKernel(void *input, Nd4jLong const* inputShape, int *starts, int *lengths, Nd4jLong numOfClasses,
void *output, Nd4jLong const* outputShape) {
__shared__ T *val;
__shared__ Nd4jLong xLen, zLen, zIndex;
__shared__ T *x;
__shared__ T *z;
__shared__ int threadsPerSegment, start, finish;
auto segment = blockIdx.x;
if (threadIdx.x == 0) {
// threadsPerSegment = (gridDim.x + numOfClasses - 1) / numOfClasses;
// segment = blockIdx.x / threadsPerSegment;
x = reinterpret_cast<T *>(input);
z = reinterpret_cast<T *>(output);
extern __shared__ unsigned char shmem[];
val = reinterpret_cast<T *>(shmem);
xLen = shape::length(inputShape);
zLen = shape::length(outputShape);
if (segment < numOfClasses) {
zIndex = shape::getIndexOffset(segment, outputShape);
start = starts[segment];
finish = start + lengths[segment];
// seed with the first element so atomicMax below has a valid baseline
z[zIndex] = x[shape::getIndexOffset(start, inputShape)];
val[segment] = z[zIndex];
}
}
__syncthreads();
// NOTE(review): start/finish are only written when segment < numOfClasses;
// callers appear to launch exactly numOfClasses blocks -- TODO confirm.
for (auto e = start + threadIdx.x + 1; e < finish; e += blockDim.x) {
auto xIndex = shape::getIndexOffset(e, inputShape);
sd::math::atomics::nd4j_atomicMax(&z[zIndex], x[xIndex]);
}
}
// -------------------------------------------------------------------------------------------------------------- //
// Unsorted segment max over a flat (vector) input: one block per segment.
// Indices need not be sorted, so every thread scans the whole input and folds
// in only the elements whose index equals this block's segment. Empty
// segments receive -max<T>.
template<typename T, typename I>
static __global__ void
unsortedSegmentMaxLinearKernel(void *input, Nd4jLong const* inputShape, void *indices, Nd4jLong const* indicesShape,
int *starts, int *lengths, Nd4jLong numOfClasses, void *output,
Nd4jLong const* outputShape) {
__shared__ T *val;
__shared__ Nd4jLong xLen, zLen, zIndex;
__shared__ T *x;
__shared__ T *z;
__shared__ I *y; //int threadsPerSegment, start, finish;
auto segment = blockIdx.x;
if (threadIdx.x == 0) {
x = reinterpret_cast<T *>(input);
z = reinterpret_cast<T *>(output);
y = reinterpret_cast<I *>(indices);
xLen = shape::length(inputShape);
zLen = shape::length(outputShape);
zIndex = shape::getIndexOffset(segment, outputShape);
//start = starts[segment];
//finish = start + lengths[segment];
// seed with the segment's first occurrence, or the "empty" sentinel
if (lengths[segment] > 0)
z[zIndex] = x[shape::getIndexOffset(starts[segment], inputShape)];
else
z[zIndex] = -DataTypeUtils::max<T>();
}
__syncthreads();
if (lengths[segment] > 0)
for (auto e = threadIdx.x + 1; e < xLen; e += blockDim.x) {
auto xIndex = shape::getIndexOffset(e, inputShape);
auto yIndex = shape::getIndexOffset(e, indicesShape);
if (y[yIndex] == segment) {
sd::math::atomics::nd4j_atomicMax(&z[zIndex], x[xIndex]);
}
}
}
// -------------------------------------------------------------------------------------------------------------- //
// Sorted segment max for rank > 1 inputs: one block per outer slice (TAD).
// indices[blockIdx.x] names the destination segment; every element of the
// slice is folded into the corresponding output TAD via atomicMax.
// The output buffer is expected to be pre-filled by the host wrapper.
template <typename T, typename I>
static __global__ void segmentMaxTadKernel(void* inputBuf, Nd4jLong const* inputShape, Nd4jLong const* inputTads,
Nd4jLong const* inputTadOffsets, I* indices, int* starts, int* lengths, Nd4jLong numOfClasses, void* outputBuf,
Nd4jLong const* outputShape, Nd4jLong const* outputTads, Nd4jLong const* outputTadOffsets, T filler = 0) {
__shared__ T* val;
__shared__ Nd4jLong len, zIndex, total;
__shared__ T* z;
__shared__ int start, finish;
__shared__ I segment;
if (threadIdx.x == 0) {
segment = indices[blockIdx.x]; // / threadsPerSegment;
z = reinterpret_cast<T*>(outputBuf) + outputTadOffsets[segment];
len = shape::length(inputTads);
start = starts[segment];
finish = start + lengths[segment];
total = shape::sizeAt(inputShape, 0);
}
__syncthreads();
auto idx = blockIdx.x;
// NOTE(review): the guard uses 'idx <= total' although valid slice indices
// are 0..total-1; callers size the grid by the TAD count, so the boundary
// case should not be hit in practice -- TODO confirm.
if (idx <= total) {
auto x = reinterpret_cast<T *>(inputBuf) + inputTadOffsets[idx];
if (blockIdx.x == start) {
// first slice of the segment: fold every element into the output TAD
for (auto e = threadIdx.x; e < len; e += blockDim.x) {
auto xIndex = shape::getIndexOffset(e, inputTads);
auto zIndex = shape::getIndexOffset(e, outputTads);
sd::math::atomics::nd4j_atomicMax(&z[zIndex], x[xIndex]);
//z[zIndex] = x[xIndex];
}
}
else {
for (auto e = threadIdx.x; e < len; e += blockDim.x) {
auto xIndex = shape::getIndexOffset(e, inputTads);
auto zIndex = shape::getIndexOffset(e, outputTads);
if (lengths[segment])
sd::math::atomics::nd4j_atomicMax(&z[zIndex], x[xIndex]);
}
}
}
}
// -------------------------------------------------------------------------------------------------------------- //
// Host-side driver for sorted segment max: derives the segment count from the
// last (largest) index, builds per-segment start/length tables on device, and
// dispatches either the flat-vector kernel or the per-TAD kernel.
template <typename T, typename I>
static void segmentMaxFunctor_(LaunchContext* context, NDArray* input, NDArray* indices, NDArray* output) {
//int numClasses = output->sizeAt(0);
// if input is a vector: (as if in doc sample)
//Nd4jLong idx = indices->e<Nd4jLong>(0);
// pre-fill so the kernels' atomicMax works from a defined baseline
output->assign(-DataTypeUtils::infOrMax<T>());
auto stream = context->getCudaStream();
indices->syncToHost();
// indices are sorted, so the last one determines the number of segments
Nd4jLong numOfClasses = indices->e<Nd4jLong>(indices->lengthOf() - 1) + 1;
NDArray classesRangesLens = NDArrayFactory::create<int>('c', {numOfClasses}, context);
NDArray classesRangesBegs = NDArrayFactory::create<int>('c', {numOfClasses}, context);
classesRangesBegs.assign(indices->lengthOf());
classesRangesLens.assign(0);
dim3 dims(256, 512, 256);
int* begins = reinterpret_cast<int*>(classesRangesBegs.specialBuffer());
int* lengths = reinterpret_cast<int*>(classesRangesLens.specialBuffer());
fillUpSegments(indices, numOfClasses, classesRangesBegs, classesRangesLens);
NDArray::prepareSpecialUse({output}, {input, indices, &classesRangesBegs, &classesRangesLens});
if (input->isVector()) {
segmentMaxLinearKernel<T,I><<<numOfClasses, input->lengthOf(), numOfClasses * 32 + 32, *stream>>>(input->specialBuffer(), input->specialShapeInfo(), begins, lengths, numOfClasses, output->specialBuffer(), output->specialShapeInfo());
}
else {
// rank > 1: one block per outer slice (TAD), excluding axis 0
std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0});
auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), dimensions);
auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), dimensions);
auto inputTads = packX.specialShapeInfo();
auto inputTadOffsets = packX.specialOffsets();
auto outputTads = packZ.specialShapeInfo();
auto outputTadOffsets = packZ.specialOffsets();
segmentMaxTadKernel<T,I><<<packX.numberOfTads(), 512, 2048, *stream>>>(input->specialBuffer(), input->specialShapeInfo(), inputTads, inputTadOffsets, reinterpret_cast<I*>(indices->specialBuffer()), begins, lengths, numOfClasses, output->specialBuffer(), output->specialShapeInfo(), outputTads, outputTadOffsets);
}
NDArray::registerSpecialUse({output}, {input, indices, &classesRangesBegs, &classesRangesLens});
}
// -------------------------------------------------------------------------------------------------------------- //
// Public entry for sorted segment max: dispatches on input/index dtypes to
// the templated implementation above.
void segmentMaxFunctor(sd::LaunchContext* context , NDArray* input, NDArray* indices, NDArray* output) {
NDArray::prepareSpecialUse({output}, {input, indices});
BUILD_DOUBLE_SELECTOR(input->dataType(), indices->dataType(), segmentMaxFunctor_, (context, input, indices, output), NUMERIC_TYPES, INDEXING_TYPES);
NDArray::registerSpecialUse({output}, {input, indices});
}
// -------------------------------------------------------------------------------------------------------------- //
// Host-side driver for unsorted segment max with an explicit class count:
// builds per-segment start/length tables and dispatches the flat-vector or
// per-TAD kernel.
template <typename T, typename I>
static void unsortedSegmentMaxFunctor_(sd::LaunchContext* context, NDArray* input, NDArray* indices, Nd4jLong numOfClasses, NDArray* output) {
auto stream = context->getCudaStream();
// NDArray classes = NDArrayFactory::create<int>('c', {numOfClasses, 2});
// NOTE(review): this pre-fill uses +infOrMax while the sorted variant uses
// -infOrMax; both launch paths below re-seed the output themselves (the TAD
// branch reassigns -max<T> right before its launch) -- TODO confirm intent.
output->assign(DataTypeUtils::infOrMax<T>());
NDArray classesRangesBegs = NDArrayFactory::create<int>('c', {numOfClasses}, context);
NDArray classesRangesLens = NDArrayFactory::create<int>('c', {numOfClasses}, context);
// NDArray row = NDArrayFactory::create<int>('c', {1, 2}, {(int)indices->lengthOf(), (int)0});
// classes.applyTrueBroadcast(sd::BroadcastOpsTuple::Assign(), row, classes);
classesRangesBegs.assign(indices->lengthOf());
classesRangesLens.assign(0);
// dims: x = grid, y = block, z = dynamic shared-memory bytes
dim3 dims(numOfClasses, indices->lengthOf(), numOfClasses * 32 + 32);
// int* classesBuf = reinterpret_cast<int*>(classes.specialBuffer());
fillUpSegments(indices, numOfClasses, classesRangesBegs, classesRangesLens);
int* begins = reinterpret_cast<int*>(classesRangesBegs.specialBuffer());
int* lengths = reinterpret_cast<int*>(classesRangesLens.specialBuffer());
if (input->isVector()) {
unsortedSegmentMaxLinearKernel<T,I><<<dims.x, dims.y, dims.z, *stream>>>(input->specialBuffer(), input->specialShapeInfo(), indices->specialBuffer(), indices->specialShapeInfo(), begins, lengths, numOfClasses, output->specialBuffer(), output->specialShapeInfo());
}
else {
// rank > 1: one block per outer slice (TAD), excluding axis 0
std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0});
auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), dimensions);
auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), dimensions);
auto inputTads = packX.specialShapeInfo();
auto inputTadOffsets = packX.specialOffsets();
auto outputTads = packZ.specialShapeInfo();
auto outputTadOffsets = packZ.specialOffsets();
dims.x = input->sizeAt(0);
// re-seed so atomicMax in the TAD kernel starts from a valid baseline
output->assign(-DataTypeUtils::max<T>());
segmentMaxTadKernel<T,I><<<dims.x, dims.y, dims.z, *stream>>>(input->specialBuffer(), input->specialShapeInfo(), inputTads, inputTadOffsets, reinterpret_cast<I*>(indices->specialBuffer()), begins, lengths, numOfClasses, output->specialBuffer(), output->specialShapeInfo(), outputTads, outputTadOffsets);
}
}
// -------------------------------------------------------------------------------------------------------------- //
// Public entry for unsorted segment max: zeroes the output, then dispatches
// on input/index dtypes to the templated implementation above.
void unsortedSegmentMaxFunctor(sd::LaunchContext* context, NDArray* input, NDArray* indices, Nd4jLong numOfClasses, NDArray* output) {
NDArray::prepareSpecialUse({output}, {input, indices});
output->nullify();
BUILD_DOUBLE_SELECTOR(input->dataType(), indices->dataType(), unsortedSegmentMaxFunctor_, (context, input, indices, numOfClasses, output), NUMERIC_TYPES, INDEXING_TYPES);
NDArray::registerSpecialUse({output}, {input, indices});
}
// -------------------------------------------------------------------------------------------------------------- //
// segment max
// -------------------------------------------------------------------------------------------------------------- //
// Backprop kernel for segment max, flat input: grid-stride over all input
// elements. A position receives its segment's output gradient iff its value
// matches the forward maximum within 1e-6; other positions keep whatever the
// output buffer already holds.
template <typename T, typename I>
static __global__ void segmentMaxBPLinearKernel(void* inputBuf, Nd4jLong const* inputShape, void* forwardOutput,
Nd4jLong const* forwardShape, void* eps, Nd4jLong const* epsShape, void* indicesBuf, Nd4jLong const* indicesShape,
void* outputBuf, Nd4jLong const* outputShape) {
__shared__ T* x;
__shared__ T* gradIn;
__shared__ T* gradOut;
__shared__ I* y;
__shared__ T* z;
__shared__ Nd4jLong xLen, gradLen;
if (threadIdx.x == 0) {
xLen = shape::length(inputShape);
x = reinterpret_cast<T*>(inputBuf);
y = reinterpret_cast<I*>(indicesBuf);
z = reinterpret_cast<T*>(outputBuf);
gradIn = reinterpret_cast<T*>(forwardOutput);
gradOut = reinterpret_cast<T*>(eps);
gradLen = shape::length(epsShape);
}
__syncthreads();
auto start = blockIdx.x * blockDim.x + threadIdx.x;
auto step = gridDim.x * blockDim.x;
for (auto e = start; e < xLen; e += step) {
auto zOffset = shape::getIndexOffset(e, outputShape);
auto xOffset = shape::getIndexOffset(e, inputShape);
auto yOffset = shape::getIndexOffset(e, indicesShape);
auto classIndex = y[yOffset];
auto gradOffsetI = shape::getIndexOffset(classIndex, forwardShape);
auto gradOffsetO = shape::getIndexOffset(classIndex, epsShape);
// forward the gradient only to the argmax position(s) of the segment
if (sd::math::nd4j_abs(gradIn[gradOffsetI] - x[xOffset]) <= T(1.e-6)) {
z[zOffset] = gradOut[gradOffsetO];
}
}
}
// -------------------------------------------------------------------------------------------------------------- //
// Backprop kernel for segment max, rank > 1: grid-strides over input slices
// (one TAD per index entry). Within each slice, the segment's output-gradient
// element is copied to every position whose value equals the forward maximum
// within 1e-6.
template <typename T, typename I>
static __global__ void segmentMaxBPTadKernel(void* inputBuf, Nd4jLong const* inputShape, void* forwardOutput,
Nd4jLong const* forwardShape, void* eps, Nd4jLong const* epsShape, void* indicesBuf, Nd4jLong const* indicesShape,
void* outputBuf, Nd4jLong const* outputShape,Nd4jLong const* inputTad,
Nd4jLong const* inputOffsets, Nd4jLong const* gradInTad, Nd4jLong const* gradInOffsets,
Nd4jLong const* gradOutTad, Nd4jLong const* gradOutOffsets, Nd4jLong const* outTad,
Nd4jLong const* outOffsets) {
__shared__ T* x;
__shared__ T* gradIn;
__shared__ T* gradOut;
__shared__ I* y;
__shared__ T* z;
__shared__ Nd4jLong xLen, yLen, gradLen, currentLen;
if (threadIdx.x == 0) {
xLen = shape::length(inputShape);
x = reinterpret_cast<T*>(inputBuf);
y = reinterpret_cast<I*>(indicesBuf);
z = reinterpret_cast<T*>(outputBuf);
yLen = shape::length(indicesShape);
gradOut = reinterpret_cast<T*>(eps);
gradIn = reinterpret_cast<T*>(forwardOutput);
gradLen = shape::length(epsShape);
currentLen = shape::length(outTad);
}
__syncthreads();
for (auto i = blockIdx.x; i < yLen; i += gridDim.x) {
auto yIndex = shape::getIndexOffset(i, indicesShape);
auto segment = y[yIndex];
// slice i of the input/output, and its segment's forward/eps slices
T* current = x + inputOffsets[i];
T* currentOut = z + outOffsets[i];
T* in = gradIn + gradInOffsets[segment];
T* outGrad = gradOut + gradOutOffsets[segment];
for (auto e = threadIdx.x; e < currentLen; e += blockDim.x) {
if (sd::math::nd4j_abs(in[e] - current[e]) <= T(1.e-6))
currentOut[e] = outGrad[e];
}
}
}
// -------------------------------------------------------------------------------------------------------------- //
template <typename T, typename I>
// Backward pass for sorted segment max.
// Recomputes the forward segment-max into tempRes, then the BP kernels copy
// each output-gradient element to the input positions whose value equals the
// forward maximum of their segment (other positions keep their previous
// output value).
// Returns Status::OK() unconditionally.
int segmentMaxFunctorBP_(sd::LaunchContext* context , NDArray* input, NDArray* indices, NDArray* gradOut, NDArray* output) {
    auto stream = context->getCudaStream();
    // forward result is not provided by the caller, so rebuild it here
    NDArray tempRes(gradOut->ordering(), gradOut->getShapeAsVector(), DataTypeUtils::fromT<T>(), context);
    segmentMaxFunctor_<T, I>(context, input, indices, &tempRes);
    NDArray::prepareSpecialUse({output}, {input, indices, gradOut, &tempRes});
    if (input->isVector()) {
        // flat case: the BP kernel grid-strides over all input elements
        segmentMaxBPLinearKernel<T,I><<<1 + gradOut->lengthOf(), input->lengthOf(), 256, *stream>>>(input->specialBuffer(), input->specialShapeInfo(),
            tempRes.specialBuffer(), tempRes.specialShapeInfo(), gradOut->specialBuffer(), gradOut->specialShapeInfo(),
            indices->specialBuffer(), indices->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo());
    }
    else {
        // rank > 1: operate per-TAD over all dimensions except axis 0
        std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0});
        auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), dimensions);
        auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), dimensions);
        auto packGradIn = sd::ConstantTadHelper::getInstance().tadForDimensions(tempRes.shapeInfo(), dimensions);
        auto packGradOut = sd::ConstantTadHelper::getInstance().tadForDimensions(gradOut->shapeInfo(), dimensions);
        Nd4jLong const* inputTads = packX.specialShapeInfo();
        Nd4jLong const* inputTadOffsets = packX.specialOffsets();
        Nd4jLong const* outputTads = packZ.specialShapeInfo();
        Nd4jLong const* outputTadOffsets = packZ.specialOffsets();
        Nd4jLong const* gradInTads = packGradIn.specialShapeInfo();
        Nd4jLong const* gradInTadOffsets = packGradIn.specialOffsets();
        Nd4jLong const* gradOutTads = packGradOut.specialShapeInfo();
        Nd4jLong const* gradOutTadOffsets = packGradOut.specialOffsets();
        segmentMaxBPTadKernel<T,I><<<gradOut->lengthOf(), input->lengthOf(), 256, *stream>>>(input->specialBuffer(), input->specialShapeInfo(),
            tempRes.specialBuffer(), tempRes.specialShapeInfo(), gradOut->specialBuffer(), gradOut->specialShapeInfo(),
            indices->specialBuffer(), indices->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo(),
            inputTads, inputTadOffsets, gradInTads, gradInTadOffsets, gradOutTads, gradOutTadOffsets,
            outputTads, outputTadOffsets);
    }
    NDArray::registerSpecialUse({output}, {input, indices, gradOut, &tempRes});
    return Status::OK();
}
// -------------------------------------------------------------------------------------------------------------- //
// Dispatch wrapper for sorted segment-max backprop: selects the templated
// implementation by output (float) dtype and index dtype.
// NOTE(review): BUILD_DOUBLE_SELECTOR returns from inside the macro on a
// dtype match, so registerSpecialUse below only runs when no case matched and
// the function can then fall off the end without a return value -- TODO
// confirm the selector's default behavior (likely a throw).
int segmentMaxFunctorBP(sd::LaunchContext* context , NDArray* input, NDArray* indices, NDArray* gradOut, NDArray* output) {
NDArray::prepareSpecialUse({output}, {input, indices, gradOut});
BUILD_DOUBLE_SELECTOR(output->dataType(), indices->dataType(), return segmentMaxFunctorBP_, (context, input,
indices, gradOut, output), FLOAT_TYPES, INDEXING_TYPES);
NDArray::registerSpecialUse({output}, {input, indices, gradOut});
}
// -------------------------------------------------------------------------------------------------------------- //
template <typename T, typename I>
// Backward pass for unsorted segment max.
// Recomputes the forward segment-max into tempRes, then the BP kernels copy
// each output-gradient element to the input positions whose value equals the
// forward maximum of their segment (other positions keep their previous
// output value).
//   numOfClasses - segment count used for the forward recomputation.
// Returns Status::OK() unconditionally.
static int unsortedSegmentMaxFunctorBP_(sd::LaunchContext* context, NDArray* input, NDArray* indices, NDArray* gradOut, Nd4jLong numOfClasses, NDArray* output) {
    auto stream = context->getCudaStream();
    // forward result is not provided by the caller, so rebuild it here
    NDArray tempRes(gradOut->ordering(), gradOut->getShapeAsVector(), DataTypeUtils::fromT<T>(), context);
    unsortedSegmentMaxFunctor_<T, I>(context, input, indices, numOfClasses, &tempRes);
    NDArray::prepareSpecialUse({output}, {input, indices, gradOut, &tempRes});
    if (input->isVector()) {
        // flat case: the BP kernel grid-strides over all input elements
        segmentMaxBPLinearKernel<T,I><<<gradOut->lengthOf(), input->lengthOf(), 256, *stream>>>(input->specialBuffer(), input->specialShapeInfo(),
            tempRes.specialBuffer(), tempRes.specialShapeInfo(), gradOut->specialBuffer(), gradOut->specialShapeInfo(),
            indices->specialBuffer(), indices->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo());
    }
    else {
        // rank > 1: operate per-TAD over all dimensions except axis 0
        std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0});
        auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), dimensions);
        auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), dimensions);
        auto packGradIn = sd::ConstantTadHelper::getInstance().tadForDimensions(tempRes.shapeInfo(), dimensions);
        auto packGradOut = sd::ConstantTadHelper::getInstance().tadForDimensions(gradOut->shapeInfo(), dimensions);
        Nd4jLong const* inputTads = packX.specialShapeInfo();
        Nd4jLong const* inputTadOffsets = packX.specialOffsets();
        Nd4jLong const* outputTads = packZ.specialShapeInfo();
        Nd4jLong const* outputTadOffsets = packZ.specialOffsets();
        Nd4jLong const* gradInTads = packGradIn.specialShapeInfo();
        Nd4jLong const* gradInTadOffsets = packGradIn.specialOffsets();
        Nd4jLong const* gradOutTads = packGradOut.specialShapeInfo();
        Nd4jLong const* gradOutTadOffsets = packGradOut.specialOffsets();
        segmentMaxBPTadKernel<T,I><<<gradOut->lengthOf(), input->lengthOf(), 256, *stream>>>(input->specialBuffer(), input->specialShapeInfo(),
            tempRes.specialBuffer(), tempRes.specialShapeInfo(), gradOut->specialBuffer(), gradOut->specialShapeInfo(),
            indices->specialBuffer(), indices->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo(),
            inputTads, inputTadOffsets, gradInTads, gradInTadOffsets, gradOutTads, gradOutTadOffsets,
            outputTads, outputTadOffsets);
    }
    NDArray::registerSpecialUse({output}, {input, indices, gradOut, &tempRes});
    return Status::OK();
}
// -------------------------------------------------------------------------------------------------------------- //
// Dispatch wrapper for unsorted segment-max backprop: selects the templated
// implementation by output (float) dtype and index dtype.
// NOTE(review): BUILD_DOUBLE_SELECTOR returns from inside the macro on a
// dtype match, so the function can fall off the end without a return value
// when no case fires -- TODO confirm the selector's default behavior.
int unsortedSegmentMaxFunctorBP(sd::LaunchContext* context , NDArray* input, NDArray* indices, NDArray* gradOut, Nd4jLong numOfClasses, NDArray* output) {
NDArray::prepareSpecialUse({output}, {input, indices, gradOut});
BUILD_DOUBLE_SELECTOR(output->dataType(), indices->dataType(), return unsortedSegmentMaxFunctorBP_, (context, input, indices, gradOut, numOfClasses, output), FLOAT_TYPES, INDEXING_TYPES);
NDArray::registerSpecialUse({output}, {input, indices, gradOut});
}
}
}
} |
82e49b6453335585c3980bc5e14c1cd9b731c6a7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <string>
#include <cassert>
#include <iostream>
#include <cstddef>
#include <vector>
#include <thrust/device_vector.h>
#include <thrust/extrema.h>
#include <limits>
using namespace std;
// NOTE(review): these #defines shadow the standard fixed-width type names;
// 'int64_t' expands to 'long long' for the rest of this file -- TODO confirm
// this was intentional rather than relying on <cstdint>.
#define int8_t char
#define int16_t short
#define int32_t int
#define int64_t long long
//
// DEFAULt functions for work with cuda
//
// CSC: evaluates a HIP runtime call and terminates the process with a
// file/line diagnostic if it did not return hipSuccess.
#define CSC(call) do { \
hipError_t res = call; \
if (res != hipSuccess) { \
fprintf(stderr, "CUDA Error in %s:%d: %s\n", __FILE__, __LINE__, hipGetErrorString(res)); \
exit(0); \
} \
} while (0)
// Shared event pair and elapsed-time slot used by the timing helpers below.
hipEvent_t start, stop;
float t;
// Creates both events and records the start timestamp on the default stream.
void time_start() {
CSC(hipEventCreate(&start));
CSC(hipEventCreate(&stop));
CSC(hipEventRecord(start, 0));
}
// Records the stop timestamp, waits for it, prints the elapsed milliseconds,
// and destroys the events created by time_start().
void time_end() {
CSC(hipGetLastError());
CSC(hipEventRecord(stop, 0));
CSC(hipEventSynchronize(stop));
CSC(hipEventElapsedTime(&t, start, stop));
printf("time = %f\n", t);
CSC(hipEventDestroy(start));
CSC(hipEventDestroy(stop));
}
const int BLOCK_SIZE = 1024;  // elements per sorted tile (k_blocksort runs BLOCK_SIZE/2 threads)
const int GRID_SIZE = 16384;  // fixed launch grid for both kernels
//const int64_t INF = 2100000000;
const int64_t INF = 2147483647;  // padding sentinel (INT_MAX) appended past the real data
// Sorts each BLOCK_SIZE-element tile of arr in place using odd-even
// transposition sort in shared memory. Launched with BLOCK_SIZE/2 threads per
// block; each thread loads/stores two adjacent elements.
__global__ void k_blocksort(int64_t *arr, int len) {
if (blockIdx.x * BLOCK_SIZE >= len) {
return;
}
__shared__ int64_t block[BLOCK_SIZE];
int idx = threadIdx.x * 2;
int offset = blockIdx.x * BLOCK_SIZE;
int nstep = BLOCK_SIZE / 2;
// printf("offset %d", offset);
block[idx] = arr[offset + idx];
block[idx+1] = arr[offset + idx+1];
int64_t tmp;
// __syncthreads();
// if (idx < 10)
// printf("%d, %d\n", idx, block[idx]);
for (int k = 0; k < nstep; k++) {
// the inner loop runs exactly two iterations for every thread, so the
// barrier inside it is reached uniformly by the whole block
for (int i = idx+1; i < idx+3; ++i){
__syncthreads();
if (i < BLOCK_SIZE) {
if (block[i-1] > block[i]) {
// printf("swap %d %d\n", i-1, i);
tmp = block[i-1];
block[i-1] = block[i];
block[i] = tmp;
}
}
}
}
__syncthreads();
arr[offset + idx] = block[idx];
arr[offset + idx+1] = block[idx+1];
}
// Merges two adjacent sorted BLOCK_SIZE tiles into one sorted 2*BLOCK_SIZE
// run in shared memory: a mirror compare-and-swap pass followed by
// halving-stride compare-and-swap passes. 'odd' shifts the pair window by
// BLOCK_SIZE so alternating passes merge across different tile boundaries.
__global__ void k_merge(int64_t *arr, int len, bool odd) {
int offset = blockIdx.x * BLOCK_SIZE * 2;
int idx = threadIdx.x;
int64_t tmp;
if (odd) offset += BLOCK_SIZE;
if (offset + BLOCK_SIZE * 2 > len) return;
__shared__ int64_t block[BLOCK_SIZE * 2];
int idt = 2 * idx;
block[idt] = arr[offset + idt];
block[idt+1] = arr[offset + idt+1];
__syncthreads();
// first stage: compare element idx against its mirrored partner
idt = BLOCK_SIZE * 2 - idx - 1;
if (idx < BLOCK_SIZE && block[idx] > block[idt]) {
tmp = block[idx];
block[idx] = block[idt];
block[idt] = tmp;
}
int step = BLOCK_SIZE / 2;
while (step != 0) {
__syncthreads();
idt = idx;
if ((idx / step) % 2 == 1) {
idt -= step - BLOCK_SIZE;
}
if (block[idt] > block[idt + step]) {
tmp = block[idt];
block[idt] = block[idt+step];
block[idt+step] = tmp;
}
step /= 2;
}
idt = 2 * idx;
__syncthreads();
arr[offset + idt] = block[idt];
arr[offset + idt+1] = block[idt+1];
}
// Reads a binary stream from stdin: an int32 count n followed by n int32
// values. Pads the data to a multiple of BLOCK_SIZE with the INF sentinel,
// sorts it on the GPU (per-block odd-even sort, then alternating merge
// passes), and writes the first n sorted values to stdout as int32.
// Returns 0 on success, 1 on truncated input.
int main() {
    int n;
    // abort on truncated input instead of sorting garbage
    if (fread(&n, sizeof(int), 1, stdin) != 1) {
        fprintf(stderr, "failed to read element count\n");
        return 1;
    }
    fprintf(stderr, "n=%d\t", n);
    int rn = n;  // real (unpadded) element count
    if (n % BLOCK_SIZE != 0) {
        n += BLOCK_SIZE - n % BLOCK_SIZE;  // round up so every block sorts a full tile
    }
    int64_t *arr = new int64_t[n];
    int l;
    for (int i = 0; i < n; i++) {
        l = INF;  // padding sentinel sorts to the tail
        if (i < rn) {
            if (fread(&l, sizeof(int), 1, stdin) != 1) {
                fprintf(stderr, "truncated input at element %d\n", i);
                delete[] arr;
                return 1;
            }
        }
        arr[i] = l;
    }
    int64_t *darr;
    CSC(hipMalloc(&darr, sizeof(int64_t) * n));
    CSC(hipMemcpy(darr, arr, sizeof(int64_t) * n, hipMemcpyHostToDevice));
    // phase 1: sort each BLOCK_SIZE tile independently
    hipLaunchKernelGGL(( k_blocksort), dim3(GRID_SIZE), dim3(BLOCK_SIZE / 2), 0, 0, darr, n);
    CSC(hipGetLastError());
    fprintf(stderr, "num of steps %d\n", n / BLOCK_SIZE);
    // phase 2: alternating (even/odd) merge passes across tile boundaries
    if (n > BLOCK_SIZE) {
        for (int step = 0; step < n / BLOCK_SIZE; step++) {
            hipLaunchKernelGGL(( k_merge), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, darr, n, step & 1);
            CSC(hipGetLastError());
        }
    }
    // only the real (unpadded) elements are needed back on the host
    CSC(hipMemcpy(arr, darr, sizeof(int64_t) * rn, hipMemcpyDeviceToHost));
    CSC(hipFree(darr));
    for (int i = 0; i < rn; i++) {
        l = (int)arr[i];
        fwrite(&l, sizeof(int), 1, stdout);
    }
    delete[] arr;  // was leaked in the original
    return 0;
}
| 82e49b6453335585c3980bc5e14c1cd9b731c6a7.cu | #include <cstdio>
#include <string>
#include <cassert>
#include <iostream>
#include <cstddef>
#include <vector>
#include <thrust/device_vector.h>
#include <thrust/extrema.h>
#include <limits>
using namespace std;
// NOTE(review): these #defines shadow the standard fixed-width type names;
// 'int64_t' expands to 'long long' for the rest of this file -- TODO confirm
// this was intentional rather than relying on <cstdint>.
#define int8_t char
#define int16_t short
#define int32_t int
#define int64_t long long
//
// DEFAULt functions for work with cuda
//
// CSC: evaluates a CUDA runtime call and terminates the process with a
// file/line diagnostic if it did not return cudaSuccess.
#define CSC(call) do { \
cudaError_t res = call; \
if (res != cudaSuccess) { \
fprintf(stderr, "CUDA Error in %s:%d: %s\n", __FILE__, __LINE__, cudaGetErrorString(res)); \
exit(0); \
} \
} while (0)
// Shared event pair and elapsed-time slot used by the timing helpers below.
cudaEvent_t start, stop;
float t;
// Creates both events and records the start timestamp on the default stream.
void time_start() {
CSC(cudaEventCreate(&start));
CSC(cudaEventCreate(&stop));
CSC(cudaEventRecord(start, 0));
}
// Records the stop timestamp, waits for it, prints the elapsed milliseconds,
// and destroys the events created by time_start().
void time_end() {
CSC(cudaGetLastError());
CSC(cudaEventRecord(stop, 0));
CSC(cudaEventSynchronize(stop));
CSC(cudaEventElapsedTime(&t, start, stop));
printf("time = %f\n", t);
CSC(cudaEventDestroy(start));
CSC(cudaEventDestroy(stop));
}
const int BLOCK_SIZE = 1024;  // elements per sorted tile (k_blocksort runs BLOCK_SIZE/2 threads)
const int GRID_SIZE = 16384;  // fixed launch grid for both kernels
//const int64_t INF = 2100000000;
const int64_t INF = 2147483647;  // padding sentinel (INT_MAX) appended past the real data
// Sorts each BLOCK_SIZE-element tile of arr in place using odd-even
// transposition sort in shared memory. Launched with BLOCK_SIZE/2 threads per
// block; each thread loads/stores two adjacent elements.
__global__ void k_blocksort(int64_t *arr, int len) {
if (blockIdx.x * BLOCK_SIZE >= len) {
return;
}
__shared__ int64_t block[BLOCK_SIZE];
int idx = threadIdx.x * 2;
int offset = blockIdx.x * BLOCK_SIZE;
int nstep = BLOCK_SIZE / 2;
// printf("offset %d", offset);
block[idx] = arr[offset + idx];
block[idx+1] = arr[offset + idx+1];
int64_t tmp;
// __syncthreads();
// if (idx < 10)
// printf("%d, %d\n", idx, block[idx]);
for (int k = 0; k < nstep; k++) {
// the inner loop runs exactly two iterations for every thread, so the
// barrier inside it is reached uniformly by the whole block
for (int i = idx+1; i < idx+3; ++i){
__syncthreads();
if (i < BLOCK_SIZE) {
if (block[i-1] > block[i]) {
// printf("swap %d %d\n", i-1, i);
tmp = block[i-1];
block[i-1] = block[i];
block[i] = tmp;
}
}
}
}
__syncthreads();
arr[offset + idx] = block[idx];
arr[offset + idx+1] = block[idx+1];
}
// Merges two adjacent sorted BLOCK_SIZE tiles into one sorted 2*BLOCK_SIZE
// run in shared memory: a mirror compare-and-swap pass followed by
// halving-stride compare-and-swap passes. 'odd' shifts the pair window by
// BLOCK_SIZE so alternating passes merge across different tile boundaries.
__global__ void k_merge(int64_t *arr, int len, bool odd) {
int offset = blockIdx.x * BLOCK_SIZE * 2;
int idx = threadIdx.x;
int64_t tmp;
if (odd) offset += BLOCK_SIZE;
if (offset + BLOCK_SIZE * 2 > len) return;
__shared__ int64_t block[BLOCK_SIZE * 2];
int idt = 2 * idx;
block[idt] = arr[offset + idt];
block[idt+1] = arr[offset + idt+1];
__syncthreads();
// first stage: compare element idx against its mirrored partner
idt = BLOCK_SIZE * 2 - idx - 1;
if (idx < BLOCK_SIZE && block[idx] > block[idt]) {
tmp = block[idx];
block[idx] = block[idt];
block[idt] = tmp;
}
int step = BLOCK_SIZE / 2;
while (step != 0) {
__syncthreads();
idt = idx;
if ((idx / step) % 2 == 1) {
idt -= step - BLOCK_SIZE;
}
if (block[idt] > block[idt + step]) {
tmp = block[idt];
block[idt] = block[idt+step];
block[idt+step] = tmp;
}
step /= 2;
}
idt = 2 * idx;
__syncthreads();
arr[offset + idt] = block[idt];
arr[offset + idt+1] = block[idt+1];
}
int main() {
time_t tm, tmc;
time(&tm);
time(&tmc);
// fprintf(stderr, "%d: %f\t", __LINE__, difftime(tmc, tm));
time(&tm);
int n;
fread(&n, sizeof(int), 1, stdin);
fprintf(stderr, "n=%d\t", n);
int rn = n;
if (n % BLOCK_SIZE != 0) {
n += BLOCK_SIZE - n % BLOCK_SIZE;
}
int64_t *arr = new int64_t[n];
int l;
// fprintf(stderr, "\n");
for (int i = 0; i < n; i++) {
l = INF;
if (i < rn) {
fread(&l, sizeof(int), 1, stdin);
//fprintf(stderr, "%d ", l);
}
arr[i] = l;
}
//fprintf(stderr, "\n");
time(&tmc);
// fprintf(stderr, "%d: %f\t", __LINE__, difftime(tmc, tm));
time(&tm);
int64_t *darr;
CSC(cudaMalloc(&darr, sizeof(int64_t) * n));
CSC(cudaMemcpy(darr, arr, sizeof(int64_t) * n, cudaMemcpyHostToDevice));
k_blocksort<<<GRID_SIZE, BLOCK_SIZE / 2>>>(darr, n);
CSC(cudaGetLastError());
//CSC(cudaMemcpy(arr, darr, sizeof(int64_t) * n, cudaMemcpyDeviceToHost));
//cout << "after sort" << endl;
//for (int i = 0; i < rn; i++) {
// l = (int)arr[i];
// cout << l << " ";
// //fwrite(&l, sizeof(int), 1, stdout);
//}
//cout << endl;
time(&tmc);
// fprintf(stderr, "%d: %f\t", __LINE__, difftime(tmc, tm));
time(&tm);
fprintf(stderr, "num of steps %d\n", n / BLOCK_SIZE);
if (n > BLOCK_SIZE) {
for (int step = 0; step < n / BLOCK_SIZE; step++) {
k_merge<<<GRID_SIZE, BLOCK_SIZE>>>(darr, n, step & 1);
CSC(cudaGetLastError());
//CSC(cudaMemcpy(arr, darr, sizeof(int64_t) * n, cudaMemcpyDeviceToHost));
//cout << "after step" << endl;
//for (int i = 0; i < rn; i++) {
// l = (int)arr[i];
// cout << l << " ";
// //fwrite(&l, sizeof(int), 1, stdout);
//}
//cout << endl;
}
}
CSC(cudaMemcpy(arr, darr, sizeof(int64_t) * rn, cudaMemcpyDeviceToHost));
CSC(cudaFree(darr));
time(&tmc);
// fprintf(stderr, "%d: %f\t", __LINE__, difftime(tmc, tm));
time(&tm);
for (int i = 0; i < rn; i++) {
l = (int)arr[i];
// cout << l << " ";
fwrite(&l, sizeof(int), 1, stdout);
}
// cout << endl;
time(&tmc);
// fprintf(stderr, "%d: %f\t", __LINE__, difftime(tmc, tm));
time(&tm);
}
|
a421ab4098701576527b55e3b81f3e0a433016a5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void fupdate_inter(float *z, float *g, float invlambda, int nx, int ny)
{
int px = blockIdx.x * blockDim.x + threadIdx.x;
int py = blockIdx.y * blockDim.y + threadIdx.y;
int idx = px + py*nx;
float DIVZ;
if (px<nx && py<ny)
{
// compute the divergence
DIVZ = 0;
if ((px<(nx - 1))) DIVZ += z[3 * (idx)+0];
if ((px>0)) DIVZ -= z[3 * (idx - 1) + 0];
if ((py<(ny - 1))) DIVZ += z[3 * (idx)+1];
if ((py>0)) DIVZ -= z[3 * (idx - nx) + 1];
// update f
z[3 * idx + 2] = DIVZ - g[idx] * invlambda;
}
} | a421ab4098701576527b55e3b81f3e0a433016a5.cu | #include "includes.h"
__global__ void fupdate_inter(float *z, float *g, float invlambda, int nx, int ny)
{
int px = blockIdx.x * blockDim.x + threadIdx.x;
int py = blockIdx.y * blockDim.y + threadIdx.y;
int idx = px + py*nx;
float DIVZ;
if (px<nx && py<ny)
{
// compute the divergence
DIVZ = 0;
if ((px<(nx - 1))) DIVZ += z[3 * (idx)+0];
if ((px>0)) DIVZ -= z[3 * (idx - 1) + 0];
if ((py<(ny - 1))) DIVZ += z[3 * (idx)+1];
if ((py>0)) DIVZ -= z[3 * (idx - nx) + 1];
// update f
z[3 * idx + 2] = DIVZ - g[idx] * invlambda;
}
} |
5241c5bedc34d71caa8c7e174aa98680a8f8ffd9.hip | // !!! This is a file automatically generated by hipify!!!
// takes a graph adjacency matrix for a directed graph, and converts it
// to a 2-column matrix of pairs (i,j), meaning an edge from vertex i to
// vertex j; the output matrix must be in lexicographical order
// not claimed efficient, either in speed or in memory usage
#include <hip/hip_runtime.h>
#include <stdio.h>
// needs -lrt link flag for C++
#include <time.h>
float timediff(struct timespec t1, struct timespec t2)
{ if (t1.tv_nsec > t2.tv_nsec) {
t2.tv_sec -= 1;
t2.tv_nsec += 1000000000;
}
return t2.tv_sec-t1.tv_sec + 0.000000001 * (t2.tv_nsec-t1.tv_nsec);
}
// tgkernel1() finds the number of 1s to be handled by a thread, used
// to determine where in the output matrix a thread writes its portion
// arguments:
// dadjm: the adjacency matrix (NOT assumed symmetric), 1 for edge, 0
// otherwise; note: matrix is overwritten by the function
// n: number of rows and columns of adjm
// dcounts: output array, counts of 1s
__global__ void tgkernel1(int *dadjm, int n, int *dcounts)
{ int tot1s,j;
// need to find my thread number among the totality of all
// threads in all blocks
int me = blockDim.x * blockIdx.x + threadIdx.x;
tot1s = 0;
for (j = 0; j < n; j++) {
if (dadjm[n*me+j] == 1) {
dadjm[n*me+tot1s++] = j;
}
dcounts[me] = tot1s;
}
}
// tgkernel2() has the given thread write its rows into the output
// matrix
__global__ void tgkernel2(int *dadjm, int n,
int *dcounts, int *dstarts, int *doutm)
{ int outrow,num1si,j;
int me = blockDim.x * blockIdx.x + threadIdx.x;
// fill in this thread's portion of doutm
outrow = dstarts[me];
num1si = dcounts[me];
if (num1si > 0) {
for (j = 0; j < num1si; j++) {
doutm[2*outrow+2*j] = me;
doutm[2*outrow+2*j+1] = dadjm[n*me+j];
}
}
}
// replaces counts by cumulative counts
void cumulcounts(int *c, int *s, int n)
{ int i;
s[0] = 0;
for (i = 1; i < n; i++) {
s[i] = s[i-1] + c[i-1];
}
}
int *transgraph(int *hadjm, int n, int *nout, int gsize, int bsize)
{ int *dadjm; // device adjacency matrix
int *houtm; // host output matrix
int *doutm; // device output matrix
int *hcounts; // host counts vector
int *dcounts; // device counts vector
int *hstarts; // host starts vector
int *dstarts; // device starts vector
hcounts = (int *) malloc(n*sizeof(int));
hstarts = (int *) malloc(n*sizeof(int));
hipMalloc((void **)&dadjm,n*n*sizeof(int));
hipMalloc((void **)&dcounts,n*sizeof(int));
hipMalloc((void **)&dstarts,n*sizeof(int));
houtm = (int *) malloc(n*n*sizeof(int));
hipMalloc((void **)&doutm,n*n*sizeof(int));
hipMemcpy(dadjm,hadjm,n*n*sizeof(int),hipMemcpyHostToDevice);
dim3 dimGrid(gsize,1);
dim3 dimBlock(bsize,1,1);
// calculate counts and starts first
hipLaunchKernelGGL(( tgkernel1), dim3(dimGrid),dim3(dimBlock), 0, 0, dadjm,n,dcounts);
hipMemcpy(hcounts,dcounts,n*sizeof(int),hipMemcpyDeviceToHost);
cumulcounts(hcounts,hstarts,n);
*nout = hstarts[n-1] + hcounts[n-1];
hipMemcpy(dstarts,hstarts,n*sizeof(int),hipMemcpyHostToDevice);
hipLaunchKernelGGL(( tgkernel2), dim3(dimGrid),dim3(dimBlock), 0, 0, dadjm,n,dcounts,dstarts,doutm);
hipMemcpy(houtm,doutm,2*(*nout)*sizeof(int),hipMemcpyDeviceToHost);
free(hcounts);
free(hstarts);
hipFree(dadjm);
hipFree(dcounts);
hipFree(dstarts);
return houtm;
}
int main(int argc, char **argv)
{ int i,j;
int *adjm; // host adjacency matrix
int *outm; // host output matrix
int n = atoi(argv[1]);
int gsize = atoi(argv[2]);
int bsize = atoi(argv[3]);
int nout;
adjm = (int *) malloc(n*n*sizeof(int));
for (i = 0; i < n; i++)
for (j = 0; j < n; j++)
if (i == j) adjm[n*i+j] = 0;
else adjm[n*i+j] = rand() % 2;
if (n < 10) {
printf("adjacency matrix: \n");
for (i = 0; i < n; i++) {
for (j = 0; j < n; j++) printf("%d ",adjm[n*i+j]);
printf("\n");
}
}
struct timespec bgn,nd;
clock_gettime(CLOCK_REALTIME, &bgn);
outm = transgraph(adjm,n,&nout,gsize,bsize);
printf("num rows in out matrix = %d\n",nout);
if (nout < 50) {
printf("out matrix: \n");
for (i = 0; i < nout; i++)
printf("%d %d\n",outm[2*i],outm[2*i+1]);
}
clock_gettime(CLOCK_REALTIME, &nd);
printf("%f\n",timediff(bgn,nd));
}
| 5241c5bedc34d71caa8c7e174aa98680a8f8ffd9.cu | // takes a graph adjacency matrix for a directed graph, and converts it
// to a 2-column matrix of pairs (i,j), meaning an edge from vertex i to
// vertex j; the output matrix must be in lexicographical order
// not claimed efficient, either in speed or in memory usage
#include <cuda.h>
#include <stdio.h>
// needs -lrt link flag for C++
#include <time.h>
float timediff(struct timespec t1, struct timespec t2)
{ if (t1.tv_nsec > t2.tv_nsec) {
t2.tv_sec -= 1;
t2.tv_nsec += 1000000000;
}
return t2.tv_sec-t1.tv_sec + 0.000000001 * (t2.tv_nsec-t1.tv_nsec);
}
// tgkernel1() finds the number of 1s to be handled by a thread, used
// to determine where in the output matrix a thread writes its portion
// arguments:
// dadjm: the adjacency matrix (NOT assumed symmetric), 1 for edge, 0
// otherwise; note: matrix is overwritten by the function
// n: number of rows and columns of adjm
// dcounts: output array, counts of 1s
__global__ void tgkernel1(int *dadjm, int n, int *dcounts)
{ int tot1s,j;
// need to find my thread number among the totality of all
// threads in all blocks
int me = blockDim.x * blockIdx.x + threadIdx.x;
tot1s = 0;
for (j = 0; j < n; j++) {
if (dadjm[n*me+j] == 1) {
dadjm[n*me+tot1s++] = j;
}
dcounts[me] = tot1s;
}
}
// tgkernel2() has the given thread write its rows into the output
// matrix
__global__ void tgkernel2(int *dadjm, int n,
int *dcounts, int *dstarts, int *doutm)
{ int outrow,num1si,j;
int me = blockDim.x * blockIdx.x + threadIdx.x;
// fill in this thread's portion of doutm
outrow = dstarts[me];
num1si = dcounts[me];
if (num1si > 0) {
for (j = 0; j < num1si; j++) {
doutm[2*outrow+2*j] = me;
doutm[2*outrow+2*j+1] = dadjm[n*me+j];
}
}
}
// replaces counts by cumulative counts
void cumulcounts(int *c, int *s, int n)
{ int i;
s[0] = 0;
for (i = 1; i < n; i++) {
s[i] = s[i-1] + c[i-1];
}
}
int *transgraph(int *hadjm, int n, int *nout, int gsize, int bsize)
{ int *dadjm; // device adjacency matrix
int *houtm; // host output matrix
int *doutm; // device output matrix
int *hcounts; // host counts vector
int *dcounts; // device counts vector
int *hstarts; // host starts vector
int *dstarts; // device starts vector
hcounts = (int *) malloc(n*sizeof(int));
hstarts = (int *) malloc(n*sizeof(int));
cudaMalloc((void **)&dadjm,n*n*sizeof(int));
cudaMalloc((void **)&dcounts,n*sizeof(int));
cudaMalloc((void **)&dstarts,n*sizeof(int));
houtm = (int *) malloc(n*n*sizeof(int));
cudaMalloc((void **)&doutm,n*n*sizeof(int));
cudaMemcpy(dadjm,hadjm,n*n*sizeof(int),cudaMemcpyHostToDevice);
dim3 dimGrid(gsize,1);
dim3 dimBlock(bsize,1,1);
// calculate counts and starts first
tgkernel1<<<dimGrid,dimBlock>>>(dadjm,n,dcounts);
cudaMemcpy(hcounts,dcounts,n*sizeof(int),cudaMemcpyDeviceToHost);
cumulcounts(hcounts,hstarts,n);
*nout = hstarts[n-1] + hcounts[n-1];
cudaMemcpy(dstarts,hstarts,n*sizeof(int),cudaMemcpyHostToDevice);
tgkernel2<<<dimGrid,dimBlock>>>(dadjm,n,dcounts,dstarts,doutm);
cudaMemcpy(houtm,doutm,2*(*nout)*sizeof(int),cudaMemcpyDeviceToHost);
free(hcounts);
free(hstarts);
cudaFree(dadjm);
cudaFree(dcounts);
cudaFree(dstarts);
return houtm;
}
int main(int argc, char **argv)
{ int i,j;
int *adjm; // host adjacency matrix
int *outm; // host output matrix
int n = atoi(argv[1]);
int gsize = atoi(argv[2]);
int bsize = atoi(argv[3]);
int nout;
adjm = (int *) malloc(n*n*sizeof(int));
for (i = 0; i < n; i++)
for (j = 0; j < n; j++)
if (i == j) adjm[n*i+j] = 0;
else adjm[n*i+j] = rand() % 2;
if (n < 10) {
printf("adjacency matrix: \n");
for (i = 0; i < n; i++) {
for (j = 0; j < n; j++) printf("%d ",adjm[n*i+j]);
printf("\n");
}
}
struct timespec bgn,nd;
clock_gettime(CLOCK_REALTIME, &bgn);
outm = transgraph(adjm,n,&nout,gsize,bsize);
printf("num rows in out matrix = %d\n",nout);
if (nout < 50) {
printf("out matrix: \n");
for (i = 0; i < nout; i++)
printf("%d %d\n",outm[2*i],outm[2*i+1]);
}
clock_gettime(CLOCK_REALTIME, &nd);
printf("%f\n",timediff(bgn,nd));
}
|
826422ab4d543b153ec04a6ae5122457b2cf0f08.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <device_launch_parameters.h>
#include <hipcub/hipcub.hpp>
#include "common_kernels.cuh"
#include "kernels_hip.cuh"
using namespace clustering;
__constant__ float expected_point[MAX_DIM];
void assign_constant_storage(const float* value, csize_t size, hipMemcpyKind kind, hipStream_t stream)
{
CUCH(hipMemcpyToSymbolAsync(expected_point, value, size, (size_t)0, kind, stream));
}
#define BUFF_SIZE 32
template<size_t DIM_X>
__global__ void covariance(const float* __restrict__ points, float* __restrict__ cov_matrix, csize_t count, csize_t dim)
{
typedef hipcub::BlockReduce<float, DIM_X> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
float cov_point[BUFF_SIZE];
csize_t cov_idx = 0;
csize_t cov_size = ((dim + 1) * dim) / 2;
while (cov_idx < cov_size)
{
auto need = cov_size - cov_idx;
need = need > BUFF_SIZE ? BUFF_SIZE : need;
auto end = cov_idx + need;
memset(cov_point, 0, need * sizeof(float));
for (csize_t idx = blockDim.x * blockIdx.x + threadIdx.x; idx < count; idx += gridDim.x * blockDim.x)
{
for (csize_t point_idx = cov_idx; point_idx < end; point_idx++)
{
auto coords = compute_coordinates(dim, point_idx);
cov_point[point_idx - cov_idx] += (points[idx * dim + coords.x] - expected_point[coords.x])
* (points[idx * dim + coords.y] - expected_point[coords.y]);
}
}
for (csize_t i = 0; i < need; i++)
{
float aggregate = BlockReduce(temp_storage).Sum(cov_point[i]);
if (threadIdx.x == 0)
cov_matrix[blockIdx.x * cov_size + cov_idx + i] = aggregate;
}
cov_idx += need;
}
}
__global__ void finish_covariance(const float* __restrict__ in_cov_matrix,
float* __restrict__ out_cov_matrix,
csize_t grid_size,
csize_t divisor,
csize_t dim)
{
csize_t cov_size = ((dim + 1) * dim) / 2;
for (csize_t i = threadIdx.x; i < cov_size; i += blockDim.x)
{
float sum = 0;
for (size_t j = 0; j < grid_size; j++)
{
sum += in_cov_matrix[j * cov_size + i];
}
sum /= divisor - 1;
auto coords = compute_coordinates(dim, i);
out_cov_matrix[coords.x + coords.y * dim] = sum;
out_cov_matrix[coords.x * dim + coords.y] = sum;
}
}
__global__ void store_icov_data(float* __restrict__ icov_dest,
float* __restrict__ mf_dest,
const float* __restrict__ icov_src,
const float mf_src,
clustering::csize_t dim)
{
csize_t cov_size = ((dim + 1) * dim) / 2;
for (csize_t idx = threadIdx.x; idx < cov_size; idx += blockDim.x)
{
auto coords = compute_coordinates(dim, idx);
if (coords.x == coords.y)
icov_dest[idx] = icov_src[coords.x + coords.y * dim];
else
icov_dest[idx] = 2 * icov_src[coords.x + coords.y * dim];
}
if (threadIdx.x == 0 && mf_dest)
*mf_dest = mf_src;
}
__device__ void reduce_mul_warp(float* __restrict__ point)
{
for (unsigned int offset = warpSize / 2; offset > 0; offset /= 2)
*point *= __shfl_down_sync(0xFFFFFFFF, *point, offset);
}
__device__ void reduce_mul_block(float* __restrict__ point, float* __restrict__ shared_mem)
{
reduce_mul_warp(point);
auto lane_id = threadIdx.x % warpSize;
auto warp_id = threadIdx.x / warpSize;
if (lane_id == 0)
memcpy(shared_mem + warp_id, point, sizeof(float));
__syncthreads();
*point = (threadIdx.x < blockDim.x / warpSize) ? shared_mem[threadIdx.x] : 1;
reduce_mul_warp(point);
}
__global__ void transform_cov(float* __restrict__ matrix,
csize_t dim,
float weight_factor,
bool use_cholesky,
const float* __restrict__ cholesky_decomp,
const int* __restrict__ cholesky_success)
{
__shared__ float shared[1];
float mf = 1.f;
if (use_cholesky && *cholesky_success == 0)
{
for (csize_t idx = threadIdx.x; idx < dim; idx += blockDim.x)
mf *= powf(cholesky_decomp[idx * (dim + 1)], 2.f / dim);
__syncthreads();
reduce_mul_warp(&mf);
if (threadIdx.x == 0)
shared[0] = mf;
__syncthreads();
mf = shared[0];
}
csize_t cov_size = ((dim + 1) * dim) / 2;
for (csize_t idx = threadIdx.x; idx < cov_size; idx += blockDim.x)
{
auto coords = compute_coordinates(dim, idx);
matrix[coords.x + coords.y * dim] =
weight_factor * matrix[coords.x + coords.y * dim] + (1 - weight_factor) * mf * (coords.x == coords.y);
}
}
__global__ void compute_store_icov_mf(float* __restrict__ dest, csize_t dim, const float* __restrict__ cholesky_decomp)
{
float icmf = 1.f;
for (csize_t idx = threadIdx.x; idx < dim; idx += blockDim.x)
icmf *= powf(cholesky_decomp[idx * (dim + 1)], -2.f / dim);
__syncthreads();
reduce_mul_warp(&icmf);
if (threadIdx.x == 0)
*dest = icmf;
}
void run_covariance(const float* points,
float* work_covariance,
float* out_covariance,
csize_t cluster_size,
csize_t dim,
kernel_info info)
{
auto block_dim = ((cluster_size + 31) / 32) * 32;
auto grid_dim = (block_dim + 1023) / 1024;
block_dim = block_dim > info.block_dim ? info.block_dim : block_dim;
grid_dim = grid_dim > info.grid_dim ? info.grid_dim : grid_dim;
if (block_dim == 32)
hipLaunchKernelGGL(( covariance<32>), dim3(grid_dim), dim3(32), 0, info.stream, points, work_covariance, cluster_size, dim);
else if (block_dim <= 64)
hipLaunchKernelGGL(( covariance<64>), dim3(grid_dim), dim3(64), 0, info.stream, points, work_covariance, cluster_size, dim);
else if (block_dim <= 128)
hipLaunchKernelGGL(( covariance<128>), dim3(grid_dim), dim3(128), 0, info.stream, points, work_covariance, cluster_size, dim);
else if (block_dim <= 256)
hipLaunchKernelGGL(( covariance<256>), dim3(grid_dim), dim3(256), 0, info.stream, points, work_covariance, cluster_size, dim);
else if (block_dim <= 512)
hipLaunchKernelGGL(( covariance<512>), dim3(grid_dim), dim3(512), 0, info.stream, points, work_covariance, cluster_size, dim);
else
hipLaunchKernelGGL(( covariance<1024>), dim3(grid_dim), dim3(1024), 0, info.stream, points, work_covariance, cluster_size, dim);
hipLaunchKernelGGL(( finish_covariance), dim3(1), dim3(32), 0, info.stream, work_covariance, out_covariance, grid_dim, cluster_size, dim);
}
void run_store_icovariance_data(float* icov_dest,
float* mf_dest,
const float* icov_src,
const float mf_src,
clustering::csize_t dim,
hipStream_t stream)
{
hipLaunchKernelGGL(( store_icov_data), dim3(1), dim3(32), 0, stream, icov_dest, mf_dest, icov_src, mf_src, dim);
}
void run_transform_cov(float* matrix,
csize_t dim,
float weight_factor,
bool use_cholesky,
const float* cholesky_decomp,
const int* cholesky_success,
hipStream_t stream)
{
hipLaunchKernelGGL(( transform_cov), dim3(1), dim3(32), 0, stream, matrix, dim, weight_factor, use_cholesky, cholesky_decomp, cholesky_success);
}
void run_compute_store_icov_mf(float* dest, csize_t dim, const float* cholesky_decomp, hipStream_t stream)
{
hipLaunchKernelGGL(( compute_store_icov_mf), dim3(1), dim3(32), 0, stream, dest, dim, cholesky_decomp);
}
__global__ void minus(
const float* __restrict__ x, const float* __restrict__ y, float* __restrict__ z, csize_t dim, csize_t count)
{
for (csize_t idx = blockDim.x * blockIdx.x + threadIdx.x; idx < count * dim; idx += gridDim.x * blockDim.x)
{
auto r = idx / dim;
auto c = idx % dim;
z[c * count + r] = x[idx] - y[c];
}
}
void run_minus(const float* x, const float* y, float* z, csize_t dim, csize_t count, kernel_info info)
{
hipLaunchKernelGGL(( minus), dim3(info.grid_dim), dim3(info.block_dim), 0, info.stream, x, y, z, dim, count);
}
| 826422ab4d543b153ec04a6ae5122457b2cf0f08.cu | #include <device_launch_parameters.h>
#include <cub/block/block_reduce.cuh>
#include "common_kernels.cuh"
#include "kernels.cuh"
using namespace clustering;
__constant__ float expected_point[MAX_DIM];
void assign_constant_storage(const float* value, csize_t size, cudaMemcpyKind kind, cudaStream_t stream)
{
CUCH(cudaMemcpyToSymbolAsync(expected_point, value, size, (size_t)0, kind, stream));
}
#define BUFF_SIZE 32
template<size_t DIM_X>
__global__ void covariance(const float* __restrict__ points, float* __restrict__ cov_matrix, csize_t count, csize_t dim)
{
typedef cub::BlockReduce<float, DIM_X> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
float cov_point[BUFF_SIZE];
csize_t cov_idx = 0;
csize_t cov_size = ((dim + 1) * dim) / 2;
while (cov_idx < cov_size)
{
auto need = cov_size - cov_idx;
need = need > BUFF_SIZE ? BUFF_SIZE : need;
auto end = cov_idx + need;
memset(cov_point, 0, need * sizeof(float));
for (csize_t idx = blockDim.x * blockIdx.x + threadIdx.x; idx < count; idx += gridDim.x * blockDim.x)
{
for (csize_t point_idx = cov_idx; point_idx < end; point_idx++)
{
auto coords = compute_coordinates(dim, point_idx);
cov_point[point_idx - cov_idx] += (points[idx * dim + coords.x] - expected_point[coords.x])
* (points[idx * dim + coords.y] - expected_point[coords.y]);
}
}
for (csize_t i = 0; i < need; i++)
{
float aggregate = BlockReduce(temp_storage).Sum(cov_point[i]);
if (threadIdx.x == 0)
cov_matrix[blockIdx.x * cov_size + cov_idx + i] = aggregate;
}
cov_idx += need;
}
}
__global__ void finish_covariance(const float* __restrict__ in_cov_matrix,
float* __restrict__ out_cov_matrix,
csize_t grid_size,
csize_t divisor,
csize_t dim)
{
csize_t cov_size = ((dim + 1) * dim) / 2;
for (csize_t i = threadIdx.x; i < cov_size; i += blockDim.x)
{
float sum = 0;
for (size_t j = 0; j < grid_size; j++)
{
sum += in_cov_matrix[j * cov_size + i];
}
sum /= divisor - 1;
auto coords = compute_coordinates(dim, i);
out_cov_matrix[coords.x + coords.y * dim] = sum;
out_cov_matrix[coords.x * dim + coords.y] = sum;
}
}
__global__ void store_icov_data(float* __restrict__ icov_dest,
float* __restrict__ mf_dest,
const float* __restrict__ icov_src,
const float mf_src,
clustering::csize_t dim)
{
csize_t cov_size = ((dim + 1) * dim) / 2;
for (csize_t idx = threadIdx.x; idx < cov_size; idx += blockDim.x)
{
auto coords = compute_coordinates(dim, idx);
if (coords.x == coords.y)
icov_dest[idx] = icov_src[coords.x + coords.y * dim];
else
icov_dest[idx] = 2 * icov_src[coords.x + coords.y * dim];
}
if (threadIdx.x == 0 && mf_dest)
*mf_dest = mf_src;
}
__device__ void reduce_mul_warp(float* __restrict__ point)
{
for (unsigned int offset = warpSize / 2; offset > 0; offset /= 2)
*point *= __shfl_down_sync(0xFFFFFFFF, *point, offset);
}
__device__ void reduce_mul_block(float* __restrict__ point, float* __restrict__ shared_mem)
{
reduce_mul_warp(point);
auto lane_id = threadIdx.x % warpSize;
auto warp_id = threadIdx.x / warpSize;
if (lane_id == 0)
memcpy(shared_mem + warp_id, point, sizeof(float));
__syncthreads();
*point = (threadIdx.x < blockDim.x / warpSize) ? shared_mem[threadIdx.x] : 1;
reduce_mul_warp(point);
}
__global__ void transform_cov(float* __restrict__ matrix,
csize_t dim,
float weight_factor,
bool use_cholesky,
const float* __restrict__ cholesky_decomp,
const int* __restrict__ cholesky_success)
{
__shared__ float shared[1];
float mf = 1.f;
if (use_cholesky && *cholesky_success == 0)
{
for (csize_t idx = threadIdx.x; idx < dim; idx += blockDim.x)
mf *= powf(cholesky_decomp[idx * (dim + 1)], 2.f / dim);
__syncthreads();
reduce_mul_warp(&mf);
if (threadIdx.x == 0)
shared[0] = mf;
__syncthreads();
mf = shared[0];
}
csize_t cov_size = ((dim + 1) * dim) / 2;
for (csize_t idx = threadIdx.x; idx < cov_size; idx += blockDim.x)
{
auto coords = compute_coordinates(dim, idx);
matrix[coords.x + coords.y * dim] =
weight_factor * matrix[coords.x + coords.y * dim] + (1 - weight_factor) * mf * (coords.x == coords.y);
}
}
__global__ void compute_store_icov_mf(float* __restrict__ dest, csize_t dim, const float* __restrict__ cholesky_decomp)
{
float icmf = 1.f;
for (csize_t idx = threadIdx.x; idx < dim; idx += blockDim.x)
icmf *= powf(cholesky_decomp[idx * (dim + 1)], -2.f / dim);
__syncthreads();
reduce_mul_warp(&icmf);
if (threadIdx.x == 0)
*dest = icmf;
}
void run_covariance(const float* points,
float* work_covariance,
float* out_covariance,
csize_t cluster_size,
csize_t dim,
kernel_info info)
{
auto block_dim = ((cluster_size + 31) / 32) * 32;
auto grid_dim = (block_dim + 1023) / 1024;
block_dim = block_dim > info.block_dim ? info.block_dim : block_dim;
grid_dim = grid_dim > info.grid_dim ? info.grid_dim : grid_dim;
if (block_dim == 32)
covariance<32><<<grid_dim, 32, 0, info.stream>>>(points, work_covariance, cluster_size, dim);
else if (block_dim <= 64)
covariance<64><<<grid_dim, 64, 0, info.stream>>>(points, work_covariance, cluster_size, dim);
else if (block_dim <= 128)
covariance<128><<<grid_dim, 128, 0, info.stream>>>(points, work_covariance, cluster_size, dim);
else if (block_dim <= 256)
covariance<256><<<grid_dim, 256, 0, info.stream>>>(points, work_covariance, cluster_size, dim);
else if (block_dim <= 512)
covariance<512><<<grid_dim, 512, 0, info.stream>>>(points, work_covariance, cluster_size, dim);
else
covariance<1024><<<grid_dim, 1024, 0, info.stream>>>(points, work_covariance, cluster_size, dim);
finish_covariance<<<1, 32, 0, info.stream>>>(work_covariance, out_covariance, grid_dim, cluster_size, dim);
}
void run_store_icovariance_data(float* icov_dest,
float* mf_dest,
const float* icov_src,
const float mf_src,
clustering::csize_t dim,
cudaStream_t stream)
{
store_icov_data<<<1, 32, 0, stream>>>(icov_dest, mf_dest, icov_src, mf_src, dim);
}
void run_transform_cov(float* matrix,
csize_t dim,
float weight_factor,
bool use_cholesky,
const float* cholesky_decomp,
const int* cholesky_success,
cudaStream_t stream)
{
transform_cov<<<1, 32, 0, stream>>>(matrix, dim, weight_factor, use_cholesky, cholesky_decomp, cholesky_success);
}
void run_compute_store_icov_mf(float* dest, csize_t dim, const float* cholesky_decomp, cudaStream_t stream)
{
compute_store_icov_mf<<<1, 32, 0, stream>>>(dest, dim, cholesky_decomp);
}
__global__ void minus(
const float* __restrict__ x, const float* __restrict__ y, float* __restrict__ z, csize_t dim, csize_t count)
{
for (csize_t idx = blockDim.x * blockIdx.x + threadIdx.x; idx < count * dim; idx += gridDim.x * blockDim.x)
{
auto r = idx / dim;
auto c = idx % dim;
z[c * count + r] = x[idx] - y[c];
}
}
void run_minus(const float* x, const float* y, float* z, csize_t dim, csize_t count, kernel_info info)
{
minus<<<info.grid_dim, info.block_dim, 0, info.stream>>>(x, y, z, dim, count);
}
|
3c2bc266ba5b8b5414f8899a482f2df237a92464.hip | // !!! This is a file automatically generated by hipify!!!
#include <cudf/detail/gather.cuh>
#include <cudf/detail/gather.hpp>
#include <cudf/types.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/table/table.hpp>
#include <cudf/copying.hpp>
#include <utilities/legacy/error_utils.hpp>
#include <rmm/thrust_rmm_allocator.h>
#include <thrust/count.h>
#include <memory>
namespace cudf {
namespace experimental {
namespace detail {
struct dispatch_map_type {
template <typename map_type, std::enable_if_t<std::is_integral<map_type>::value
and not std::is_same<map_type, cudf::experimental::bool8>::value>* = nullptr>
std::unique_ptr<table> operator()(table_view const& source_table,
column_view const& gather_map,
size_type num_destination_rows, bool check_bounds,
bool ignore_out_of_bounds,
bool allow_negative_indices = false,
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
hipStream_t stream = 0)
{
std::unique_ptr<table> destination_table;
if (check_bounds) {
cudf::size_type begin = (allow_negative_indices) ? -source_table.num_rows() : 0;
CUDF_EXPECTS(
num_destination_rows == thrust::count_if(
rmm::exec_policy()->on(0),
gather_map.begin<map_type>(),
gather_map.end<map_type>(),
bounds_checker<map_type>{begin, source_table.num_rows()}),
"Index out of bounds.");
}
if (allow_negative_indices) {
destination_table =
gather(source_table,
thrust::make_transform_iterator(
gather_map.begin<map_type>(),
index_converter<map_type>{source_table.num_rows()}),
thrust::make_transform_iterator(
gather_map.end<map_type>(),
index_converter<map_type>{source_table.num_rows()}),
ignore_out_of_bounds,
mr,
stream
);
}
else {
destination_table =
gather(source_table,
gather_map.begin<map_type>(),
gather_map.end<map_type>(),
ignore_out_of_bounds,
mr,
stream
);
}
return destination_table;
}
template <typename map_type, std::enable_if_t<not std::is_integral<map_type>::value
or std::is_same<map_type, cudf::experimental::bool8>::value>* = nullptr>
std::unique_ptr<table> operator()(table_view const& source_table, column_view const& gather_map,
size_type num_destination_rows, bool check_bounds,
bool ignore_out_of_bounds, bool allow_negative_indices = false,
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
hipStream_t stream = 0) {
CUDF_FAIL("Gather map must be an integral type.");
}
};
std::unique_ptr<table> gather(table_view const& source_table, column_view const& gather_map,
bool check_bounds, bool ignore_out_of_bounds,
bool allow_negative_indices,
rmm::mr::device_memory_resource* mr,
hipStream_t stream) {
CUDF_EXPECTS(gather_map.has_nulls() == false, "gather_map contains nulls");
std::unique_ptr<table> destination_table =
cudf::experimental::type_dispatcher(gather_map.type(), dispatch_map_type{},
source_table, gather_map,
gather_map.size(),
check_bounds, ignore_out_of_bounds,
allow_negative_indices,
mr,
stream);
return destination_table;
}
} // namespace detail
std::unique_ptr<table> gather(table_view const& source_table, column_view const& gather_map,
bool check_bounds, rmm::mr::device_memory_resource* mr) {
return detail::gather(source_table, gather_map, check_bounds, false, true, mr);
}
} // namespace exp
} // namespace cudf
| 3c2bc266ba5b8b5414f8899a482f2df237a92464.cu | #include <cudf/detail/gather.cuh>
#include <cudf/detail/gather.hpp>
#include <cudf/types.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/table/table.hpp>
#include <cudf/copying.hpp>
#include <utilities/legacy/error_utils.hpp>
#include <rmm/thrust_rmm_allocator.h>
#include <thrust/count.h>
#include <memory>
namespace cudf {
namespace experimental {
namespace detail {
struct dispatch_map_type {
template <typename map_type, std::enable_if_t<std::is_integral<map_type>::value
and not std::is_same<map_type, cudf::experimental::bool8>::value>* = nullptr>
std::unique_ptr<table> operator()(table_view const& source_table,
column_view const& gather_map,
size_type num_destination_rows, bool check_bounds,
bool ignore_out_of_bounds,
bool allow_negative_indices = false,
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
cudaStream_t stream = 0)
{
std::unique_ptr<table> destination_table;
if (check_bounds) {
cudf::size_type begin = (allow_negative_indices) ? -source_table.num_rows() : 0;
CUDF_EXPECTS(
num_destination_rows == thrust::count_if(
rmm::exec_policy()->on(0),
gather_map.begin<map_type>(),
gather_map.end<map_type>(),
bounds_checker<map_type>{begin, source_table.num_rows()}),
"Index out of bounds.");
}
if (allow_negative_indices) {
destination_table =
gather(source_table,
thrust::make_transform_iterator(
gather_map.begin<map_type>(),
index_converter<map_type>{source_table.num_rows()}),
thrust::make_transform_iterator(
gather_map.end<map_type>(),
index_converter<map_type>{source_table.num_rows()}),
ignore_out_of_bounds,
mr,
stream
);
}
else {
destination_table =
gather(source_table,
gather_map.begin<map_type>(),
gather_map.end<map_type>(),
ignore_out_of_bounds,
mr,
stream
);
}
return destination_table;
}
template <typename map_type, std::enable_if_t<not std::is_integral<map_type>::value
or std::is_same<map_type, cudf::experimental::bool8>::value>* = nullptr>
std::unique_ptr<table> operator()(table_view const& source_table, column_view const& gather_map,
size_type num_destination_rows, bool check_bounds,
bool ignore_out_of_bounds, bool allow_negative_indices = false,
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
cudaStream_t stream = 0) {
CUDF_FAIL("Gather map must be an integral type.");
}
};
// Detail-level gather: validates the map, then dispatches on its runtime
// element type to the typed dispatch_map_type functor above.
// Gather maps containing nulls are rejected up front.
std::unique_ptr<table> gather(table_view const& source_table, column_view const& gather_map,
                              bool check_bounds, bool ignore_out_of_bounds,
                              bool allow_negative_indices,
                              rmm::mr::device_memory_resource* mr,
                              cudaStream_t stream) {
  CUDF_EXPECTS(gather_map.has_nulls() == false, "gather_map contains nulls");
  // Forward directly; no need for a named intermediate result.
  return cudf::experimental::type_dispatcher(gather_map.type(), dispatch_map_type{},
                                             source_table, gather_map,
                                             gather_map.size(),
                                             check_bounds, ignore_out_of_bounds,
                                             allow_negative_indices,
                                             mr,
                                             stream);
}
} // namespace detail
// Public gather API. Forwards to detail::gather with ignore_out_of_bounds =
// false and allow_negative_indices = true (argument order per the detail
// signature above); stream defaults via the detail declaration.
std::unique_ptr<table> gather(table_view const& source_table, column_view const& gather_map,
bool check_bounds, rmm::mr::device_memory_resource* mr) {
return detail::gather(source_table, gather_map, check_bounds, false, true, mr);
}
} // namespace exp
} // namespace cudf
|
47ed3abc567999663479ec44a8ab06c86af9a6d3.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "src/fastertransformer/kernels/decoder_masked_multihead_attention.h"
#include "src/fastertransformer/kernels/decoder_masked_multihead_attention/decoder_masked_multihead_attention_template.hpp"
#include "src/fastertransformer/kernels/decoder_masked_multihead_attention_utils.h"
#include "src/fastertransformer/utils/cuda_bf16_wrapper.h"
#include <assert.h>
#include <float.h>
#include <type_traits>
// Dispatch a masked/cross multi-head attention launch on the runtime
// hidden_size_per_head. The second template argument of mmha_launch_kernel is
// the exact head size; the third is that size rounded up to the next value in
// {32, 64, 128, 256} -- presumably the padded launch dimension; confirm
// against mmha_launch_kernel. Unsupported head sizes trip the assert.
template<typename T, typename KERNEL_PARAMS_TYPE>
void multihead_attention_(const KERNEL_PARAMS_TYPE& params, const hipStream_t& stream)
{
    switch (params.hidden_size_per_head) {
        case 32:
            mmha_launch_kernel<T, 32, 32, KERNEL_PARAMS_TYPE>(params, stream);
            break;
        case 48:
            mmha_launch_kernel<T, 48, 64, KERNEL_PARAMS_TYPE>(params, stream);
            break;
        case 64:
            mmha_launch_kernel<T, 64, 64, KERNEL_PARAMS_TYPE>(params, stream);
            break;
        case 80:
            mmha_launch_kernel<T, 80, 128, KERNEL_PARAMS_TYPE>(params, stream);
            break;
        case 96:
            mmha_launch_kernel<T, 96, 128, KERNEL_PARAMS_TYPE>(params, stream);
            break;
        case 112:
            mmha_launch_kernel<T, 112, 128, KERNEL_PARAMS_TYPE>(params, stream);
            break;
        case 128:
            mmha_launch_kernel<T, 128, 128, KERNEL_PARAMS_TYPE>(params, stream);
            break;
        case 144:
            mmha_launch_kernel<T, 144, 256, KERNEL_PARAMS_TYPE>(params, stream);
            break;
        case 160:
            mmha_launch_kernel<T, 160, 256, KERNEL_PARAMS_TYPE>(params, stream);
            break;
        case 192:
            mmha_launch_kernel<T, 192, 256, KERNEL_PARAMS_TYPE>(params, stream);
            break;
        case 224:
            mmha_launch_kernel<T, 224, 256, KERNEL_PARAMS_TYPE>(params, stream);
            break;
        case 256:
            mmha_launch_kernel<T, 256, 256, KERNEL_PARAMS_TYPE>(params, stream);
            break;
        default:
            assert(false); // unsupported head size
    }
}
////////////////////////////////////////////////////////////////////////////////////////////////////
// Self (decoder) attention entry point, FP32.
void masked_multihead_attention(const Masked_multihead_attention_params<float>& params, const hipStream_t& stream)
{
    multihead_attention_<float, Masked_multihead_attention_params<float>>(params, stream);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
// Self attention, 16-bit values passed as uint16_t (presumably FP16 bit
// patterns -- confirm with the kernel implementation).
void masked_multihead_attention(const Masked_multihead_attention_params<uint16_t>& params, const hipStream_t& stream)
{
    multihead_attention_<uint16_t, Masked_multihead_attention_params<uint16_t>>(params, stream);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
#ifdef ENABLE_BF16
// Self attention, bfloat16 (compiled only with BF16 support).
void masked_multihead_attention(const Masked_multihead_attention_params<__nv_bfloat16>& params,
                                const hipStream_t& stream)
{
    multihead_attention_<__nv_bfloat16, Masked_multihead_attention_params<__nv_bfloat16>>(params, stream);
}
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#ifdef ENABLE_FP8
// Self attention, FP8 e4m3 (compiled only with FP8 support).
void masked_multihead_attention(const Masked_multihead_attention_params<__nv_fp8_e4m3>& params,
                                const hipStream_t& stream)
{
    multihead_attention_<__nv_fp8_e4m3, Masked_multihead_attention_params<__nv_fp8_e4m3>>(params, stream);
}
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
// Cross (encoder-decoder) attention entry point, FP32.
void cross_multihead_attention(const Cross_multihead_attention_params<float>& params, const hipStream_t& stream)
{
    multihead_attention_<float, Cross_multihead_attention_params<float>>(params, stream);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
// Cross attention, 16-bit values passed as uint16_t.
void cross_multihead_attention(const Cross_multihead_attention_params<uint16_t>& params, const hipStream_t& stream)
{
    multihead_attention_<uint16_t, Cross_multihead_attention_params<uint16_t>>(params, stream);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
#ifdef ENABLE_BF16
// Cross attention, bfloat16.
void cross_multihead_attention(const Cross_multihead_attention_params<__nv_bfloat16>& params,
                               const hipStream_t& stream)
{
    multihead_attention_<__nv_bfloat16, Cross_multihead_attention_params<__nv_bfloat16>>(params, stream);
}
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#ifdef ENABLE_FP8
// Cross attention, FP8 e4m3.
void cross_multihead_attention(const Cross_multihead_attention_params<__nv_fp8_e4m3>& params,
                               const hipStream_t& stream)
{
    multihead_attention_<__nv_fp8_e4m3, Cross_multihead_attention_params<__nv_fp8_e4m3>>(params, stream);
}
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
| 47ed3abc567999663479ec44a8ab06c86af9a6d3.cu | /*
* Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "src/fastertransformer/kernels/decoder_masked_multihead_attention.h"
#include "src/fastertransformer/kernels/decoder_masked_multihead_attention/decoder_masked_multihead_attention_template.hpp"
#include "src/fastertransformer/kernels/decoder_masked_multihead_attention_utils.h"
#include "src/fastertransformer/utils/cuda_bf16_wrapper.h"
#include <assert.h>
#include <float.h>
#include <type_traits>
// Dispatch a masked/cross multi-head attention launch on the runtime
// hidden_size_per_head. The second template argument of mmha_launch_kernel is
// the exact head size; the third is that size rounded up to the next value in
// {32, 64, 128, 256} -- presumably the padded launch dimension; confirm
// against mmha_launch_kernel. Unsupported head sizes trip the assert.
template<typename T, typename KERNEL_PARAMS_TYPE>
void multihead_attention_(const KERNEL_PARAMS_TYPE& params, const cudaStream_t& stream)
{
    switch (params.hidden_size_per_head) {
        case 32:
            mmha_launch_kernel<T, 32, 32, KERNEL_PARAMS_TYPE>(params, stream);
            break;
        case 48:
            mmha_launch_kernel<T, 48, 64, KERNEL_PARAMS_TYPE>(params, stream);
            break;
        case 64:
            mmha_launch_kernel<T, 64, 64, KERNEL_PARAMS_TYPE>(params, stream);
            break;
        case 80:
            mmha_launch_kernel<T, 80, 128, KERNEL_PARAMS_TYPE>(params, stream);
            break;
        case 96:
            mmha_launch_kernel<T, 96, 128, KERNEL_PARAMS_TYPE>(params, stream);
            break;
        case 112:
            mmha_launch_kernel<T, 112, 128, KERNEL_PARAMS_TYPE>(params, stream);
            break;
        case 128:
            mmha_launch_kernel<T, 128, 128, KERNEL_PARAMS_TYPE>(params, stream);
            break;
        case 144:
            mmha_launch_kernel<T, 144, 256, KERNEL_PARAMS_TYPE>(params, stream);
            break;
        case 160:
            mmha_launch_kernel<T, 160, 256, KERNEL_PARAMS_TYPE>(params, stream);
            break;
        case 192:
            mmha_launch_kernel<T, 192, 256, KERNEL_PARAMS_TYPE>(params, stream);
            break;
        case 224:
            mmha_launch_kernel<T, 224, 256, KERNEL_PARAMS_TYPE>(params, stream);
            break;
        case 256:
            mmha_launch_kernel<T, 256, 256, KERNEL_PARAMS_TYPE>(params, stream);
            break;
        default:
            assert(false); // unsupported head size
    }
}
////////////////////////////////////////////////////////////////////////////////////////////////////
// Self (decoder) attention entry point, FP32.
void masked_multihead_attention(const Masked_multihead_attention_params<float>& params, const cudaStream_t& stream)
{
    multihead_attention_<float, Masked_multihead_attention_params<float>>(params, stream);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
// Self attention, 16-bit values passed as uint16_t (presumably FP16 bit
// patterns -- confirm with the kernel implementation).
void masked_multihead_attention(const Masked_multihead_attention_params<uint16_t>& params, const cudaStream_t& stream)
{
    multihead_attention_<uint16_t, Masked_multihead_attention_params<uint16_t>>(params, stream);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
#ifdef ENABLE_BF16
// Self attention, bfloat16 (compiled only with BF16 support).
void masked_multihead_attention(const Masked_multihead_attention_params<__nv_bfloat16>& params,
                                const cudaStream_t& stream)
{
    multihead_attention_<__nv_bfloat16, Masked_multihead_attention_params<__nv_bfloat16>>(params, stream);
}
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#ifdef ENABLE_FP8
// Self attention, FP8 e4m3 (compiled only with FP8 support).
void masked_multihead_attention(const Masked_multihead_attention_params<__nv_fp8_e4m3>& params,
                                const cudaStream_t& stream)
{
    multihead_attention_<__nv_fp8_e4m3, Masked_multihead_attention_params<__nv_fp8_e4m3>>(params, stream);
}
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
// Cross (encoder-decoder) attention entry point, FP32.
void cross_multihead_attention(const Cross_multihead_attention_params<float>& params, const cudaStream_t& stream)
{
    multihead_attention_<float, Cross_multihead_attention_params<float>>(params, stream);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
// Cross attention, 16-bit values passed as uint16_t.
void cross_multihead_attention(const Cross_multihead_attention_params<uint16_t>& params, const cudaStream_t& stream)
{
    multihead_attention_<uint16_t, Cross_multihead_attention_params<uint16_t>>(params, stream);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
#ifdef ENABLE_BF16
// Cross attention, bfloat16.
void cross_multihead_attention(const Cross_multihead_attention_params<__nv_bfloat16>& params,
                               const cudaStream_t& stream)
{
    multihead_attention_<__nv_bfloat16, Cross_multihead_attention_params<__nv_bfloat16>>(params, stream);
}
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#ifdef ENABLE_FP8
// Cross attention, FP8 e4m3.
void cross_multihead_attention(const Cross_multihead_attention_params<__nv_fp8_e4m3>& params,
                               const cudaStream_t& stream)
{
    multihead_attention_<__nv_fp8_e4m3, Cross_multihead_attention_params<__nv_fp8_e4m3>>(params, stream);
}
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
|
326d12683954a1b58842d2b7b9aada8245f37952.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Copy the first two rows of every `tile_y`-row tile of the M x N grid `dst`
// into the staging buffer `shared_rows` (each tile owns a 2 x N slab there).
// `shared_cols` is not referenced by this kernel.
// Launch layout (inferred from the indexing -- confirm at the call site):
// blockIdx.y selects the tile, blockIdx.x * blockDim.x + threadIdx.x walks
// the columns; one thread per copied element.
__global__ void gpu_stencil2D_4pt_hack5_cp_rows(double * dst, double * shared_cols, double *shared_rows,int tile_y,int M, int N){
#ifdef CUDA_CUDA_DEBUG
    if((blockIdx.x==0)&&(blockIdx.y==0)&&(threadIdx.x==0)){
        printf("copy rows begin!\n");
    }
#endif
    // Top-left corner of this block's slice of the tile in global coordinates.
    int base_global_row = (tile_y * blockIdx.y );
    int base_global_col = blockDim.x*blockIdx.x;
    int base_global_idx = N*base_global_row + base_global_col ;
    int nextRow = base_global_row+1;
    bool legalNextRow = (nextRow<M)?1:0; // second row may fall off the grid
    int t = threadIdx.x;
    bool legalCurCol = (base_global_col + t)<N; // guard the ragged last block
    // Destination: tile index * (2 rows * N columns) + this thread's column.
    int idx = (base_global_row/tile_y)*2*N + t+base_global_col;
    int idx_nextrow = idx + N; // second row of the 2 x N slab
    if(legalCurCol){
        shared_rows[idx] = dst[base_global_idx + t];
    }
    if(legalNextRow&&legalCurCol){
        shared_rows[idx_nextrow] = dst[base_global_idx + N+t];
    }
    __syncthreads();
#ifdef CUDA_CUDA_DEBUG
    // if(threadIdx.x==0){
    //     printf("blockIdx.x = %d,blockIdx.y = %d\n",blockIdx.x,blockIdx.y);
    // }
    // if(blockIdx.y==1 && threadIdx.x==0){
    //     printf("addr: %d\n",idx_nextrow);
    // }
    if(blockIdx.y==0 && blockIdx.x==2 && (t==0 || t==1)){
        printf("addr:%d, val = %f\n", idx_nextrow,shared_rows[idx_nextrow]);
    }
#endif
#ifdef CUDA_CUDA_DEBUG
    if((blockIdx.x==0)&&(blockIdx.y==0)&&(threadIdx.x==0)){
        printf("copy rows finish!\n");
    }
#endif
} | 326d12683954a1b58842d2b7b9aada8245f37952.cu | #include "includes.h"
__global__ void gpu_stencil2D_4pt_hack5_cp_rows(double * dst, double * shared_cols, double *shared_rows,int tile_y,int M, int N){
// Copies the first two rows of every `tile_y`-row tile of the M x N grid
// `dst` into the staging buffer `shared_rows` (each tile owns a 2 x N slab).
// `shared_cols` is not referenced here. Launch layout (inferred from the
// indexing -- confirm at the call site): blockIdx.y selects the tile,
// blockIdx.x * blockDim.x + threadIdx.x walks the columns.
#ifdef CUDA_CUDA_DEBUG
    if((blockIdx.x==0)&&(blockIdx.y==0)&&(threadIdx.x==0)){
        printf("copy rows begin!\n");
    }
#endif
    // Top-left corner of this block's slice of the tile in global coordinates.
    int base_global_row = (tile_y * blockIdx.y );
    int base_global_col = blockDim.x*blockIdx.x;
    int base_global_idx = N*base_global_row + base_global_col ;
    int nextRow = base_global_row+1;
    bool legalNextRow = (nextRow<M)?1:0; // second row may fall off the grid
    int t = threadIdx.x;
    bool legalCurCol = (base_global_col + t)<N; // guard the ragged last block
    // Destination: tile index * (2 rows * N columns) + this thread's column.
    int idx = (base_global_row/tile_y)*2*N + t+base_global_col;
    int idx_nextrow = idx + N; // second row of the 2 x N slab
    if(legalCurCol){
        shared_rows[idx] = dst[base_global_idx + t];
    }
    if(legalNextRow&&legalCurCol){
        shared_rows[idx_nextrow] = dst[base_global_idx + N+t];
    }
    __syncthreads();
#ifdef CUDA_CUDA_DEBUG
    // if(threadIdx.x==0){
    //     printf("blockIdx.x = %d,blockIdx.y = %d\n",blockIdx.x,blockIdx.y);
    // }
    // if(blockIdx.y==1 && threadIdx.x==0){
    //     printf("addr: %d\n",idx_nextrow);
    // }
    if(blockIdx.y==0 && blockIdx.x==2 && (t==0 || t==1)){
        printf("addr:%d, val = %f\n", idx_nextrow,shared_rows[idx_nextrow]);
    }
#endif
#ifdef CUDA_CUDA_DEBUG
    if((blockIdx.x==0)&&(blockIdx.y==0)&&(threadIdx.x==0)){
        printf("copy rows finish!\n");
    }
#endif
acf9731bfd4dc5de2fa1c2bb108b369c84d876d2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <string>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <malloc.h>
#include <vector>
#include <iostream>
#include <fstream>
#include <functional>
#include <algorithm>
#include <ctime>
#define ACCURACY 0.01
#define NUM_OF_GPU_THREADS 2
#define BLOCK_SIZE 32
#define NUM_OF_GPU_BLOCKS 4
#define MAX 3
#define MIN 0
//
//__global__ void addKernel(int *c, const int *a, const int *b)
//{
// int i = threadIdx.x;
// c[i] = a[i] + b[i];
//}
//
//int main()
//{
// const int arraySize = 5;
// const int a[arraySize] = { 1, 2, 3, 4, 5 };
// const int b[arraySize] = { 10, 20, 30, 40, 50 };
// int c[arraySize] = { 0 };
//
// // Add vectors in parallel.
// hipError_t cudaStatus = addWithCuda(c, a, b, arraySize);
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "addWithCuda failed!");
// return 1;
// }
//
// printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
// c[0], c[1], c[2], c[3], c[4]);
//
// // hipDeviceReset must be called before exiting in order for profiling and
// // tracing tools such as Nsight and Visual Profiler to show complete traces.
// cudaStatus = hipDeviceReset();
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipDeviceReset failed!");
// return 1;
// }
//
// return 0;
//}
//
//// Helper function for using CUDA to add vectors in parallel.
//hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
//{
// int *dev_a = 0;
// int *dev_b = 0;
// int *dev_c = 0;
// hipError_t cudaStatus;
//
// // Choose which GPU to run on, change this on a multi-GPU system.
// cudaStatus = hipSetDevice(0);
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
// goto Error;
// }
//
// // Allocate GPU buffers for three vectors (two input, one output) .
// cudaStatus = hipMalloc((void**)&dev_c, size * sizeof(int));
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipMalloc failed!");
// goto Error;
// }
//
// cudaStatus = hipMalloc((void**)&dev_a, size * sizeof(int));
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipMalloc failed!");
// goto Error;
// }
//
// cudaStatus = hipMalloc((void**)&dev_b, size * sizeof(int));
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipMalloc failed!");
// goto Error;
// }
//
// // Copy input vectors from host memory to GPU buffers.
// cudaStatus = hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice);
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipMemcpy failed!");
// goto Error;
// }
//
// cudaStatus = hipMemcpy(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice);
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipMemcpy failed!");
// goto Error;
// }
//
// // Launch a kernel on the GPU with one thread for each element.
// addKernel << <1, size >> > (dev_c, dev_a, dev_b);
//
// // Check for any errors launching the kernel
// cudaStatus = hipGetLastError();
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
// goto Error;
// }
//
// // hipDeviceSynchronize waits for the kernel to finish, and returns
// // any errors encountered during the launch.
// cudaStatus = hipDeviceSynchronize();
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
// goto Error;
// }
//
// // Copy output vector from GPU buffer to host memory.
// cudaStatus = hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost);
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipMemcpy failed!");
// goto Error;
// }
//
//Error:
// hipFree(dev_c);
// hipFree(dev_a);
// hipFree(dev_b);
//
// return cudaStatus;
//}
// Abort the process with a diagnostic when a HIP API call has failed.
// `line` is the caller's __LINE__, included in the message.
void checkError(hipError_t err, int line) {
	if (err == hipSuccess) {
		return; // nothing to report
	}
	std::cerr << "Error " << hipGetErrorName(err) << " happenend: "
	          << hipGetErrorString(err) << " at line " << line << std::endl;
	exit(-1);
}
//void matrixInit(float * matrix, int size) {
// for (int i = 0; i < size; ++i) {
// matrix[i] = rand() % (MAX - MIN + 1) + MIN;
// }
//}
// Tiled single-precision GEMM: C = alpha * A * B + beta * C, where A (m x k)
// and C (m x n) are column-major and B holds B^T laid out so that element
// (b_row, b_col) is at b_row * n + b_col, matching the host reference
// basicSgemm. Launch with grid(ceil(m/BLOCK_SIZE), ceil(n/BLOCK_SIZE)) and
// block(BLOCK_SIZE, BLOCK_SIZE); one thread per C element.
__global__ void sgemmKernel(float * cudaA, float * cudaB, float * cudaC, int m, int n, int k, float alpha, float beta) {
	int bx = blockIdx.x;
	int by = blockIdx.y;
	int tx = threadIdx.x;
	int ty = threadIdx.y;
	// Shared-memory staging tiles for the current k-slab of A and B.
	__shared__ float A[BLOCK_SIZE][BLOCK_SIZE];
	__shared__ float B[BLOCK_SIZE][BLOCK_SIZE];
	int row = bx * BLOCK_SIZE + tx;
	int col = by * BLOCK_SIZE + ty;
	float sum = 0;
	// Bug fix: iterate over ceil(k / BLOCK_SIZE) tiles of the k dimension.
	// The original looped `i < gridDim.y` (= ceil(n / BLOCK_SIZE)), which only
	// covers all of k when n and k round up to the same tile count.
	int numTiles = (k + BLOCK_SIZE - 1) / BLOCK_SIZE;
	for (int i = 0; i < numTiles; i++) {
		// Stage one BLOCK_SIZE-wide slab of A (column-major, leading dim m).
		int a_col = i * BLOCK_SIZE + ty;
		int a_row = row;
		if (a_row < m && a_col < k) {
			A[tx][ty] = cudaA[a_row + a_col * m];
		}
		// Stage the matching slab of B.
		int b_col = col;
		int b_row = i * BLOCK_SIZE + tx;
		if (b_row < k && b_col < n){
			B[tx][ty] = cudaB[b_row * n + b_col];
		}
		__syncthreads(); // tiles fully written before anyone reads them
		if (row < m && col < n) {
			// Clamp the last, possibly partial, tile of k.
			int j_end = ((i + 1) * BLOCK_SIZE < k) ? BLOCK_SIZE : k - i * BLOCK_SIZE;
			for (int j = 0; j < j_end; j++) {
				sum += A[tx][j] * B[j][ty];
			}
		}
		__syncthreads(); // all reads done before the next slab overwrites
	}
	if (row < m && col < n) {
		// Device C is never initialized from the host, so it is zeroed first;
		// this intentionally makes the beta term act on zero.
		cudaC[row + col * m] = 0;
		cudaC[row + col * m] = cudaC[row + col * m] * beta + sum * alpha;
	}
}
bool readColMajorMatrixFile(const char *fn, int &nr_row, int &nr_col, std::vector<float>&v)
{
std::cerr << "Opening file:" << fn << std::endl;
std::fstream f(fn, std::fstream::in);
if (!f.good()) {
return false;
}
// Read # of rows and cols
f >> nr_row;
f >> nr_col;
float data;
std::cerr << "Matrix dimension: " << nr_row << "x" << nr_col << std::endl;
while (f.good()) {
f >> data;
v.push_back(data);
}
v.pop_back(); // remove the duplicated last element
return true;
}
// Serialize a matrix to a whitespace-delimited text file: the row and column
// counts first, then every element of `v`, terminated by a newline.
// Returns false when the file cannot be opened for writing.
bool writeColMajorMatrixFile(const char *fn, int nr_row, int nr_col, std::vector<float>&v)
{
	std::cerr << "Opening file:" << fn << " for write." << std::endl;
	std::fstream f(fn, std::fstream::out);
	if (!f.good()) {
		return false;
	}
	// Header: dimensions first.
	f << nr_row << " " << nr_col << " ";
	std::cerr << "Matrix dimension: " << nr_row << "x" << nr_col << std::endl;
	for (float value : v) {
		f << value << ' ';
	}
	f << "\n";
	return true;
}
// CPU reference SGEMM: C = alpha * op(A) * op(B) + beta * C, supporting only
// op(A) = A ('N') with A column-major (leading dim lda) and op(B) = B^T ('T')
// with B^T stored so element (nn, i) sits at nn + i * ldb. C is column-major
// with leading dimension ldc. Unsupported trans values log and return.
void basicSgemm(char transa, char transb, int m, int n, int k, float alpha, const float *A, int lda, const float *B, int ldb, float beta, float *C, int ldc)
{
	if ((transa != 'N') && (transa != 'n')) {
		std::cerr << "unsupported value of 'transa' in regtileSgemm()" << std::endl;
		return;
	}
	if ((transb != 'T') && (transb != 't')) {
		std::cerr << "unsupported value of 'transb' in regtileSgemm()" << std::endl;
		return;
	}
	for (int row = 0; row < m; ++row) {
		for (int col = 0; col < n; ++col) {
			// Dot product of row `row` of A with column `col` of B.
			float acc = 0.0f;
			for (int kk = 0; kk < k; ++kk) {
				acc += A[row + kk * lda] * B[col + kk * ldb];
			}
			C[row + col * ldc] = C[row + col * ldc] * beta + alpha * acc;
		}
	}
}
// GPU wrapper around sgemmKernel: allocates device buffers, copies A and B to
// the device, launches a BLOCK_SIZE x BLOCK_SIZE tiled kernel, and copies C
// back. Note: C is never copied to the device, so the caller's C values never
// reach the beta term (the kernel zeroes its output element first).
void basicSgemm_par(char transa, char transb, int m, int n, int k, float alpha, const float *A, int lda, const float *B, int ldb, float beta, float *C, int ldc)
{
	float *cudaA, *cudaB, *cudaC;
	int sizeA = m * k * sizeof(float), sizeB = k * n * sizeof(float), sizeC = m * n * sizeof(float);
	// Same restriction as the CPU reference: only 'N' x 'T' is implemented.
	if ((transa != 'N') && (transa != 'n')) {
		std::cerr << "unsupported value of 'transa' in regtileSgemm()" << std::endl;
		return;
	}
	if ((transb != 'T') && (transb != 't')) {
		std::cerr << "unsupported value of 'transb' in regtileSgemm()" << std::endl;
		return;
	}
	checkError(hipMalloc(&cudaA, sizeA), __LINE__);
	checkError(hipMemcpy(cudaA, A, sizeA, hipMemcpyHostToDevice), __LINE__);
	checkError(hipMalloc(&cudaB, sizeB), __LINE__);
	checkError(hipMemcpy(cudaB, B, sizeB, hipMemcpyHostToDevice), __LINE__);
	checkError(hipMalloc(&cudaC, sizeC), __LINE__);
	// One thread per C element; ceil-div so ragged edges are covered.
	dim3 dimGrid((m + BLOCK_SIZE - 1) / BLOCK_SIZE, (n + BLOCK_SIZE - 1) / BLOCK_SIZE);
	dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
	hipLaunchKernelGGL(( sgemmKernel) , dim3(dimGrid), dim3(dimBlock) , 0, 0, cudaA, cudaB, cudaC, m, n, k, alpha, beta);
	// The blocking device-to-host copy also synchronizes with the kernel.
	checkError(hipMemcpy(C, cudaC, sizeC, hipMemcpyDeviceToHost), __LINE__);
	checkError(hipFree(cudaA), __LINE__);
	checkError(hipFree(cudaB), __LINE__);
	checkError(hipFree(cudaC), __LINE__);
}
// Driver: argv[1] = A matrix file, argv[2] = B^T matrix file, argv[3] =
// output file for the sequential result. Runs the CPU and GPU SGEMM, times
// both with HIP events, and compares the results element-wise within ACCURACY.
int main(int argc, char *argv[]) {
	int matArow, matAcol;
	int matBrow, matBcol;
	std::vector<float> matA;
	std::vector<float> matBT;
	//int m, n, k;
	float timeSeq = 0.0f, timePar = 0.0f;
	hipEvent_t start = hipEvent_t();
	checkError(hipEventCreate(&start), __LINE__);
	hipEvent_t stop = hipEvent_t();
	checkError(hipEventCreate(&stop), __LINE__);
	if (argc != 4)
	{
		fprintf(stderr, "Expecting three input filenames\n");
		exit(-1);
	}
	/* Read in data */
	// load A
	readColMajorMatrixFile(argv[1], matArow, matAcol, matA);
	// load B^T
	readColMajorMatrixFile(argv[2], matBcol, matBrow, matBT);
	/*m = std::stoi(argv[1]);
	n = std::stoi(argv[2]);
	k = std::stoi(argv[3]);
	// A - m*k
	// B - k*n
	// C - m*n
	matArow = m;
	matAcol = k;
	matBrow = k;
	matBcol = n;
	// allocate space for A and B
	//matrixInit(&matA.front(), m*k);
	//matrixInit(&matBT.front(), k*n);*/
	// allocate space for C and D
	std::vector<float> matC(matArow * matBcol);
	std::vector<float> matD(matArow * matBcol);
	//clock_t begin = clock();
	hipEventRecord(start, 0);
	// Use standard sgemm interface (CPU reference, result in matC).
	basicSgemm('N', 'T', matArow, matBcol, matAcol, 1.0f, &matA.front(), matArow, &matBT.front(), matBcol, 0.0f, &matC.front(), matArow);
	clock_t end = clock(); // NOTE(review): unused leftover from the clock()-based timing above
	//timeSeq = float(end - begin) / CLOCKS_PER_SEC;
	checkError(hipEventRecord(stop, 0), __LINE__);
	checkError(hipEventSynchronize(stop), __LINE__);
	checkError(hipEventElapsedTime(&timeSeq, start, stop), __LINE__);
	timeSeq /= 1000; // event time is in ms; report seconds
	hipEventRecord(start, 0);
	// Use parallel sgemm interface (GPU, result in matD).
	basicSgemm_par('N', 'T', matArow, matBcol, matAcol, 1.0f, &matA.front(), matArow, &matBT.front(), matBcol, 0.0f, &matD.front(), matArow);
	checkError(hipEventRecord(stop, 0), __LINE__);
	checkError(hipEventSynchronize(stop), __LINE__);
	checkError(hipEventElapsedTime(&timePar, start, stop), __LINE__);
	timePar /= 1000;
	checkError(hipEventDestroy(start), __LINE__);
	checkError(hipEventDestroy(stop), __LINE__);
	// Only the CPU result is written out.
	writeColMajorMatrixFile(argv[3], matArow, matBcol, matC);
	std::function<bool(double, double)> comparator = [](double left, double right) {
		// Lambda function to compare 2 doubles with ACCURACY
		return fabs(left - right) < ACCURACY;
	};
	std::cerr << "********************DZ3Z1**********************" << std::endl;
	std::cerr << "Elapsed time - SEQ: " << timeSeq << "." << std::endl;
	std::cerr << "Elapsed time - PAR: " << timePar << "." << std::endl;
	std::cerr << (std::equal(matC.begin(), matC.end(), matD.begin(), comparator) ? "TEST PASSED" : "TEST FAILED") << std::endl;
	std::cerr << "***********************************************" << std::endl;
	return 0;
}
| acf9731bfd4dc5de2fa1c2bb108b369c84d876d2.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <string>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <malloc.h>
#include <vector>
#include <iostream>
#include <fstream>
#include <functional>
#include <algorithm>
#include <ctime>
#define ACCURACY 0.01
#define NUM_OF_GPU_THREADS 2
#define BLOCK_SIZE 32
#define NUM_OF_GPU_BLOCKS 4
#define MAX 3
#define MIN 0
//
//__global__ void addKernel(int *c, const int *a, const int *b)
//{
// int i = threadIdx.x;
// c[i] = a[i] + b[i];
//}
//
//int main()
//{
// const int arraySize = 5;
// const int a[arraySize] = { 1, 2, 3, 4, 5 };
// const int b[arraySize] = { 10, 20, 30, 40, 50 };
// int c[arraySize] = { 0 };
//
// // Add vectors in parallel.
// cudaError_t cudaStatus = addWithCuda(c, a, b, arraySize);
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "addWithCuda failed!");
// return 1;
// }
//
// printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
// c[0], c[1], c[2], c[3], c[4]);
//
// // cudaDeviceReset must be called before exiting in order for profiling and
// // tracing tools such as Nsight and Visual Profiler to show complete traces.
// cudaStatus = cudaDeviceReset();
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaDeviceReset failed!");
// return 1;
// }
//
// return 0;
//}
//
//// Helper function for using CUDA to add vectors in parallel.
//cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
//{
// int *dev_a = 0;
// int *dev_b = 0;
// int *dev_c = 0;
// cudaError_t cudaStatus;
//
// // Choose which GPU to run on, change this on a multi-GPU system.
// cudaStatus = cudaSetDevice(0);
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
// goto Error;
// }
//
// // Allocate GPU buffers for three vectors (two input, one output) .
// cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int));
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMalloc failed!");
// goto Error;
// }
//
// cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int));
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMalloc failed!");
// goto Error;
// }
//
// cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int));
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMalloc failed!");
// goto Error;
// }
//
// // Copy input vectors from host memory to GPU buffers.
// cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMemcpy failed!");
// goto Error;
// }
//
// cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMemcpy failed!");
// goto Error;
// }
//
// // Launch a kernel on the GPU with one thread for each element.
// addKernel << <1, size >> > (dev_c, dev_a, dev_b);
//
// // Check for any errors launching the kernel
// cudaStatus = cudaGetLastError();
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
// goto Error;
// }
//
// // cudaDeviceSynchronize waits for the kernel to finish, and returns
// // any errors encountered during the launch.
// cudaStatus = cudaDeviceSynchronize();
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
// goto Error;
// }
//
// // Copy output vector from GPU buffer to host memory.
// cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMemcpy failed!");
// goto Error;
// }
//
//Error:
// cudaFree(dev_c);
// cudaFree(dev_a);
// cudaFree(dev_b);
//
// return cudaStatus;
//}
// Abort the process with a diagnostic when a CUDA API call has failed.
// `line` is the caller's __LINE__, included in the message.
void checkError(cudaError_t err, int line) {
	if (err == cudaSuccess) {
		return; // nothing to report
	}
	std::cerr << "Error " << cudaGetErrorName(err) << " happenend: "
	          << cudaGetErrorString(err) << " at line " << line << std::endl;
	exit(-1);
}
//void matrixInit(float * matrix, int size) {
// for (int i = 0; i < size; ++i) {
// matrix[i] = rand() % (MAX - MIN + 1) + MIN;
// }
//}
// Tiled single-precision GEMM: C = alpha * A * B + beta * C, where A (m x k)
// and C (m x n) are column-major and B holds B^T laid out so that element
// (b_row, b_col) is at b_row * n + b_col, matching the host reference
// basicSgemm. Launch with grid(ceil(m/BLOCK_SIZE), ceil(n/BLOCK_SIZE)) and
// block(BLOCK_SIZE, BLOCK_SIZE); one thread per C element.
__global__ void sgemmKernel(float * cudaA, float * cudaB, float * cudaC, int m, int n, int k, float alpha, float beta) {
	int bx = blockIdx.x;
	int by = blockIdx.y;
	int tx = threadIdx.x;
	int ty = threadIdx.y;
	// Shared-memory staging tiles for the current k-slab of A and B.
	__shared__ float A[BLOCK_SIZE][BLOCK_SIZE];
	__shared__ float B[BLOCK_SIZE][BLOCK_SIZE];
	int row = bx * BLOCK_SIZE + tx;
	int col = by * BLOCK_SIZE + ty;
	float sum = 0;
	// Bug fix: iterate over ceil(k / BLOCK_SIZE) tiles of the k dimension.
	// The original looped `i < gridDim.y` (= ceil(n / BLOCK_SIZE)), which only
	// covers all of k when n and k round up to the same tile count.
	int numTiles = (k + BLOCK_SIZE - 1) / BLOCK_SIZE;
	for (int i = 0; i < numTiles; i++) {
		// Stage one BLOCK_SIZE-wide slab of A (column-major, leading dim m).
		int a_col = i * BLOCK_SIZE + ty;
		int a_row = row;
		if (a_row < m && a_col < k) {
			A[tx][ty] = cudaA[a_row + a_col * m];
		}
		// Stage the matching slab of B.
		int b_col = col;
		int b_row = i * BLOCK_SIZE + tx;
		if (b_row < k && b_col < n){
			B[tx][ty] = cudaB[b_row * n + b_col];
		}
		__syncthreads(); // tiles fully written before anyone reads them
		if (row < m && col < n) {
			// Clamp the last, possibly partial, tile of k.
			int j_end = ((i + 1) * BLOCK_SIZE < k) ? BLOCK_SIZE : k - i * BLOCK_SIZE;
			for (int j = 0; j < j_end; j++) {
				sum += A[tx][j] * B[j][ty];
			}
		}
		__syncthreads(); // all reads done before the next slab overwrites
	}
	if (row < m && col < n) {
		// Device C is never initialized from the host, so it is zeroed first;
		// this intentionally makes the beta term act on zero.
		cudaC[row + col * m] = 0;
		cudaC[row + col * m] = cudaC[row + col * m] * beta + sum * alpha;
	}
}
bool readColMajorMatrixFile(const char *fn, int &nr_row, int &nr_col, std::vector<float>&v)
{
std::cerr << "Opening file:" << fn << std::endl;
std::fstream f(fn, std::fstream::in);
if (!f.good()) {
return false;
}
// Read # of rows and cols
f >> nr_row;
f >> nr_col;
float data;
std::cerr << "Matrix dimension: " << nr_row << "x" << nr_col << std::endl;
while (f.good()) {
f >> data;
v.push_back(data);
}
v.pop_back(); // remove the duplicated last element
return true;
}
// Serialize a matrix to a whitespace-delimited text file: the row and column
// counts first, then every element of `v`, terminated by a newline.
// Returns false when the file cannot be opened for writing.
bool writeColMajorMatrixFile(const char *fn, int nr_row, int nr_col, std::vector<float>&v)
{
	std::cerr << "Opening file:" << fn << " for write." << std::endl;
	std::fstream f(fn, std::fstream::out);
	if (!f.good()) {
		return false;
	}
	// Header: dimensions first.
	f << nr_row << " " << nr_col << " ";
	std::cerr << "Matrix dimension: " << nr_row << "x" << nr_col << std::endl;
	for (float value : v) {
		f << value << ' ';
	}
	f << "\n";
	return true;
}
// CPU reference SGEMM: C = alpha * op(A) * op(B) + beta * C, supporting only
// op(A) = A ('N') with A column-major (leading dim lda) and op(B) = B^T ('T')
// with B^T stored so element (nn, i) sits at nn + i * ldb. C is column-major
// with leading dimension ldc. Unsupported trans values log and return.
void basicSgemm(char transa, char transb, int m, int n, int k, float alpha, const float *A, int lda, const float *B, int ldb, float beta, float *C, int ldc)
{
	if ((transa != 'N') && (transa != 'n')) {
		std::cerr << "unsupported value of 'transa' in regtileSgemm()" << std::endl;
		return;
	}
	if ((transb != 'T') && (transb != 't')) {
		std::cerr << "unsupported value of 'transb' in regtileSgemm()" << std::endl;
		return;
	}
	for (int row = 0; row < m; ++row) {
		for (int col = 0; col < n; ++col) {
			// Dot product of row `row` of A with column `col` of B.
			float acc = 0.0f;
			for (int kk = 0; kk < k; ++kk) {
				acc += A[row + kk * lda] * B[col + kk * ldb];
			}
			C[row + col * ldc] = C[row + col * ldc] * beta + alpha * acc;
		}
	}
}
// GPU wrapper around sgemmKernel: allocates device buffers, copies A and B to
// the device, launches a BLOCK_SIZE x BLOCK_SIZE tiled kernel, and copies C
// back. Note: C is never copied to the device, so the caller's C values never
// reach the beta term (the kernel zeroes its output element first).
void basicSgemm_par(char transa, char transb, int m, int n, int k, float alpha, const float *A, int lda, const float *B, int ldb, float beta, float *C, int ldc)
{
	float *cudaA, *cudaB, *cudaC;
	int sizeA = m * k * sizeof(float), sizeB = k * n * sizeof(float), sizeC = m * n * sizeof(float);
	// Same restriction as the CPU reference: only 'N' x 'T' is implemented.
	if ((transa != 'N') && (transa != 'n')) {
		std::cerr << "unsupported value of 'transa' in regtileSgemm()" << std::endl;
		return;
	}
	if ((transb != 'T') && (transb != 't')) {
		std::cerr << "unsupported value of 'transb' in regtileSgemm()" << std::endl;
		return;
	}
	checkError(cudaMalloc(&cudaA, sizeA), __LINE__);
	checkError(cudaMemcpy(cudaA, A, sizeA, cudaMemcpyHostToDevice), __LINE__);
	checkError(cudaMalloc(&cudaB, sizeB), __LINE__);
	checkError(cudaMemcpy(cudaB, B, sizeB, cudaMemcpyHostToDevice), __LINE__);
	checkError(cudaMalloc(&cudaC, sizeC), __LINE__);
	// One thread per C element; ceil-div so ragged edges are covered.
	dim3 dimGrid((m + BLOCK_SIZE - 1) / BLOCK_SIZE, (n + BLOCK_SIZE - 1) / BLOCK_SIZE);
	dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
	sgemmKernel <<< dimGrid, dimBlock >>> (cudaA, cudaB, cudaC, m, n, k, alpha, beta);
	// The blocking device-to-host copy also synchronizes with the kernel.
	checkError(cudaMemcpy(C, cudaC, sizeC, cudaMemcpyDeviceToHost), __LINE__);
	checkError(cudaFree(cudaA), __LINE__);
	checkError(cudaFree(cudaB), __LINE__);
	checkError(cudaFree(cudaC), __LINE__);
}
// Driver: loads A (argv[1]) and B^T (argv[2]) from column-major matrix
// files, times a sequential SGEMM and the CUDA SGEMM on the same inputs
// with CUDA events, writes the sequential result to argv[3], and compares
// the two results element-wise within ACCURACY.
int main(int argc, char *argv[]) {
    int matArow, matAcol;
    int matBrow, matBcol;
    std::vector<float> matA;
    std::vector<float> matBT;
    float timeSeq = 0.0f, timePar = 0.0f;
    cudaEvent_t start = cudaEvent_t();
    checkError(cudaEventCreate(&start), __LINE__);
    cudaEvent_t stop = cudaEvent_t();
    checkError(cudaEventCreate(&stop), __LINE__);
    if (argc != 4)
    {
        fprintf(stderr, "Expecting three input filenames\n");
        exit(-1);
    }
    /* Read in data */
    // load A
    readColMajorMatrixFile(argv[1], matArow, matAcol, matA);
    // load B^T
    readColMajorMatrixFile(argv[2], matBcol, matBrow, matBT);
    // allocate space for C and D
    std::vector<float> matC(matArow * matBcol);
    std::vector<float> matD(matArow * matBcol);
    // Time the sequential reference (events bracket the CPU work; elapsed
    // time is read only after cudaEventSynchronize below).
    checkError(cudaEventRecord(start, 0), __LINE__);
    basicSgemm('N', 'T', matArow, matBcol, matAcol, 1.0f, &matA.front(), matArow, &matBT.front(), matBcol, 0.0f, &matC.front(), matArow);
    checkError(cudaEventRecord(stop, 0), __LINE__);
    checkError(cudaEventSynchronize(stop), __LINE__);
    checkError(cudaEventElapsedTime(&timeSeq, start, stop), __LINE__);
    timeSeq /= 1000; // ms -> s
    // Time the parallel version.
    checkError(cudaEventRecord(start, 0), __LINE__);
    basicSgemm_par('N', 'T', matArow, matBcol, matAcol, 1.0f, &matA.front(), matArow, &matBT.front(), matBcol, 0.0f, &matD.front(), matArow);
    checkError(cudaEventRecord(stop, 0), __LINE__);
    checkError(cudaEventSynchronize(stop), __LINE__);
    checkError(cudaEventElapsedTime(&timePar, start, stop), __LINE__);
    timePar /= 1000; // ms -> s
    checkError(cudaEventDestroy(start), __LINE__);
    checkError(cudaEventDestroy(stop), __LINE__);
    writeColMajorMatrixFile(argv[3], matArow, matBcol, matC);
    std::function<bool(double, double)> comparator = [](double left, double right) {
        // Lambda function to compare 2 doubles with ACCURACY
        return fabs(left - right) < ACCURACY;
    };
    std::cerr << "********************DZ3Z1**********************" << std::endl;
    std::cerr << "Elapsed time - SEQ: " << timeSeq << "." << std::endl;
    std::cerr << "Elapsed time - PAR: " << timePar << "." << std::endl;
    std::cerr << (std::equal(matC.begin(), matC.end(), matD.begin(), comparator) ? "TEST PASSED" : "TEST FAILED") << std::endl;
    std::cerr << "***********************************************" << std::endl;
    return 0;
}
|
7a3630f8e9a8b5e25cadd95decc148b195f5d3e7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector arithmetic ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar arithmetic ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector comparison ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar comparison ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector math (one argument) =============================================
// Calculate the arc cosine of the input argument.
extern "C"
// Calculate the nonnegative arc hyperbolic cosine of the input argument.
extern "C"
// Calculate the arc sine of the input argument.
extern "C"
// Calculate the arc hyperbolic sine of the input argument.
extern "C"
// Calculate the arc tangent of the input argument.
extern "C"
// Calculate the arc hyperbolic tangent of the input argument.
extern "C"
// Calculate the cube root of the input argument.
extern "C"
// Calculate ceiling of the input argument.
extern "C"
// Calculate the cosine of the input argument.
extern "C"
// Calculate the hyperbolic cosine of the input argument.
extern "C"
// Calculate the cosine of the input argument p .
extern "C"
// Calculate the complementary error function of the input argument.
extern "C"
// Calculate the inverse complementary error function of the input argument.
extern "C"
// Calculate the scaled complementary error function of the input argument.
extern "C"
// Calculate the error function of the input argument.
extern "C"
// Calculate the inverse error function of the input argument.
extern "C"
// Calculate the base 10 exponential of the input argument.
extern "C"
// Calculate the base 2 exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument, minus 1.
extern "C"
// Calculate the absolute value of its argument.
extern "C"
// Calculate the largest integer less than or equal to x.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 1 for the input argument.
extern "C"
// Calculate the natural logarithm of the absolute value of the gamma function of the input argument.
extern "C"
// Calculate the base 10 logarithm of the input argument.
extern "C"
// Calculate the value of l o g e ( 1 + x ) .
extern "C"
// Calculate the base 2 logarithm of the input argument.
extern "C"
// Calculate the doubleing point representation of the exponent of the input argument.
extern "C"
// Calculate the natural logarithm of the input argument.
extern "C"
// Calculate the standard normal cumulative distribution function.
extern "C"
// Calculate the inverse of the standard normal cumulative distribution function.
extern "C"
// Calculate reciprocal cube root function.
extern "C"
// Round input to nearest integer value in doubleing-point.
extern "C"
// Round to nearest integer value in doubleing-point.
extern "C"
// Calculate the reciprocal of the square root of the input argument.
extern "C"
// Calculate the sine of the input argument.
extern "C"
// Calculate the hyperbolic sine of the input argument.
extern "C"
// Calculate the sine of the input argument p .
extern "C"
// Calculate the square root of the input argument.
extern "C"
// Calculate the tangent of the input argument.
extern "C"
// Calculate the hyperbolic tangent of the input argument.
extern "C"
// Calculate the gamma function of the input argument.
extern "C"
// Truncate input argument to the integral part.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 1 for the input argument.
extern "C"
//=== Vector math (two arguments) ============================================
// Create value with given magnitude, copying sign of second value.
extern "C"
// Compute the positive difference between x and y.
extern "C"
// Divide two doubleing point values.
extern "C"
// Determine the maximum numeric value of the arguments.
extern "C"
// Determine the minimum numeric value of the arguments.
extern "C"
// Calculate the doubleing-point remainder of x / y.
extern "C"
// Calculate the square root of the sum of squares of two arguments.
extern "C"
// Return next representable single-precision doubleing-point value afer argument.
extern "C"
// Calculate the value of first argument to the power of second argument.
extern "C"
// Compute single-precision doubleing-point remainder.
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//WARNING : device_sum size should be gridDim.x
// Element-wise inverse complementary error function:
// result[id] = erfcinv(y[id]) for each of the n elements.
// Launched on a 2D grid/block; the 2D thread coordinate is flattened to a
// linear id over a (gridDim.x * blockDim.x)-wide row-major layout, with an
// explicit bounds check for the tail.
__global__ void vec_erfcinv (int n, double *result, double *y)
{
    int idx = threadIdx.x + blockIdx.x * blockDim.x;  // global x coordinate
    int idy = threadIdx.y + blockIdx.y * blockDim.y;  // global y coordinate
    int id = idy * gridDim.x * blockDim.x + idx;      // flatten 2D -> 1D
    if (id < n)
    {
        result[id] = erfcinv(y[id]);
    }
} | 7a3630f8e9a8b5e25cadd95decc148b195f5d3e7.cu | #include "includes.h"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector arithmetic ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar arithmetic ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector comparison ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar comparison ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector math (one argument) =============================================
// Calculate the arc cosine of the input argument.
extern "C"
// Calculate the nonnegative arc hyperbolic cosine of the input argument.
extern "C"
// Calculate the arc sine of the input argument.
extern "C"
// Calculate the arc hyperbolic sine of the input argument.
extern "C"
// Calculate the arc tangent of the input argument.
extern "C"
// Calculate the arc hyperbolic tangent of the input argument.
extern "C"
// Calculate the cube root of the input argument.
extern "C"
// Calculate ceiling of the input argument.
extern "C"
// Calculate the cosine of the input argument.
extern "C"
// Calculate the hyperbolic cosine of the input argument.
extern "C"
// Calculate the cosine of the input argument × p .
extern "C"
// Calculate the complementary error function of the input argument.
extern "C"
// Calculate the inverse complementary error function of the input argument.
extern "C"
// Calculate the scaled complementary error function of the input argument.
extern "C"
// Calculate the error function of the input argument.
extern "C"
// Calculate the inverse error function of the input argument.
extern "C"
// Calculate the base 10 exponential of the input argument.
extern "C"
// Calculate the base 2 exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument, minus 1.
extern "C"
// Calculate the absolute value of its argument.
extern "C"
// Calculate the largest integer less than or equal to x.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 1 for the input argument.
extern "C"
// Calculate the natural logarithm of the absolute value of the gamma function of the input argument.
extern "C"
// Calculate the base 10 logarithm of the input argument.
extern "C"
// Calculate the value of l o g e ( 1 + x ) .
extern "C"
// Calculate the base 2 logarithm of the input argument.
extern "C"
// Calculate the doubleing point representation of the exponent of the input argument.
extern "C"
// Calculate the natural logarithm of the input argument.
extern "C"
// Calculate the standard normal cumulative distribution function.
extern "C"
// Calculate the inverse of the standard normal cumulative distribution function.
extern "C"
// Calculate reciprocal cube root function.
extern "C"
// Round input to nearest integer value in doubleing-point.
extern "C"
// Round to nearest integer value in doubleing-point.
extern "C"
// Calculate the reciprocal of the square root of the input argument.
extern "C"
// Calculate the sine of the input argument.
extern "C"
// Calculate the hyperbolic sine of the input argument.
extern "C"
// Calculate the sine of the input argument × p .
extern "C"
// Calculate the square root of the input argument.
extern "C"
// Calculate the tangent of the input argument.
extern "C"
// Calculate the hyperbolic tangent of the input argument.
extern "C"
// Calculate the gamma function of the input argument.
extern "C"
// Truncate input argument to the integral part.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 1 for the input argument.
extern "C"
//=== Vector math (two arguments) ============================================
// Create value with given magnitude, copying sign of second value.
extern "C"
// Compute the positive difference between x and y.
extern "C"
// Divide two doubleing point values.
extern "C"
// Determine the maximum numeric value of the arguments.
extern "C"
// Determine the minimum numeric value of the arguments.
extern "C"
// Calculate the doubleing-point remainder of x / y.
extern "C"
// Calculate the square root of the sum of squares of two arguments.
extern "C"
// Return next representable single-precision doubleing-point value afer argument.
extern "C"
// Calculate the value of first argument to the power of second argument.
extern "C"
// Compute single-precision doubleing-point remainder.
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//WARNING : device_sum size should be gridDim.x
// Element-wise inverse complementary error function:
// result[id] = erfcinv(y[id]) for each of the n elements.
// Launched on a 2D grid/block; the 2D thread coordinate is flattened to a
// linear id over a (gridDim.x * blockDim.x)-wide row-major layout, with an
// explicit bounds check for the tail.
__global__ void vec_erfcinv (int n, double *result, double *y)
{
    int idx = threadIdx.x + blockIdx.x * blockDim.x;  // global x coordinate
    int idy = threadIdx.y + blockIdx.y * blockDim.y;  // global y coordinate
    int id = idy * gridDim.x * blockDim.x + idx;      // flatten 2D -> 1D
    if (id < n)
    {
        result[id] = erfcinv(y[id]);
    }
}
57ef7f429a3ea267f505853f143fc9f9d9257e8d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include "sandbox_Cuda.H"
#include "Config.H"
#include "IntVect.H"
#include "Box.H"
#include "CudaFab.H"
// Sanity-check kernel: threads 0..g_SpaceDim-1 stage a_ivA into shared
// memory, every thread then builds r_iv = s_iv + s_iv, and the components
// are written back into a_fab (and printed).
__global__ void
testKernel1(Real* a_fab, IntVect a_ivA)
{
  __shared__ IntVect s_iv;
  if (threadIdx.x < g_SpaceDim)
    {
      s_iv[threadIdx.x] = a_ivA[threadIdx.x];
    }
  // Fix: s_iv is written only by threads < g_SpaceDim but read by every
  // thread below; without a barrier the read races the shared-memory
  // store (undefined under independent thread scheduling on Volta+).
  __syncthreads();
  IntVect r_iv(s_iv);
  r_iv += s_iv;
  if (threadIdx.x < g_SpaceDim)
    {
      a_fab[threadIdx.x] = r_iv[threadIdx.x];
      printf("r_iv[%d]: %d ", threadIdx.x, r_iv[threadIdx.x]);
    }
}
// Host driver for testKernel1: builds two IntVects, exercises
// IntVect::max on the host, then launches the kernel with one
// 16-thread block on ivA.
void
testCuda1(SymbolPair<Real> a_fab)
{
  IntVect ivA(D_DECL(2, 3, 4));
  IntVect ivB(D_DECL(1, 2, 3));
  std::cout << ivA << std::endl;
  // NOTE(review): the result of max() is not used after this point.
  ivB.max(ivA);
  hipLaunchKernelGGL(( testKernel1), dim3(1), dim3(16), 0, 0, static_cast<Real*>(a_fab.device), ivA);
}
/*----------------------------------------------------------------------------*/
__global__ void
testKernel2(Real* a_fab, Box a_box)
{
__shared__ Box s_box;
if (threadIdx.x < 2*g_SpaceDim)
{
s_box[threadIdx.x] = a_box[threadIdx.x];
}
Box r_box(s_box);
r_box.grow(1);
// __syncthreads();
if (threadIdx.x < 2*g_SpaceDim)
{
a_fab[threadIdx.x] = r_box[threadIdx.x];
printf("r_box[%d]: %d ", threadIdx.x, r_box[threadIdx.x]);
}
}
void
testCuda2(SymbolPair<Real> a_fab)
{
Box box(IntVect(D_DECL(-1, 0, 0)), IntVect(D_DECL(0, 1, 1)));
box.shift(1, 0);
std::cout << box << std::endl;
hipLaunchKernelGGL(( testKernel2), dim3(1), dim3(16), 0, 0, static_cast<Real*>(a_fab.device), box);
}
/*----------------------------------------------------------------------------*/
__global__ void
testKernel3(Real* a_fab, Box a_fabBox)
{
__shared__ int stride[g_SpaceDim];
__shared__ int offset;
__shared__ int cstride;
if (threadIdx.x == 0)
{
a_fabBox.getStride(stride);
offset = a_fabBox.getOffset(stride);
cstride = a_fabBox.size();
}
__syncthreads();
IntVect idxVec;
a_fabBox.linToVec(threadIdx.x, stride, idxVec);
int idxLin0 = a_fabBox.vecToLin0(idxVec, stride);
if (idxLin0 + offset != threadIdx.x)
{
printf("Conversion failed for thread %2d: vec: (%2d,%2d,%2d) lin: %2d\n",
threadIdx.x, idxVec[0], idxVec[1], idxVec[2], idxLin0);
}
a_fab[idxLin0 + offset ] = (Real)-1.0;
a_fab[idxLin0 + offset + cstride] = (Real)-2.0;
}
void
testCuda3(SymbolPair<Real> a_fab, const Box& a_fabBox)
{
CH_assert(a_fabBox.size() <= CHDEF_SYSTEM_CUDAATTR_MAX_THREADS_PER_BLOCK);
hipLaunchKernelGGL(( testKernel3), dim3(1), dim3(a_fabBox.size()), 0, 0, static_cast<Real*>(a_fab.device),
a_fabBox);
}
/*----------------------------------------------------------------------------*/
__global__ void
testKernel4(Real* a_fab, Box a_fabBox, Box a_workBox)
{
__shared__ int fabStride[g_SpaceDim];
__shared__ int fabCstride;
if (threadIdx.x == 0)
{
a_fabBox.getStride(fabStride);
fabCstride = a_fabBox.size();
}
__syncthreads();
// Get the cell
IntVect idxVec;
{
a_workBox.linToVec(threadIdx.x, idxVec);
}
// Get index into fab
int idxLin0 = a_fabBox.vecToLin0(idxVec, fabStride);
a_fab[idxLin0 ] = (Real)1.0;
a_fab[idxLin0 + fabCstride] = (Real)2.0;
}
void
testCuda4(SymbolPair<Real> a_fab, const Box& a_fabBox, const Box& a_workBox)
{
CH_assert(a_workBox.size() <= CHDEF_SYSTEM_CUDAATTR_MAX_THREADS_PER_BLOCK);
hipLaunchKernelGGL(( testKernel4), dim3(1), dim3(a_workBox.size()), 0, 0,
// Add the offset into the pointer address
static_cast<Real*>(a_fab.device) + a_fabBox.getOffset(),
a_fabBox,
a_workBox);
}
/*----------------------------------------------------------------------------*/
__global__ void
testKernel5(Real* a_fabData, Box a_fabBox, int a_fabNcomp, Box a_workBox)
{
__shared__ CudaFab<Real> fab;
if (threadIdx.x == 0)
{
// Add the offset into the pointer address
fab.define(a_fabData, a_fabBox, a_fabNcomp);
}
__syncthreads();
// Get the cell
IntVect idxVec;
{
a_workBox.linToVec(threadIdx.x, idxVec);
}
// Get index into fab
fab(idxVec, 0) = (Real)3.0;
fab(idxVec, 1) = (Real)4.0;
}
void
testCuda5(SymbolPair<Real> a_fabData, const Box& a_fabBox, const int a_fabNcomp,
const Box& a_workBox)
{
CH_assert(a_workBox.size() <= CHDEF_SYSTEM_CUDAATTR_MAX_THREADS_PER_BLOCK);
hipLaunchKernelGGL(( testKernel5), dim3(1), dim3(a_workBox.size()), 0, 0,
static_cast<Real*>(a_fabData.device),
a_fabBox,
a_fabNcomp,
a_workBox);
}
/*----------------------------------------------------------------------------*/
__global__ void
testKernel6(CudaFab<Real> a_fabA, CudaFab<Real> a_fabB, const Box a_workBox)
{
__shared__ Real s_slabData[2*3*g_blkSizeGhost*g_blkSizeGhost];
__shared__ SlabFab<Real, 3> s_slabFab;
__shared__ CudaFab<Real> s_fabA;
__shared__ CudaFab<Real> s_fabB;
// Load fab meta-data for Fabs A and B.
{
const int numThrCopy = CudaFab<Real>::numThrCopy();
CH_assert(numThrCopy < blockDim.x);
s_fabA.define(a_fabA, 0, numThrCopy);
s_fabB.define(a_fabB, 0, numThrCopy);
}
// Compute (_Ar_ithmetic) index, saved to avoid repeat linear->vector
// conversion
IntVect ivecAr;
a_workBox.linToVec(threadIdx.x, ivecAr);
// _L_oad/_S_tore index, saved to avoid repeat linear->vector conversion
IntVect ivecLS;
// Set up the cache window
{
Box LSbox(a_workBox); // This is the initial cache window
LSbox.hiVect(2) = LSbox.loVect(2);
LSbox.grow(1);
// Initialize the slab Fab cache. Note that it is shifted one cell towards
// the low end so it can be shifted back at the beginning of the first
// iteration.
LSbox.shift(-1, 2);
int locNDEnd = LSbox.hiVect(2); // vec[2] to end loading initial data
int locNDBeg = locNDEnd - 1; // vec[2] to begin loading initial data
s_slabFab.define(s_slabData, // Data
LSbox, // Initial window
2, // Number of components
2, // Normal direction
locNDBeg, locNDEnd, // Initial loading
ivecLS, // Vector index (output)
0, // Start component in source Fab
s_fabA, // Source data
g_blkSizeGhost*g_blkSizeGhost); // # threads for loading
}
// Loop over slabs
for (int iSlab = 0; iSlab != g_blkSize; ++iSlab)
{
// Shift the slab
s_slabFab.shift(1, ivecLS);
if (threadIdx.x < g_blkSize*g_blkSize)
{
IntVect basis(D_DECL(0, 0, 0));
basis[2] = 1;
s_fabB(ivecAr, 0) = (s_slabFab(ivecAr - basis, 0) +
s_slabFab(ivecAr, 0) +
s_slabFab(ivecAr + basis, 0));
s_fabB(ivecAr, 1) = (s_slabFab(ivecAr - basis, 1) +
s_slabFab(ivecAr, 1) +
s_slabFab(ivecAr + basis, 1));
// Shift the arithmetic IntVect for the next slab iteration
ivecAr[2] += 1;
}
}
}
void
testCuda6(BaseFab<Real> *const a_fabA,
BaseFab<Real> *const a_fabB,
const Box& a_workBox)
{
BaseFabData<Real> *const fabA =
reinterpret_cast<BaseFabData<Real>*>(a_fabA);
CudaFab<Real> cudaFabA;
cudaFabA.define(*fabA);
BaseFabData<Real> *const fabB =
reinterpret_cast<BaseFabData<Real>*>(a_fabB);
CudaFab<Real> cudaFabB;
cudaFabB.define(*fabB);
hipLaunchKernelGGL(( testKernel6), dim3(1), dim3(g_blkSizeGhost*g_blkSizeGhost), 0, 0,
cudaFabA,
cudaFabB,
a_workBox);
}
| 57ef7f429a3ea267f505853f143fc9f9d9257e8d.cu | #include <cstdio>
#include "sandbox_Cuda.H"
#include "Config.H"
#include "IntVect.H"
#include "Box.H"
#include "CudaFab.H"
__global__ void
testKernel1(Real* a_fab, IntVect a_ivA)
{
__shared__ IntVect s_iv;
if (threadIdx.x < g_SpaceDim)
{
s_iv[threadIdx.x] = a_ivA[threadIdx.x];
}
IntVect r_iv(s_iv);
r_iv += s_iv;
if (threadIdx.x < g_SpaceDim)
{
a_fab[threadIdx.x] = r_iv[threadIdx.x];
printf("r_iv[%d]: %d ", threadIdx.x, r_iv[threadIdx.x]);
}
}
void
testCuda1(SymbolPair<Real> a_fab)
{
IntVect ivA(D_DECL(2, 3, 4));
IntVect ivB(D_DECL(1, 2, 3));
std::cout << ivA << std::endl;
ivB.max(ivA);
testKernel1<<<1, 16>>>(static_cast<Real*>(a_fab.device), ivA);
}
/*----------------------------------------------------------------------------*/
__global__ void
testKernel2(Real* a_fab, Box a_box)
{
__shared__ Box s_box;
if (threadIdx.x < 2*g_SpaceDim)
{
s_box[threadIdx.x] = a_box[threadIdx.x];
}
Box r_box(s_box);
r_box.grow(1);
// __syncthreads();
if (threadIdx.x < 2*g_SpaceDim)
{
a_fab[threadIdx.x] = r_box[threadIdx.x];
printf("r_box[%d]: %d ", threadIdx.x, r_box[threadIdx.x]);
}
}
void
testCuda2(SymbolPair<Real> a_fab)
{
Box box(IntVect(D_DECL(-1, 0, 0)), IntVect(D_DECL(0, 1, 1)));
box.shift(1, 0);
std::cout << box << std::endl;
testKernel2<<<1, 16>>>(static_cast<Real*>(a_fab.device), box);
}
/*----------------------------------------------------------------------------*/
__global__ void
testKernel3(Real* a_fab, Box a_fabBox)
{
__shared__ int stride[g_SpaceDim];
__shared__ int offset;
__shared__ int cstride;
if (threadIdx.x == 0)
{
a_fabBox.getStride(stride);
offset = a_fabBox.getOffset(stride);
cstride = a_fabBox.size();
}
__syncthreads();
IntVect idxVec;
a_fabBox.linToVec(threadIdx.x, stride, idxVec);
int idxLin0 = a_fabBox.vecToLin0(idxVec, stride);
if (idxLin0 + offset != threadIdx.x)
{
printf("Conversion failed for thread %2d: vec: (%2d,%2d,%2d) lin: %2d\n",
threadIdx.x, idxVec[0], idxVec[1], idxVec[2], idxLin0);
}
a_fab[idxLin0 + offset ] = (Real)-1.0;
a_fab[idxLin0 + offset + cstride] = (Real)-2.0;
}
void
testCuda3(SymbolPair<Real> a_fab, const Box& a_fabBox)
{
CH_assert(a_fabBox.size() <= CHDEF_SYSTEM_CUDAATTR_MAX_THREADS_PER_BLOCK);
testKernel3<<<1, a_fabBox.size()>>>(static_cast<Real*>(a_fab.device),
a_fabBox);
}
/*----------------------------------------------------------------------------*/
__global__ void
testKernel4(Real* a_fab, Box a_fabBox, Box a_workBox)
{
__shared__ int fabStride[g_SpaceDim];
__shared__ int fabCstride;
if (threadIdx.x == 0)
{
a_fabBox.getStride(fabStride);
fabCstride = a_fabBox.size();
}
__syncthreads();
// Get the cell
IntVect idxVec;
{
a_workBox.linToVec(threadIdx.x, idxVec);
}
// Get index into fab
int idxLin0 = a_fabBox.vecToLin0(idxVec, fabStride);
a_fab[idxLin0 ] = (Real)1.0;
a_fab[idxLin0 + fabCstride] = (Real)2.0;
}
void
testCuda4(SymbolPair<Real> a_fab, const Box& a_fabBox, const Box& a_workBox)
{
CH_assert(a_workBox.size() <= CHDEF_SYSTEM_CUDAATTR_MAX_THREADS_PER_BLOCK);
testKernel4<<<1, a_workBox.size()>>>(
// Add the offset into the pointer address
static_cast<Real*>(a_fab.device) + a_fabBox.getOffset(),
a_fabBox,
a_workBox);
}
/*----------------------------------------------------------------------------*/
__global__ void
testKernel5(Real* a_fabData, Box a_fabBox, int a_fabNcomp, Box a_workBox)
{
__shared__ CudaFab<Real> fab;
if (threadIdx.x == 0)
{
// Add the offset into the pointer address
fab.define(a_fabData, a_fabBox, a_fabNcomp);
}
__syncthreads();
// Get the cell
IntVect idxVec;
{
a_workBox.linToVec(threadIdx.x, idxVec);
}
// Get index into fab
fab(idxVec, 0) = (Real)3.0;
fab(idxVec, 1) = (Real)4.0;
}
void
testCuda5(SymbolPair<Real> a_fabData, const Box& a_fabBox, const int a_fabNcomp,
const Box& a_workBox)
{
CH_assert(a_workBox.size() <= CHDEF_SYSTEM_CUDAATTR_MAX_THREADS_PER_BLOCK);
testKernel5<<<1, a_workBox.size()>>>(
static_cast<Real*>(a_fabData.device),
a_fabBox,
a_fabNcomp,
a_workBox);
}
/*----------------------------------------------------------------------------*/
__global__ void
testKernel6(CudaFab<Real> a_fabA, CudaFab<Real> a_fabB, const Box a_workBox)
{
__shared__ Real s_slabData[2*3*g_blkSizeGhost*g_blkSizeGhost];
__shared__ SlabFab<Real, 3> s_slabFab;
__shared__ CudaFab<Real> s_fabA;
__shared__ CudaFab<Real> s_fabB;
// Load fab meta-data for Fabs A and B.
{
const int numThrCopy = CudaFab<Real>::numThrCopy();
CH_assert(numThrCopy < blockDim.x);
s_fabA.define(a_fabA, 0, numThrCopy);
s_fabB.define(a_fabB, 0, numThrCopy);
}
// Compute (_Ar_ithmetic) index, saved to avoid repeat linear->vector
// conversion
IntVect ivecAr;
a_workBox.linToVec(threadIdx.x, ivecAr);
// _L_oad/_S_tore index, saved to avoid repeat linear->vector conversion
IntVect ivecLS;
// Set up the cache window
{
Box LSbox(a_workBox); // This is the initial cache window
LSbox.hiVect(2) = LSbox.loVect(2);
LSbox.grow(1);
// Initialize the slab Fab cache. Note that it is shifted one cell towards
// the low end so it can be shifted back at the beginning of the first
// iteration.
LSbox.shift(-1, 2);
int locNDEnd = LSbox.hiVect(2); // vec[2] to end loading initial data
int locNDBeg = locNDEnd - 1; // vec[2] to begin loading initial data
s_slabFab.define(s_slabData, // Data
LSbox, // Initial window
2, // Number of components
2, // Normal direction
locNDBeg, locNDEnd, // Initial loading
ivecLS, // Vector index (output)
0, // Start component in source Fab
s_fabA, // Source data
g_blkSizeGhost*g_blkSizeGhost); // # threads for loading
}
// Loop over slabs
for (int iSlab = 0; iSlab != g_blkSize; ++iSlab)
{
// Shift the slab
s_slabFab.shift(1, ivecLS);
if (threadIdx.x < g_blkSize*g_blkSize)
{
IntVect basis(D_DECL(0, 0, 0));
basis[2] = 1;
s_fabB(ivecAr, 0) = (s_slabFab(ivecAr - basis, 0) +
s_slabFab(ivecAr, 0) +
s_slabFab(ivecAr + basis, 0));
s_fabB(ivecAr, 1) = (s_slabFab(ivecAr - basis, 1) +
s_slabFab(ivecAr, 1) +
s_slabFab(ivecAr + basis, 1));
// Shift the arithmetic IntVect for the next slab iteration
ivecAr[2] += 1;
}
}
}
void
testCuda6(BaseFab<Real> *const a_fabA,
BaseFab<Real> *const a_fabB,
const Box& a_workBox)
{
BaseFabData<Real> *const fabA =
reinterpret_cast<BaseFabData<Real>*>(a_fabA);
CudaFab<Real> cudaFabA;
cudaFabA.define(*fabA);
BaseFabData<Real> *const fabB =
reinterpret_cast<BaseFabData<Real>*>(a_fabB);
CudaFab<Real> cudaFabB;
cudaFabB.define(*fabB);
testKernel6<<<1, g_blkSizeGhost*g_blkSizeGhost>>>(
cudaFabA,
cudaFabB,
a_workBox);
}
|
6828a44f6324576022c113dd5625bd1fec956844.hip | // !!! This is a file automatically generated by hipify!!!
#include "particle.cuh"
// Default particle: unit mass/density/pressure, smoothing length h = 1,
// and material constants (gamma, A, nu, rho0, k — presumably SPH
// equation-of-state/viscosity parameters; semantics are not visible in
// this file — confirm against the solver). Position, velocity and
// acceleration all start at the origin.
Particle::Particle()
{
    mass = 1;
    density = 1;
    pressure = 1;
    h = 1;
    gamma = 1;
    A = 10;
    nu = 1000;
    rho0 = 0.5;
    k = 1;
    this->set_pos(0, 0, 0);
    this->set_vel(0, 0, 0);
    this->set_ax(0, 0, 0);
};
// Allocates a device-side copy of this particle and uploads the current
// host state into it; the device pointer is cached in d_this.
// NOTE(review): calling allocate() twice leaks the first device buffer —
// no matching hipFree is visible in this file; return codes of
// hipMalloc/hipMemcpy are also ignored here.
void Particle::allocate()
{
    hipMalloc((void**)&d_this, sizeof(Particle));
    hipMemcpy(d_this, this, sizeof(Particle), hipMemcpyHostToDevice);
}
// Trivial host-side setters. They mutate only the host copy of the
// particle — the device copy behind d_this is NOT updated by any of them.
void Particle::set_mass(float _mass)
{
    mass = _mass;
}
// Sets the 3D position (x, y, z).
void Particle::set_pos(float x, float y, float z)
{
    pos[0] = x;
    pos[1] = y;
    pos[2] = z;
}
// Sets position component i (0=x, 1=y, 2=z); i is not range-checked.
void Particle::set_pos_i(float q, int i)
{
    pos[i] = q;
}
// Sets the 3D velocity.
void Particle::set_vel(float x, float y, float z)
{
    vel[0] = x;
    vel[1] = y;
    vel[2] = z;
}
// Sets velocity component i; i is not range-checked.
void Particle::set_vel_i(float q, int i)
{
    vel[i] = q;
}
// Sets the 3D acceleration.
void Particle::set_ax(float x, float y, float z)
{
    ax[0] = x;
    ax[1] = y;
    ax[2] = z;
}
// Returns position component i after pulling data back from the device.
// Fixed: printf was handed the non-literal error string as its FORMAT
// argument — undefined behavior if the message ever contains '%'.
// NOTE(review): only sizeof(float) bytes are copied from d_this, so
// tmp->pos is never actually refreshed from the device; and `pos =
// tmp->pos` looks like a pointer assignment into tmp, which is deleted on
// the next line — confirm the Particle member layout before relying on
// the value returned here.
float Particle::get_pos_i(int i)
{
    Particle* tmp = new Particle;
    hipError_t err = hipMemcpy(tmp, d_this, sizeof(float), hipMemcpyDeviceToHost);
    if (err != hipSuccess)
        printf("%s", hipGetErrorString(err));
    pos = tmp->pos;
    delete tmp;
    return pos[i];
}
// Blocking download of ps_num particles from device array d_ps into the
// host array ps; the hipMemcpy return code is ignored.
void update_particles(Particle* ps, Particle* d_ps, size_t ps_num)
{
    hipMemcpy(ps, d_ps, ps_num * sizeof(Particle), hipMemcpyDeviceToHost);
}
// Returns the cached device-side pointer previously set by allocate().
Particle* Particle::device()
{
    return d_this;
}
// Allocates a device array of n particles and uploads ps into it in one
// bulk copy (the commented loop is the older per-element upload).
// The caller owns the returned device pointer and must hipFree it.
Particle* device_particles_array(Particle* ps, size_t n)
{
    Particle* d_ps;
    hipMalloc((void**)&d_ps, n * sizeof(Particle));
    hipMemcpy(d_ps, ps, n * sizeof(Particle), hipMemcpyHostToDevice);
    //for (size_t i = 0; i < n; ++i)
    //	hipMemcpy(d_ps + i, ps + i, sizeof(Particle), hipMemcpyHostToDevice);
    return d_ps;
}
| 6828a44f6324576022c113dd5625bd1fec956844.cu | #include "particle.cuh"
Particle::Particle()
{
mass = 1;
density = 1;
pressure = 1;
h = 1;
gamma = 1;
A = 10;
nu = 1000;
rho0 = 0.5;
k = 1;
this->set_pos(0, 0, 0);
this->set_vel(0, 0, 0);
this->set_ax(0, 0, 0);
};
void Particle::allocate()
{
cudaMalloc((void**)&d_this, sizeof(Particle));
cudaMemcpy(d_this, this, sizeof(Particle), cudaMemcpyHostToDevice);
}
void Particle::set_mass(float _mass)
{
mass = _mass;
}
void Particle::set_pos(float x, float y, float z)
{
pos[0] = x;
pos[1] = y;
pos[2] = z;
}
void Particle::set_pos_i(float q, int i)
{
pos[i] = q;
}
void Particle::set_vel(float x, float y, float z)
{
vel[0] = x;
vel[1] = y;
vel[2] = z;
}
void Particle::set_vel_i(float q, int i)
{
vel[i] = q;
}
void Particle::set_ax(float x, float y, float z)
{
ax[0] = x;
ax[1] = y;
ax[2] = z;
}
// Returns position component i after pulling data back from the device.
// Fixed: printf was handed the non-literal error string as its FORMAT
// argument — undefined behavior if the message ever contains '%'.
// NOTE(review): only sizeof(float) bytes are copied from d_this, so
// tmp->pos is never actually refreshed from the device; and `pos =
// tmp->pos` looks like a pointer assignment into tmp, which is deleted on
// the next line — confirm the Particle member layout before relying on
// the value returned here.
float Particle::get_pos_i(int i)
{
    Particle* tmp = new Particle;
    cudaError_t err = cudaMemcpy(tmp, d_this, sizeof(float), cudaMemcpyDeviceToHost);
    if (err != cudaSuccess)
        printf("%s", cudaGetErrorString(err));
    pos = tmp->pos;
    delete tmp;
    return pos[i];
}
void update_particles(Particle* ps, Particle* d_ps, size_t ps_num)
{
cudaMemcpy(ps, d_ps, ps_num * sizeof(Particle), cudaMemcpyDeviceToHost);
}
Particle* Particle::device()
{
return d_this;
}
Particle* device_particles_array(Particle* ps, size_t n)
{
Particle* d_ps;
cudaMalloc((void**)&d_ps, n * sizeof(Particle));
cudaMemcpy(d_ps, ps, n * sizeof(Particle), cudaMemcpyHostToDevice);
//for (size_t i = 0; i < n; ++i)
// cudaMemcpy(d_ps + i, ps + i, sizeof(Particle), cudaMemcpyHostToDevice);
return d_ps;
}
|
778ccf6ebaa03467212804079d56ac877d44474c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <math.h>
#include <time.h>
hipError_t sumPrefix(unsigned long* input, unsigned long* results, long numbers_size, int threads_in_block, int vector_size);
// Error check used on the device-buffer path: on failure prints the
// message plus the HIP status to stderr, frees both device buffers so
// the caller can bail out without leaking, and returns true.
// Returns false when cudaStatus is hipSuccess.
bool checkForError(const hipError_t cudaStatus, const char text[], unsigned long* dev_input, unsigned long* dev_results) {
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "\n\n%s \nError code: %d \nStatus: %s \n\n", text, cudaStatus, hipGetErrorString(cudaStatus));
        hipFree(dev_input);
        hipFree(dev_results);
        return true;
    }
    return false;
}
// Buffer-free overload: prints the message plus the HIP status to stderr
// and returns true iff cudaStatus signals an error.
bool checkForError(const hipError_t cudaStatus, const char text[]) {
    if (cudaStatus == hipSuccess) {
        return false;
    }
    fprintf(stderr, "\n\n%s \nError code: %d \nStatus: %s \n\n", text, cudaStatus, hipGetErrorString(cudaStatus));
    return true;
}
// One Hillis–Steele inclusive-scan step over global memory:
// results[i] = input[i] + input[i - exp] when an element exp positions
// back exists, otherwise a plain copy. Each thread handles a contiguous
// chunk of vector_size elements, clipped to numbers_size.
__global__
void prefix_sum_kernel(unsigned long* input, unsigned long* results, unsigned long exp, int numbers_size, int vector_size) {
    int start = blockDim.x * blockIdx.x + threadIdx.x;
    start *= vector_size;  // first element of this thread's chunk
    if (start < numbers_size) {
        for (int i = start; i < vector_size + start && i < numbers_size; i++) {
            if (i < exp) {
                // No element exp positions back: pass the value through.
                results[i] = input[i];
            }
            else {
                results[i] = input[i] + input[i - exp];
            }
        }
    }
}
// Shared-memory variant of the scan step: each block first stages its own
// chunk of input into input_s, then reads the element `exp` positions
// back from shared memory when it falls inside this block's chunk, and
// from global memory otherwise.
// NOTE(review): the printf(".") / printf("#") calls look like leftover
// debug output — they serialize warps and flood stdout; remove them
// before measuring performance.
__global__
void prefix_sum_kernel_shared(unsigned long* input, unsigned long* results, long exp, int numbers_size, int vector_size) {
    extern __shared__ unsigned long input_s[];
    int global_start = (blockDim.x * blockIdx.x + threadIdx.x) * vector_size;  // first global index of this thread's chunk
    int shared_start = threadIdx.x * vector_size;  // same position inside the block's shared slab
    // Stage this thread's chunk into shared memory.
    for (int i = global_start, shared_i = shared_start; i < vector_size + global_start && i < numbers_size; i++, shared_i++) {
        input_s[shared_i] = input[i];
    }
    __syncthreads();  // whole block's chunk must be staged before cross-thread reads
    for (int i = global_start, shared_i = shared_start; i < vector_size + global_start && i < numbers_size; i++, shared_i++) {
        if (i < exp) {
            results[i] = input_s[shared_i];
        }
        else {
            if (shared_i - exp < 0) {
                // Neighbour lives in another block's chunk: read from global.
                printf(".");
                results[i] = input_s[shared_i] + input[i - exp];
            }
            else {
                printf("#");
                results[i] = input_s[shared_i] + input_s[shared_i - exp];
            }
        }
    }
}
// Copies results[] back into input[] between scan steps; each thread
// handles one contiguous vector_size-long chunk, clipped to numbers_size.
__global__
void copy_kernel(unsigned long* input, unsigned long* results, int numbers_size, int vector_size) {
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    const int first = tid * vector_size;
    if (first >= numbers_size) {
        return;  // this thread's chunk lies entirely past the data
    }
    for (int idx = first; idx < first + vector_size && idx < numbers_size; idx++) {
        input[idx] = results[idx];
    }
}
// Validates `results` against a sequentially computed inclusive prefix sum of
// `input`, printing a diagnostic for every mismatching position.
// Returns true iff every element matches.
// Fix: use %lu (the values are unsigned long; %ld was undefined behavior) and
// a long loop index so arraySize > INT_MAX does not overflow.
bool test(unsigned long* results, unsigned long* input, long arraySize) {
    bool prefix_sum_ok = true;
    unsigned long sum = 0;
    printf("\n\n");
    for (long i = 0; i < arraySize; i++) {
        sum += input[i];
        if (results[i] != sum) {
            printf("BLAD! NIE ZGADZA SIE! oczekiwana = %lu, dostalem = %lu\n", sum, results[i]);
            prefix_sum_ok = false;
        }
    }
    return prefix_sum_ok;
}
// Reads the problem size and launch configuration from stdin, runs the GPU
// prefix sum, validates it against a CPU reference, and prints the last value.
// Fixes: scanf used "%d" with long* (undefined behavior on LP64 targets),
// the host buffers were never freed, and the final printf used %ld for an
// unsigned long.
int main()
{
    long numbers_size;      // total element count, e.g. 111111111
    long threads_in_block;  // threads per block, e.g. 512
    int vector_size;        // elements processed by each thread, e.g. 32
    printf("Podaj rozmiar wektora >>> ");
    scanf("%ld", &numbers_size);
    printf("Podaj liczbe watkow w bloku >>> ");
    scanf("%ld", &threads_in_block);
    printf("Podaj rozmiar wektora przetwarzanego przez watek >>> ");
    scanf("%d", &vector_size);
    unsigned long* input = (unsigned long*)malloc(numbers_size * sizeof(unsigned long));
    unsigned long* results = (unsigned long*)malloc(numbers_size * sizeof(unsigned long));
    if (input == NULL || results == NULL) {
        fprintf(stderr, "host malloc failed!\n");
        free(input);
        free(results);
        return 1;
    }
    for (long i = 0; i < numbers_size; i++) {
        input[i] = i + 1;
    }
    hipError_t cudaStatus = sumPrefix(input, results, numbers_size, (int)threads_in_block, vector_size);
    if (checkForError(cudaStatus, "sumPrefix failed!")) {
        free(input);
        free(results);
        return 1;
    }
    test(results, input, numbers_size);
    printf("%lu, ", results[numbers_size - 1]);
    // hipDeviceReset must be called before exiting in order for profiling and
    // tracing tools such as Nsight and Visual Profiler to show complete traces.
    cudaStatus = hipDeviceReset();
    free(input);
    free(results);
    if (checkForError(cudaStatus, "hipDeviceReset failed!")) {
        return 1;
    }
    return 0;
}
// Computes the inclusive prefix sum of `input` into `results` on the GPU.
// Iteration k adds the element 2^k positions to the left (doubling scan); a
// copy kernel between iterations feeds each step's output back as input.
// Fixes: the step count cast to int *before* ceil() (making ceil a no-op and
// relying on an extra `<=` loop pass to compensate), used float log2 (which
// rounds wrong near large powers of two, e.g. 2^24+1), and printed a long
// with %d.
hipError_t sumPrefix(unsigned long* input, unsigned long* results, long numbers_size, int threads_in_block, int vector_size)
{
    // One thread covers `vector_size` consecutive elements, so we need
    // ceil(ceil(numbers_size / threads_in_block) / vector_size) blocks.
    const int num_blocks = (vector_size - 1 + (numbers_size + threads_in_block - 1) / threads_in_block) / vector_size;
    // ceil(log2(n)) doubling steps suffice; run at least one so n == 1 still
    // writes results[0].
    const int iterations = (numbers_size > 1) ? (int)ceil(log2((double)numbers_size)) : 1;
    long exp = 1;
    printf("iterations = %d, numbers_size = %ld, threads_in_block = %d, num_blocks = %d", iterations, numbers_size, threads_in_block, num_blocks);
    printf("\n shared vector_size: %d", (int)(sizeof(unsigned long) * vector_size * threads_in_block));
    hipError_t cudaStatus;
    unsigned long* dev_input = 0;
    unsigned long* dev_results = 0;
    // Choose which GPU to run on, change this on a multi-GPU system.
    cudaStatus = hipSetDevice(0);
    if (checkForError(cudaStatus, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?", dev_input, dev_results)) {
        return cudaStatus;
    }
    cudaStatus = hipMalloc((void**)&dev_input, numbers_size * sizeof(unsigned long));
    if (checkForError(cudaStatus, "hipMalloc (dev_input) failed!", dev_input, dev_results)) {
        return cudaStatus;
    }
    cudaStatus = hipMalloc((void**)&dev_results, numbers_size * sizeof(unsigned long));
    if (checkForError(cudaStatus, "hipMalloc (dev_results) failed!", dev_input, dev_results)) {
        return cudaStatus;
    }
    // Copy input vectors from host memory to GPU buffers.
    cudaStatus = hipMemcpy(dev_input, input, numbers_size * sizeof(unsigned long), hipMemcpyHostToDevice);
    if (checkForError(cudaStatus, "hipMemcpy (host -> dev, dev_input) failed!", dev_input, dev_results)) {
        return cudaStatus;
    }
    printf("\n\nSTART"); fflush(stdout);
    for (int i = 0; i < iterations; i++) {
        // One doubling step: results[j] = dev_input[j] + dev_input[j - exp].
        prefix_sum_kernel_shared << <num_blocks, threads_in_block, sizeof(unsigned long) * vector_size * threads_in_block >> > (dev_input, dev_results, exp, numbers_size, vector_size);
        exp *= 2;
        // Check for any errors launching the kernel.
        cudaStatus = hipGetLastError();
        if (checkForError(cudaStatus, "prefix_sum_kernel launch failed!", dev_input, dev_results)) {
            return cudaStatus;
        }
        // hipDeviceSynchronize waits for the kernel to finish, and returns any
        // errors encountered during the launch.
        cudaStatus = hipDeviceSynchronize();
        if (checkForError(cudaStatus, "hipDeviceSynchronize on \"prefix_sum_kernel\" returned error code.", dev_input, dev_results)) {
            return cudaStatus;
        }
        // Feed this step's output back as the next step's input.
        copy_kernel << <num_blocks, threads_in_block >> > (dev_input, dev_results, numbers_size, vector_size);
        cudaStatus = hipGetLastError();
        if (checkForError(cudaStatus, "copy_kernel launch failed", dev_input, dev_results)) {
            return cudaStatus;
        }
        cudaStatus = hipDeviceSynchronize();
        if (checkForError(cudaStatus, "hipDeviceSynchronize on \"copy_kernel\" returned error code", dev_input, dev_results)) {
            return cudaStatus;
        }
    }
    printf("\nSTOP"); fflush(stdout);
    // Copy output vector from GPU buffer to host memory.
    cudaStatus = hipMemcpy(results, dev_results, numbers_size * sizeof(unsigned long), hipMemcpyDeviceToHost);
    if (checkForError(cudaStatus, "hipMemcpy (dev -> host, dev_results) failed!", dev_input, dev_results)) {
        return cudaStatus;
    }
    hipFree(dev_input);
    hipFree(dev_results);
    return cudaStatus;
}
| 778ccf6ebaa03467212804079d56ac877d44474c.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <math.h>
#include <time.h>
cudaError_t sumPrefix(unsigned long* input, unsigned long* results, long numbers_size, int threads_in_block, int vector_size);
// Reports a CUDA error (if any) to stderr and releases the two device buffers
// so the caller can bail out without leaking. Returns true iff an error occurred.
bool checkForError(const cudaError_t cudaStatus, const char text[], unsigned long* dev_input, unsigned long* dev_results) {
    if (cudaStatus == cudaSuccess) {
        return false;
    }
    fprintf(stderr, "\n\n%s \nError code: %d \nStatus: %s \n\n", text, cudaStatus, cudaGetErrorString(cudaStatus));
    cudaFree(dev_input);
    cudaFree(dev_results);
    return true;
}
// Overload without device buffers: reports a CUDA error (if any) to stderr.
// Returns true iff an error occurred.
bool checkForError(const cudaError_t cudaStatus, const char text[]) {
    if (cudaStatus == cudaSuccess) {
        return false;
    }
    fprintf(stderr, "\n\n%s \nError code: %d \nStatus: %s \n\n", text, cudaStatus, cudaGetErrorString(cudaStatus));
    return true;
}
// One Hillis-Steele doubling step over global memory only.
// Each thread owns a contiguous chunk of `vector_size` elements and writes
// results[i] = input[i] + input[i - exp], or just input[i] when i < exp.
__global__
void prefix_sum_kernel(unsigned long* input, unsigned long* results, unsigned long exp, int numbers_size, int vector_size) {
    const int first = (blockDim.x * blockIdx.x + threadIdx.x) * vector_size;
    const int last = min(first + vector_size, numbers_size);
    for (int i = first; i < last; i++) {
        results[i] = (i < exp) ? input[i] : input[i] + input[i - exp];
    }
}
// Shared-memory variant of one Hillis-Steele doubling step. Each block first
// stages its chunk of `input` in shared memory (size passed at launch), then
// computes results[i] = input[i] + input[i - exp], reading the left partner
// from shared memory when it belongs to this block's tile and from global
// memory otherwise.
// Fix: removed the per-element debug printf("."/"#") calls, which serialized
// the kernel and flooded stdout on every iteration.
__global__
void prefix_sum_kernel_shared(unsigned long* input, unsigned long* results, long exp, int numbers_size, int vector_size) {
    extern __shared__ unsigned long input_s[];
    int global_start = (blockDim.x * blockIdx.x + threadIdx.x) * vector_size;
    int shared_start = threadIdx.x * vector_size;
    // Stage this thread's chunk into the block's shared tile.
    for (int i = global_start, shared_i = shared_start; i < vector_size + global_start && i < numbers_size; i++, shared_i++) {
        input_s[shared_i] = input[i];
    }
    // Every chunk must be staged before any thread reads a neighbour's slot.
    __syncthreads();
    for (int i = global_start, shared_i = shared_start; i < vector_size + global_start && i < numbers_size; i++, shared_i++) {
        if (i < exp) {
            // No partner that far to the left; pass the value through.
            results[i] = input_s[shared_i];
        }
        else if (shared_i - exp < 0) {
            // Partner precedes this block's tile: fall back to global memory.
            results[i] = input_s[shared_i] + input[i - exp];
        }
        else {
            results[i] = input_s[shared_i] + input_s[shared_i - exp];
        }
    }
}
// Copies `results` back into `input` so the next doubling step of the scan
// reads this iteration's output; same per-thread chunking as the scan kernels.
__global__
void copy_kernel(unsigned long* input, unsigned long* results, int numbers_size, int vector_size) {
    const int first = (blockDim.x * blockIdx.x + threadIdx.x) * vector_size;
    const int last = min(first + vector_size, numbers_size);
    for (int i = first; i < last; i++) {
        input[i] = results[i];
    }
}
// Validates `results` against a sequentially computed inclusive prefix sum of
// `input`, printing a diagnostic for every mismatching position.
// Returns true iff every element matches.
// Fix: use %lu (the values are unsigned long; %ld was undefined behavior) and
// a long loop index so arraySize > INT_MAX does not overflow.
bool test(unsigned long* results, unsigned long* input, long arraySize) {
    bool prefix_sum_ok = true;
    unsigned long sum = 0;
    printf("\n\n");
    for (long i = 0; i < arraySize; i++) {
        sum += input[i];
        if (results[i] != sum) {
            printf("BLAD! NIE ZGADZA SIE! oczekiwana = %lu, dostalem = %lu\n", sum, results[i]);
            prefix_sum_ok = false;
        }
    }
    return prefix_sum_ok;
}
// Reads the problem size and launch configuration from stdin, runs the GPU
// prefix sum, validates it against a CPU reference, and prints the last value.
// Fixes: scanf used "%d" with long* (undefined behavior on LP64 targets),
// the host buffers were never freed, and the final printf used %ld for an
// unsigned long.
int main()
{
    long numbers_size;      // total element count, e.g. 111111111
    long threads_in_block;  // threads per block, e.g. 512
    int vector_size;        // elements processed by each thread, e.g. 32
    printf("Podaj rozmiar wektora >>> ");
    scanf("%ld", &numbers_size);
    printf("Podaj liczbe watkow w bloku >>> ");
    scanf("%ld", &threads_in_block);
    printf("Podaj rozmiar wektora przetwarzanego przez watek >>> ");
    scanf("%d", &vector_size);
    unsigned long* input = (unsigned long*)malloc(numbers_size * sizeof(unsigned long));
    unsigned long* results = (unsigned long*)malloc(numbers_size * sizeof(unsigned long));
    if (input == NULL || results == NULL) {
        fprintf(stderr, "host malloc failed!\n");
        free(input);
        free(results);
        return 1;
    }
    for (long i = 0; i < numbers_size; i++) {
        input[i] = i + 1;
    }
    cudaError_t cudaStatus = sumPrefix(input, results, numbers_size, (int)threads_in_block, vector_size);
    if (checkForError(cudaStatus, "sumPrefix failed!")) {
        free(input);
        free(results);
        return 1;
    }
    test(results, input, numbers_size);
    printf("%lu, ", results[numbers_size - 1]);
    // cudaDeviceReset must be called before exiting in order for profiling and
    // tracing tools such as Nsight and Visual Profiler to show complete traces.
    cudaStatus = cudaDeviceReset();
    free(input);
    free(results);
    if (checkForError(cudaStatus, "cudaDeviceReset failed!")) {
        return 1;
    }
    return 0;
}
// Computes the inclusive prefix sum of `input` into `results` on the GPU.
// Iteration k adds the element 2^k positions to the left (doubling scan); a
// copy kernel between iterations feeds each step's output back as input.
// Fixes: the step count cast to int *before* ceil() (making ceil a no-op and
// relying on an extra `<=` loop pass to compensate), used float log2 (which
// rounds wrong near large powers of two, e.g. 2^24+1), and printed a long
// with %d.
cudaError_t sumPrefix(unsigned long* input, unsigned long* results, long numbers_size, int threads_in_block, int vector_size)
{
    // One thread covers `vector_size` consecutive elements, so we need
    // ceil(ceil(numbers_size / threads_in_block) / vector_size) blocks.
    const int num_blocks = (vector_size - 1 + (numbers_size + threads_in_block - 1) / threads_in_block) / vector_size;
    // ceil(log2(n)) doubling steps suffice; run at least one so n == 1 still
    // writes results[0].
    const int iterations = (numbers_size > 1) ? (int)ceil(log2((double)numbers_size)) : 1;
    long exp = 1;
    printf("iterations = %d, numbers_size = %ld, threads_in_block = %d, num_blocks = %d", iterations, numbers_size, threads_in_block, num_blocks);
    printf("\n shared vector_size: %d", (int)(sizeof(unsigned long) * vector_size * threads_in_block));
    cudaError_t cudaStatus;
    unsigned long* dev_input = 0;
    unsigned long* dev_results = 0;
    // Choose which GPU to run on, change this on a multi-GPU system.
    cudaStatus = cudaSetDevice(0);
    if (checkForError(cudaStatus, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?", dev_input, dev_results)) {
        return cudaStatus;
    }
    cudaStatus = cudaMalloc((void**)&dev_input, numbers_size * sizeof(unsigned long));
    if (checkForError(cudaStatus, "cudaMalloc (dev_input) failed!", dev_input, dev_results)) {
        return cudaStatus;
    }
    cudaStatus = cudaMalloc((void**)&dev_results, numbers_size * sizeof(unsigned long));
    if (checkForError(cudaStatus, "cudaMalloc (dev_results) failed!", dev_input, dev_results)) {
        return cudaStatus;
    }
    // Copy input vectors from host memory to GPU buffers.
    cudaStatus = cudaMemcpy(dev_input, input, numbers_size * sizeof(unsigned long), cudaMemcpyHostToDevice);
    if (checkForError(cudaStatus, "cudaMemcpy (host -> dev, dev_input) failed!", dev_input, dev_results)) {
        return cudaStatus;
    }
    printf("\n\nSTART"); fflush(stdout);
    for (int i = 0; i < iterations; i++) {
        // One doubling step: results[j] = dev_input[j] + dev_input[j - exp].
        prefix_sum_kernel_shared << <num_blocks, threads_in_block, sizeof(unsigned long) * vector_size * threads_in_block >> > (dev_input, dev_results, exp, numbers_size, vector_size);
        exp *= 2;
        // Check for any errors launching the kernel.
        cudaStatus = cudaGetLastError();
        if (checkForError(cudaStatus, "prefix_sum_kernel launch failed!", dev_input, dev_results)) {
            return cudaStatus;
        }
        // cudaDeviceSynchronize waits for the kernel to finish, and returns
        // any errors encountered during the launch.
        cudaStatus = cudaDeviceSynchronize();
        if (checkForError(cudaStatus, "cudaDeviceSynchronize on \"prefix_sum_kernel\" returned error code.", dev_input, dev_results)) {
            return cudaStatus;
        }
        // Feed this step's output back as the next step's input.
        copy_kernel << <num_blocks, threads_in_block >> > (dev_input, dev_results, numbers_size, vector_size);
        cudaStatus = cudaGetLastError();
        if (checkForError(cudaStatus, "copy_kernel launch failed", dev_input, dev_results)) {
            return cudaStatus;
        }
        cudaStatus = cudaDeviceSynchronize();
        if (checkForError(cudaStatus, "cudaDeviceSynchronize on \"copy_kernel\" returned error code", dev_input, dev_results)) {
            return cudaStatus;
        }
    }
    printf("\nSTOP"); fflush(stdout);
    // Copy output vector from GPU buffer to host memory.
    cudaStatus = cudaMemcpy(results, dev_results, numbers_size * sizeof(unsigned long), cudaMemcpyDeviceToHost);
    if (checkForError(cudaStatus, "cudaMemcpy (dev -> host, dev_results) failed!", dev_input, dev_results)) {
        return cudaStatus;
    }
    cudaFree(dev_input);
    cudaFree(dev_results);
    return cudaStatus;
}
|
f2de9dc320f8e240f43dabed0c45a118c9a55864.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
* Copyright (c) 2021 by Contributors
* \file array/cuda/rowwise_sampling.cu
* \brief rowwise sampling
*/
#include <dgl/random.h>
#include <dgl/runtime/device_api.h>
#include <hiprand/hiprand_kernel.h>
#include <numeric>
#include "./dgl_cub.cuh"
#include "../../array/cuda/atomic.cuh"
#include "../../runtime/cuda/cuda_common.h"
using namespace dgl::aten::cuda;
namespace dgl {
namespace aten {
namespace impl {
namespace {
constexpr int WARP_SIZE = 32;
/**
* @brief Compute the size of each row in the sampled CSR, without replacement.
*
* @tparam IdType The type of node and edge indexes.
* @param num_picks The number of non-zero entries to pick per row.
* @param num_rows The number of rows to pick.
* @param in_rows The set of rows to pick.
* @param in_ptr The index where each row's edges start.
* @param out_deg The size of each row in the sampled matrix, as indexed by
* `in_rows` (output).
*/
// One thread per requested row: writes the sampled output degree
// min(num_picks, row degree) into out_deg, plus a trailing zero so an
// exclusive prefix sum over num_rows+1 entries yields the total length.
// Fix: the row-id locals were `int`, truncating IdType (possibly int64_t)
// row indices on matrices with more than INT_MAX rows; the sibling
// _CSRRowWiseSampleDegreeReplaceKernel already used int64_t.
template<typename IdType>
__global__ void _CSRRowWiseSampleDegreeKernel(
    const int64_t num_picks,
    const int64_t num_rows,
    const IdType * const in_rows,
    const IdType * const in_ptr,
    IdType * const out_deg) {
  const int tIdx = threadIdx.x + blockIdx.x*blockDim.x;
  if (tIdx < num_rows) {
    // 64-bit locals: row ids may exceed the range of int.
    const int64_t in_row = in_rows[tIdx];
    const int64_t out_row = tIdx;
    out_deg[out_row] = min(static_cast<IdType>(num_picks), in_ptr[in_row+1]-in_ptr[in_row]);
    if (out_row == num_rows-1) {
      // make the prefixsum work
      out_deg[num_rows] = 0;
    }
  }
}
/**
* @brief Compute the size of each row in the sampled CSR, with replacement.
*
* @tparam IdType The type of node and edge indexes.
* @param num_picks The number of non-zero entries to pick per row.
* @param num_rows The number of rows to pick.
* @param in_rows The set of rows to pick.
* @param in_ptr The index where each row's edges start.
* @param out_deg The size of each row in the sampled matrix, as indexed by
* `in_rows` (output).
*/
// One thread per requested row. When sampling WITH replacement, an occupied
// row always yields exactly num_picks samples and an empty row yields none.
// The trailing zero slot makes the exclusive prefix sum over num_rows+1
// entries produce the total output length.
template<typename IdType>
__global__ void _CSRRowWiseSampleDegreeReplaceKernel(
    const int64_t num_picks,
    const int64_t num_rows,
    const IdType * const in_rows,
    const IdType * const in_ptr,
    IdType * const out_deg) {
  const int tIdx = threadIdx.x + blockIdx.x*blockDim.x;
  if (tIdx < num_rows) {
    const int64_t row = in_rows[tIdx];
    const int64_t deg = in_ptr[row+1] - in_ptr[row];
    out_deg[tIdx] = (deg == 0) ? 0 : static_cast<IdType>(num_picks);
    if (tIdx == num_rows-1) {
      // sentinel slot for the exclusive prefix sum
      out_deg[num_rows] = 0;
    }
  }
}
/**
* @brief Perform row-wise sampling on a CSR matrix, and generate a COO matrix,
* without replacement.
*
* @tparam IdType The ID type used for matrices.
* @tparam BLOCK_WARPS The number of rows each thread block runs in parallel.
* @tparam TILE_SIZE The number of rows covered by each threadblock.
* @param rand_seed The random seed to use.
* @param num_picks The number of non-zeros to pick per row.
* @param num_rows The number of rows to pick.
* @param in_rows The set of rows to pick.
* @param in_ptr The indptr array of the input CSR.
* @param in_index The indices array of the input CSR.
* @param data The data array of the input CSR.
* @param out_ptr The offset to write each row to in the output COO.
* @param out_rows The rows of the output COO (output).
* @param out_cols The columns of the output COO (output).
* @param out_idxs The data array of the output COO (output).
*/
template<typename IdType, int BLOCK_WARPS, int TILE_SIZE>
__global__ void _CSRRowWiseSampleKernel(
    const uint64_t rand_seed,
    const int64_t num_picks,
    const int64_t num_rows,
    const IdType * const in_rows,
    const IdType * const in_ptr,
    const IdType * const in_index,
    const IdType * const data,
    const IdType * const out_ptr,
    IdType * const out_rows,
    IdType * const out_cols,
    IdType * const out_idxs) {
  // we assign one warp per row
  assert(blockDim.x == WARP_SIZE);
  assert(blockDim.y == BLOCK_WARPS);
  // Rows [blockIdx.x*TILE_SIZE, last_row) belong to this block; its
  // BLOCK_WARPS warps stride through them in the while-loop below.
  int64_t out_row = blockIdx.x*TILE_SIZE+threadIdx.y;
  const int64_t last_row = min(static_cast<int64_t>(blockIdx.x+1)*TILE_SIZE, num_rows);
  // Per-thread RNG: seed varies per block, sequence id per thread within it.
  hiprandState_t rng;
  hiprand_init(rand_seed*gridDim.x+blockIdx.x, threadIdx.y*WARP_SIZE+threadIdx.x, 0, &rng);
  while (out_row < last_row) {
    const int64_t row = in_rows[out_row];
    const int64_t in_row_start = in_ptr[row];
    const int64_t deg = in_ptr[row+1] - in_row_start;
    const int64_t out_row_start = out_ptr[out_row];
    if (deg <= num_picks) {
      // just copy row: the row has no more edges than requested, take them all
      for (int idx = threadIdx.x; idx < deg; idx += WARP_SIZE) {
        const IdType in_idx = in_row_start+idx;
        out_rows[out_row_start+idx] = row;
        out_cols[out_row_start+idx] = in_index[in_idx];
        // without an explicit data array, edge id == position in the CSR
        out_idxs[out_row_start+idx] = data ? data[in_idx] : in_idx;
      }
    } else {
      // generate permutation list via reservoir algorithm
      // (out_idxs temporarily holds within-row positions, not edge ids)
      for (int idx = threadIdx.x; idx < num_picks; idx+=WARP_SIZE) {
        out_idxs[out_row_start+idx] = idx;
      }
      __syncwarp();
      for (int idx = num_picks+threadIdx.x; idx < deg; idx+=WARP_SIZE) {
        const int num = hiprand(&rng)%(idx+1);
        if (num < num_picks) {
          // use max so as to achieve the replacement order the serial
          // algorithm would have
          AtomicMax(out_idxs+out_row_start+num, idx);
        }
      }
      __syncwarp();
      // copy permutation over: translate in-row positions to global ids
      for (int idx = threadIdx.x; idx < num_picks; idx += WARP_SIZE) {
        const IdType perm_idx = out_idxs[out_row_start+idx]+in_row_start;
        out_rows[out_row_start+idx] = row;
        out_cols[out_row_start+idx] = in_index[perm_idx];
        if (data) {
          out_idxs[out_row_start+idx] = data[perm_idx];
        }
      }
    }
    out_row += BLOCK_WARPS;
  }
}
/**
* @brief Perform row-wise sampling on a CSR matrix, and generate a COO matrix,
* with replacement.
*
* @tparam IdType The ID type used for matrices.
* @tparam BLOCK_WARPS The number of rows each thread block runs in parallel.
* @tparam TILE_SIZE The number of rows covered by each threadblock.
* @param rand_seed The random seed to use.
* @param num_picks The number of non-zeros to pick per row.
* @param num_rows The number of rows to pick.
* @param in_rows The set of rows to pick.
* @param in_ptr The indptr array of the input CSR.
* @param in_index The indices array of the input CSR.
* @param data The data array of the input CSR.
* @param out_ptr The offset to write each row to in the output COO.
* @param out_rows The rows of the output COO (output).
* @param out_cols The columns of the output COO (output).
* @param out_idxs The data array of the output COO (output).
*/
template<typename IdType, int BLOCK_WARPS, int TILE_SIZE>
__global__ void _CSRRowWiseSampleReplaceKernel(
    const uint64_t rand_seed,
    const int64_t num_picks,
    const int64_t num_rows,
    const IdType * const in_rows,
    const IdType * const in_ptr,
    const IdType * const in_index,
    const IdType * const data,
    const IdType * const out_ptr,
    IdType * const out_rows,
    IdType * const out_cols,
    IdType * const out_idxs) {
  // we assign one warp per row
  assert(blockDim.x == WARP_SIZE);
  // This block covers rows [blockIdx.x*TILE_SIZE, last_row); its warps
  // stride through them in the while-loop below.
  int64_t out_row = blockIdx.x*TILE_SIZE+threadIdx.y;
  const int64_t last_row = min(static_cast<int64_t>(blockIdx.x+1)*TILE_SIZE, num_rows);
  // Independent RNG stream per thread (seed per block, sequence per thread).
  hiprandState_t rng;
  hiprand_init(rand_seed*gridDim.x+blockIdx.x, threadIdx.y*WARP_SIZE+threadIdx.x, 0, &rng);
  while (out_row < last_row) {
    const int64_t row = in_rows[out_row];
    const int64_t in_row_start = in_ptr[row];
    const int64_t out_row_start = out_ptr[out_row];
    const int64_t deg = in_ptr[row+1] - in_row_start;
    if (deg > 0) {
      // each thread then blindly copies in rows only if deg > 0.
      // Sampling WITH replacement: every output slot draws an edge uniformly
      // and independently, so duplicates within a row are possible.
      for (int idx = threadIdx.x; idx < num_picks; idx += blockDim.x) {
        const int64_t edge = hiprand(&rng) % deg;
        const int64_t out_idx = out_row_start+idx;
        out_rows[out_idx] = row;
        out_cols[out_idx] = in_index[in_row_start+edge];
        // without an explicit data array, edge id == position in the CSR
        out_idxs[out_idx] = data ? data[in_row_start+edge] : in_row_start+edge;
      }
    }
    out_row += BLOCK_WARPS;
  }
}
} // namespace
/////////////////////////////// CSR ///////////////////////////////
// Uniformly samples up to `num_picks` edges from each requested row of `mat`
// and returns them as a COO matrix. Pipeline (all on the GPU, default stream):
//   1) per-row output degree, 2) exclusive prefix sum -> output offsets,
//   3) sampling kernel writes rows/cols/edge-ids at those offsets,
//   4) trim the worst-case output arrays to the actual total length.
template <DLDeviceType XPU, typename IdType>
COOMatrix CSRRowWiseSamplingUniform(CSRMatrix mat,
                                    IdArray rows,
                                    const int64_t num_picks,
                                    const bool replace) {
  const auto& ctx = mat.indptr->ctx;
  auto device = runtime::DeviceAPI::Get(ctx);
  // TODO(dlasalle): Once the device api supports getting the stream from the
  // context, that should be used instead of the default stream here.
  hipStream_t stream = 0;
  const int64_t num_rows = rows->shape[0];
  const IdType * const slice_rows = static_cast<const IdType*>(rows->data);
  // Worst-case allocation (num_picks per row); shrunk via CreateView below.
  IdArray picked_row = NewIdArray(num_rows * num_picks, ctx, sizeof(IdType) * 8);
  IdArray picked_col = NewIdArray(num_rows * num_picks, ctx, sizeof(IdType) * 8);
  IdArray picked_idx = NewIdArray(num_rows * num_picks, ctx, sizeof(IdType) * 8);
  const IdType * const in_ptr = static_cast<const IdType*>(mat.indptr->data);
  const IdType * const in_cols = static_cast<const IdType*>(mat.indices->data);
  IdType* const out_rows = static_cast<IdType*>(picked_row->data);
  IdType* const out_cols = static_cast<IdType*>(picked_col->data);
  IdType* const out_idxs = static_cast<IdType*>(picked_idx->data);
  // Explicit edge-id array if the CSR carries one; nullptr means edge id ==
  // position in the indices array (the kernels handle both cases).
  const IdType* const data = CSRHasData(mat) ?
      static_cast<IdType*>(mat.data->data) : nullptr;
  // compute degree
  IdType * out_deg = static_cast<IdType*>(
      device->AllocWorkspace(ctx, (num_rows+1)*sizeof(IdType)));
  if (replace) {
    const dim3 block(512);
    const dim3 grid((num_rows+block.x-1)/block.x);
    hipLaunchKernelGGL(( _CSRRowWiseSampleDegreeReplaceKernel), dim3(grid), dim3(block), 0, stream,
        num_picks, num_rows, slice_rows, in_ptr, out_deg);
  } else {
    const dim3 block(512);
    const dim3 grid((num_rows+block.x-1)/block.x);
    hipLaunchKernelGGL(( _CSRRowWiseSampleDegreeKernel), dim3(grid), dim3(block), 0, stream,
        num_picks, num_rows, slice_rows, in_ptr, out_deg);
  }
  // fill out_ptr
  IdType * out_ptr = static_cast<IdType*>(
      device->AllocWorkspace(ctx, (num_rows+1)*sizeof(IdType)));
  // Standard two-phase CUB call: first query the temp-storage size with a
  // null pointer, then run the scan for real.
  size_t prefix_temp_size = 0;
  CUDA_CALL(hipcub::DeviceScan::ExclusiveSum(nullptr, prefix_temp_size,
      out_deg,
      out_ptr,
      num_rows+1,
      stream));
  void * prefix_temp = device->AllocWorkspace(ctx, prefix_temp_size);
  CUDA_CALL(hipcub::DeviceScan::ExclusiveSum(prefix_temp, prefix_temp_size,
      out_deg,
      out_ptr,
      num_rows+1,
      stream));
  device->FreeWorkspace(ctx, prefix_temp);
  device->FreeWorkspace(ctx, out_deg);
  hipEvent_t copyEvent;
  CUDA_CALL(hipEventCreate(&copyEvent));
  // TODO(dlasalle): use pinned memory to overlap with the actual sampling, and wait on
  // a cudaevent
  // out_ptr[num_rows] holds the total number of sampled edges. Start the
  // device->host copy now and only wait for it (via copyEvent) after the
  // sampling kernels have been queued on the same stream.
  IdType new_len;
  device->CopyDataFromTo(out_ptr, num_rows*sizeof(new_len), &new_len, 0,
      sizeof(new_len),
      ctx,
      DGLContext{kDLCPU, 0},
      mat.indptr->dtype,
      stream);
  CUDA_CALL(hipEventRecord(copyEvent, stream));
  const uint64_t random_seed = RandomEngine::ThreadLocal()->RandInt(1000000000);
  // select edges
  if (replace) {
    // One warp per row, BLOCK_WARPS warps per block, TILE_SIZE rows per block.
    constexpr int BLOCK_WARPS = 128/WARP_SIZE;
    // the number of rows each thread block will cover
    constexpr int TILE_SIZE = BLOCK_WARPS*16;
    const dim3 block(WARP_SIZE, BLOCK_WARPS);
    const dim3 grid((num_rows+TILE_SIZE-1)/TILE_SIZE);
    hipLaunchKernelGGL(( _CSRRowWiseSampleReplaceKernel<IdType, BLOCK_WARPS, TILE_SIZE>), dim3(grid), dim3(block), 0, stream,
        random_seed,
        num_picks,
        num_rows,
        slice_rows,
        in_ptr,
        in_cols,
        data,
        out_ptr,
        out_rows,
        out_cols,
        out_idxs);
  } else {
    constexpr int BLOCK_WARPS = 128/WARP_SIZE;
    // the number of rows each thread block will cover
    constexpr int TILE_SIZE = BLOCK_WARPS*16;
    const dim3 block(WARP_SIZE, BLOCK_WARPS);
    const dim3 grid((num_rows+TILE_SIZE-1)/TILE_SIZE);
    hipLaunchKernelGGL(( _CSRRowWiseSampleKernel<IdType, BLOCK_WARPS, TILE_SIZE>), dim3(grid), dim3(block), 0, stream,
        random_seed,
        num_picks,
        num_rows,
        slice_rows,
        in_ptr,
        in_cols,
        data,
        out_ptr,
        out_rows,
        out_cols,
        out_idxs);
  }
  device->FreeWorkspace(ctx, out_ptr);
  // wait for copying `new_len` to finish
  CUDA_CALL(hipEventSynchronize(copyEvent));
  CUDA_CALL(hipEventDestroy(copyEvent));
  // Shrink the worst-case arrays to the actual number of sampled edges.
  picked_row = picked_row.CreateView({new_len}, picked_row->dtype);
  picked_col = picked_col.CreateView({new_len}, picked_col->dtype);
  picked_idx = picked_idx.CreateView({new_len}, picked_idx->dtype);
  return COOMatrix(mat.num_rows, mat.num_cols, picked_row,
      picked_col, picked_idx);
}
template COOMatrix CSRRowWiseSamplingUniform<kDLGPU, int32_t>(
CSRMatrix, IdArray, int64_t, bool);
template COOMatrix CSRRowWiseSamplingUniform<kDLGPU, int64_t>(
CSRMatrix, IdArray, int64_t, bool);
} // namespace impl
} // namespace aten
} // namespace dgl
| f2de9dc320f8e240f43dabed0c45a118c9a55864.cu | /*!
* Copyright (c) 2021 by Contributors
* \file array/cuda/rowwise_sampling.cu
* \brief rowwise sampling
*/
#include <dgl/random.h>
#include <dgl/runtime/device_api.h>
#include <curand_kernel.h>
#include <numeric>
#include "./dgl_cub.cuh"
#include "../../array/cuda/atomic.cuh"
#include "../../runtime/cuda/cuda_common.h"
using namespace dgl::aten::cuda;
namespace dgl {
namespace aten {
namespace impl {
namespace {
constexpr int WARP_SIZE = 32;
/**
* @brief Compute the size of each row in the sampled CSR, without replacement.
*
* @tparam IdType The type of node and edge indexes.
* @param num_picks The number of non-zero entries to pick per row.
* @param num_rows The number of rows to pick.
* @param in_rows The set of rows to pick.
* @param in_ptr The index where each row's edges start.
* @param out_deg The size of each row in the sampled matrix, as indexed by
* `in_rows` (output).
*/
// One thread per requested row: writes the sampled output degree
// min(num_picks, row degree) into out_deg, plus a trailing zero so an
// exclusive prefix sum over num_rows+1 entries yields the total length.
// Fix: the row-id locals were `int`, truncating IdType (possibly int64_t)
// row indices on matrices with more than INT_MAX rows; the sibling
// _CSRRowWiseSampleDegreeReplaceKernel already used int64_t.
template<typename IdType>
__global__ void _CSRRowWiseSampleDegreeKernel(
    const int64_t num_picks,
    const int64_t num_rows,
    const IdType * const in_rows,
    const IdType * const in_ptr,
    IdType * const out_deg) {
  const int tIdx = threadIdx.x + blockIdx.x*blockDim.x;
  if (tIdx < num_rows) {
    // 64-bit locals: row ids may exceed the range of int.
    const int64_t in_row = in_rows[tIdx];
    const int64_t out_row = tIdx;
    out_deg[out_row] = min(static_cast<IdType>(num_picks), in_ptr[in_row+1]-in_ptr[in_row]);
    if (out_row == num_rows-1) {
      // make the prefixsum work
      out_deg[num_rows] = 0;
    }
  }
}
/**
* @brief Compute the size of each row in the sampled CSR, with replacement.
*
* @tparam IdType The type of node and edge indexes.
* @param num_picks The number of non-zero entries to pick per row.
* @param num_rows The number of rows to pick.
* @param in_rows The set of rows to pick.
* @param in_ptr The index where each row's edges start.
* @param out_deg The size of each row in the sampled matrix, as indexed by
* `in_rows` (output).
*/
// One thread per requested row. When sampling WITH replacement, an occupied
// row always yields exactly num_picks samples and an empty row yields none.
// The trailing zero slot makes the exclusive prefix sum over num_rows+1
// entries produce the total output length.
template<typename IdType>
__global__ void _CSRRowWiseSampleDegreeReplaceKernel(
    const int64_t num_picks,
    const int64_t num_rows,
    const IdType * const in_rows,
    const IdType * const in_ptr,
    IdType * const out_deg) {
  const int tIdx = threadIdx.x + blockIdx.x*blockDim.x;
  if (tIdx < num_rows) {
    const int64_t row = in_rows[tIdx];
    const int64_t deg = in_ptr[row+1] - in_ptr[row];
    out_deg[tIdx] = (deg == 0) ? 0 : static_cast<IdType>(num_picks);
    if (tIdx == num_rows-1) {
      // sentinel slot for the exclusive prefix sum
      out_deg[num_rows] = 0;
    }
  }
}
/**
* @brief Perform row-wise sampling on a CSR matrix, and generate a COO matrix,
* without replacement.
*
* @tparam IdType The ID type used for matrices.
* @tparam BLOCK_WARPS The number of rows each thread block runs in parallel.
* @tparam TILE_SIZE The number of rows covered by each threadblock.
* @param rand_seed The random seed to use.
* @param num_picks The number of non-zeros to pick per row.
* @param num_rows The number of rows to pick.
* @param in_rows The set of rows to pick.
* @param in_ptr The indptr array of the input CSR.
* @param in_index The indices array of the input CSR.
* @param data The data array of the input CSR.
* @param out_ptr The offset to write each row to in the output COO.
* @param out_rows The rows of the output COO (output).
* @param out_cols The columns of the output COO (output).
* @param out_idxs The data array of the output COO (output).
*/
template<typename IdType, int BLOCK_WARPS, int TILE_SIZE>
__global__ void _CSRRowWiseSampleKernel(
    const uint64_t rand_seed,
    const int64_t num_picks,
    const int64_t num_rows,
    const IdType * const in_rows,
    const IdType * const in_ptr,
    const IdType * const in_index,
    const IdType * const data,
    const IdType * const out_ptr,
    IdType * const out_rows,
    IdType * const out_cols,
    IdType * const out_idxs) {
  // we assign one warp per row
  assert(blockDim.x == WARP_SIZE);
  assert(blockDim.y == BLOCK_WARPS);
  // Rows [blockIdx.x*TILE_SIZE, last_row) belong to this block; its
  // BLOCK_WARPS warps stride through them in the while-loop below.
  int64_t out_row = blockIdx.x*TILE_SIZE+threadIdx.y;
  const int64_t last_row = min(static_cast<int64_t>(blockIdx.x+1)*TILE_SIZE, num_rows);
  // Per-thread RNG: seed varies per block, sequence id per thread within it.
  curandState rng;
  curand_init(rand_seed*gridDim.x+blockIdx.x, threadIdx.y*WARP_SIZE+threadIdx.x, 0, &rng);
  while (out_row < last_row) {
    const int64_t row = in_rows[out_row];
    const int64_t in_row_start = in_ptr[row];
    const int64_t deg = in_ptr[row+1] - in_row_start;
    const int64_t out_row_start = out_ptr[out_row];
    if (deg <= num_picks) {
      // just copy row: the row has no more edges than requested, take them all
      for (int idx = threadIdx.x; idx < deg; idx += WARP_SIZE) {
        const IdType in_idx = in_row_start+idx;
        out_rows[out_row_start+idx] = row;
        out_cols[out_row_start+idx] = in_index[in_idx];
        // without an explicit data array, edge id == position in the CSR
        out_idxs[out_row_start+idx] = data ? data[in_idx] : in_idx;
      }
    } else {
      // generate permutation list via reservoir algorithm
      // (out_idxs temporarily holds within-row positions, not edge ids)
      for (int idx = threadIdx.x; idx < num_picks; idx+=WARP_SIZE) {
        out_idxs[out_row_start+idx] = idx;
      }
      __syncwarp();
      for (int idx = num_picks+threadIdx.x; idx < deg; idx+=WARP_SIZE) {
        const int num = curand(&rng)%(idx+1);
        if (num < num_picks) {
          // use max so as to achieve the replacement order the serial
          // algorithm would have
          AtomicMax(out_idxs+out_row_start+num, idx);
        }
      }
      __syncwarp();
      // copy permutation over: translate in-row positions to global ids
      for (int idx = threadIdx.x; idx < num_picks; idx += WARP_SIZE) {
        const IdType perm_idx = out_idxs[out_row_start+idx]+in_row_start;
        out_rows[out_row_start+idx] = row;
        out_cols[out_row_start+idx] = in_index[perm_idx];
        if (data) {
          out_idxs[out_row_start+idx] = data[perm_idx];
        }
      }
    }
    out_row += BLOCK_WARPS;
  }
}
/**
* @brief Perform row-wise sampling on a CSR matrix, and generate a COO matrix,
* with replacement.
*
* @tparam IdType The ID type used for matrices.
* @tparam BLOCK_WARPS The number of rows each thread block runs in parallel.
* @tparam TILE_SIZE The number of rows covered by each threadblock.
* @param rand_seed The random seed to use.
* @param num_picks The number of non-zeros to pick per row.
* @param num_rows The number of rows to pick.
* @param in_rows The set of rows to pick.
* @param in_ptr The indptr array of the input CSR.
* @param in_index The indices array of the input CSR.
* @param data The data array of the input CSR.
* @param out_ptr The offset to write each row to in the output COO.
* @param out_rows The rows of the output COO (output).
* @param out_cols The columns of the output COO (output).
* @param out_idxs The data array of the output COO (output).
*/
template<typename IdType, int BLOCK_WARPS, int TILE_SIZE>
__global__ void _CSRRowWiseSampleReplaceKernel(
    const uint64_t rand_seed,
    const int64_t num_picks,
    const int64_t num_rows,
    const IdType * const in_rows,
    const IdType * const in_ptr,
    const IdType * const in_index,
    const IdType * const data,
    const IdType * const out_ptr,
    IdType * const out_rows,
    IdType * const out_cols,
    IdType * const out_idxs) {
  // we assign one warp per row
  assert(blockDim.x == WARP_SIZE);
  // This block covers rows [blockIdx.x*TILE_SIZE, last_row); its warps
  // stride through them in the while-loop below.
  int64_t out_row = blockIdx.x*TILE_SIZE+threadIdx.y;
  const int64_t last_row = min(static_cast<int64_t>(blockIdx.x+1)*TILE_SIZE, num_rows);
  // Independent RNG stream per thread (seed per block, sequence per thread).
  curandState rng;
  curand_init(rand_seed*gridDim.x+blockIdx.x, threadIdx.y*WARP_SIZE+threadIdx.x, 0, &rng);
  while (out_row < last_row) {
    const int64_t row = in_rows[out_row];
    const int64_t in_row_start = in_ptr[row];
    const int64_t out_row_start = out_ptr[out_row];
    const int64_t deg = in_ptr[row+1] - in_row_start;
    if (deg > 0) {
      // each thread then blindly copies in rows only if deg > 0.
      // Sampling WITH replacement: every output slot draws an edge uniformly
      // and independently, so duplicates within a row are possible.
      for (int idx = threadIdx.x; idx < num_picks; idx += blockDim.x) {
        const int64_t edge = curand(&rng) % deg;
        const int64_t out_idx = out_row_start+idx;
        out_rows[out_idx] = row;
        out_cols[out_idx] = in_index[in_row_start+edge];
        // without an explicit data array, edge id == position in the CSR
        out_idxs[out_idx] = data ? data[in_row_start+edge] : in_row_start+edge;
      }
    }
    out_row += BLOCK_WARPS;
  }
}
} // namespace
/////////////////////////////// CSR ///////////////////////////////
// Uniformly samples up to `num_picks` non-zeros per requested row of `mat`
// (with or without replacement) and returns the result as a COO matrix.
// Pipeline: per-row output degree -> exclusive prefix sum (row offsets) ->
// async copy of the total output size to host -> sampling kernel -> shrink
// the preallocated outputs to the actual length.
template <DLDeviceType XPU, typename IdType>
COOMatrix CSRRowWiseSamplingUniform(CSRMatrix mat,
IdArray rows,
const int64_t num_picks,
const bool replace) {
const auto& ctx = mat.indptr->ctx;
auto device = runtime::DeviceAPI::Get(ctx);
// TODO(dlasalle): Once the device api supports getting the stream from the
// context, that should be used instead of the default stream here.
cudaStream_t stream = 0;
const int64_t num_rows = rows->shape[0];
const IdType * const slice_rows = static_cast<const IdType*>(rows->data);
// Outputs are allocated for the worst case (num_rows * num_picks) and
// shrunk via CreateView at the end.
IdArray picked_row = NewIdArray(num_rows * num_picks, ctx, sizeof(IdType) * 8);
IdArray picked_col = NewIdArray(num_rows * num_picks, ctx, sizeof(IdType) * 8);
IdArray picked_idx = NewIdArray(num_rows * num_picks, ctx, sizeof(IdType) * 8);
const IdType * const in_ptr = static_cast<const IdType*>(mat.indptr->data);
const IdType * const in_cols = static_cast<const IdType*>(mat.indices->data);
IdType* const out_rows = static_cast<IdType*>(picked_row->data);
IdType* const out_cols = static_cast<IdType*>(picked_col->data);
IdType* const out_idxs = static_cast<IdType*>(picked_idx->data);
// Null `data` tells the kernels to emit implicit edge ids.
const IdType* const data = CSRHasData(mat) ?
static_cast<IdType*>(mat.data->data) : nullptr;
// compute degree
// out_deg[i] = number of entries row i will contribute to the output
// (capped by num_picks; the replace variant differs — see the kernels).
IdType * out_deg = static_cast<IdType*>(
device->AllocWorkspace(ctx, (num_rows+1)*sizeof(IdType)));
if (replace) {
const dim3 block(512);
const dim3 grid((num_rows+block.x-1)/block.x);
_CSRRowWiseSampleDegreeReplaceKernel<<<grid, block, 0, stream>>>(
num_picks, num_rows, slice_rows, in_ptr, out_deg);
} else {
const dim3 block(512);
const dim3 grid((num_rows+block.x-1)/block.x);
_CSRRowWiseSampleDegreeKernel<<<grid, block, 0, stream>>>(
num_picks, num_rows, slice_rows, in_ptr, out_deg);
}
// fill out_ptr
// Exclusive scan over num_rows+1 entries: out_ptr[i] is the output offset
// of row i, and out_ptr[num_rows] is the total number of sampled edges.
IdType * out_ptr = static_cast<IdType*>(
device->AllocWorkspace(ctx, (num_rows+1)*sizeof(IdType)));
size_t prefix_temp_size = 0;
// First CUB call only queries the required temp-storage size.
CUDA_CALL(cub::DeviceScan::ExclusiveSum(nullptr, prefix_temp_size,
out_deg,
out_ptr,
num_rows+1,
stream));
void * prefix_temp = device->AllocWorkspace(ctx, prefix_temp_size);
CUDA_CALL(cub::DeviceScan::ExclusiveSum(prefix_temp, prefix_temp_size,
out_deg,
out_ptr,
num_rows+1,
stream));
device->FreeWorkspace(ctx, prefix_temp);
device->FreeWorkspace(ctx, out_deg);
cudaEvent_t copyEvent;
CUDA_CALL(cudaEventCreate(&copyEvent));
// TODO(dlasalle): use pinned memory to overlap with the actual sampling, and wait on
// a cudaevent
// Copy out_ptr[num_rows] (the total output length) to the host; the event
// recorded below marks completion so we only block right before using it.
IdType new_len;
device->CopyDataFromTo(out_ptr, num_rows*sizeof(new_len), &new_len, 0,
sizeof(new_len),
ctx,
DGLContext{kDLCPU, 0},
mat.indptr->dtype,
stream);
CUDA_CALL(cudaEventRecord(copyEvent, stream));
const uint64_t random_seed = RandomEngine::ThreadLocal()->RandInt(1000000000);
// select edges
// One warp per row; each block covers TILE_SIZE rows (see kernel docs).
if (replace) {
constexpr int BLOCK_WARPS = 128/WARP_SIZE;
// the number of rows each thread block will cover
constexpr int TILE_SIZE = BLOCK_WARPS*16;
const dim3 block(WARP_SIZE, BLOCK_WARPS);
const dim3 grid((num_rows+TILE_SIZE-1)/TILE_SIZE);
_CSRRowWiseSampleReplaceKernel<IdType, BLOCK_WARPS, TILE_SIZE><<<grid, block, 0, stream>>>(
random_seed,
num_picks,
num_rows,
slice_rows,
in_ptr,
in_cols,
data,
out_ptr,
out_rows,
out_cols,
out_idxs);
} else {
constexpr int BLOCK_WARPS = 128/WARP_SIZE;
// the number of rows each thread block will cover
constexpr int TILE_SIZE = BLOCK_WARPS*16;
const dim3 block(WARP_SIZE, BLOCK_WARPS);
const dim3 grid((num_rows+TILE_SIZE-1)/TILE_SIZE);
_CSRRowWiseSampleKernel<IdType, BLOCK_WARPS, TILE_SIZE><<<grid, block, 0, stream>>>(
random_seed,
num_picks,
num_rows,
slice_rows,
in_ptr,
in_cols,
data,
out_ptr,
out_rows,
out_cols,
out_idxs);
}
device->FreeWorkspace(ctx, out_ptr);
// wait for copying `new_len` to finish
CUDA_CALL(cudaEventSynchronize(copyEvent));
CUDA_CALL(cudaEventDestroy(copyEvent));
// Shrink the worst-case allocations down to the actual sampled length.
picked_row = picked_row.CreateView({new_len}, picked_row->dtype);
picked_col = picked_col.CreateView({new_len}, picked_col->dtype);
picked_idx = picked_idx.CreateView({new_len}, picked_idx->dtype);
return COOMatrix(mat.num_rows, mat.num_cols, picked_row,
picked_col, picked_idx);
}
template COOMatrix CSRRowWiseSamplingUniform<kDLGPU, int32_t>(
CSRMatrix, IdArray, int64_t, bool);
template COOMatrix CSRRowWiseSamplingUniform<kDLGPU, int64_t>(
CSRMatrix, IdArray, int64_t, bool);
} // namespace impl
} // namespace aten
} // namespace dgl
|
dc5d5dcf5dc34e69e5433553a2b3d81744f0ceab.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernels.h"
#define BLOCK_SIZE 1024
#define UINT32_BLOCK_STEP_2 64 // 16X4
#define BLOCK_SIZE_4 256
#define CLAMP_1(x) x < 0 ? 0 : (x > 1 ? 1 : x)
#define CLAMP_255(x) x < 0 ? 0 : (x > 255 ? 255 : x)
#define CLAMP_int8(x) x < -128 ? -128 : (x > 127 ? 127 : x)
// https://en.wikipedia.org/wiki/YUV#Y%E2%80%B2UV420p_(and_Y%E2%80%B2V12_or_YV12)_to_RGB888_conversion
//#define YUV_TO_RGB( Y, U, V, R, G, B ) \
// do \
// { \
// int rTmp = Y + (1.370705 * V); \
// int gTmp = Y - (0.698001 * V) - (0.337633 * U); \
// int bTmp = Y + (1.732446 * U); \
// R = CLAMP_255(rTmp); \
// G = CLAMP_255(gTmp); \
// B = CLAMP_255(bTmp); \
// } while (0)
// Converts one YUV sample to 8-bit RGB using the coefficients from the
// Wikipedia YUV page linked above. U and V are expected to be already
// centered around 0 (callers subtract 128 first); outputs are clamped to
// [0, 255]. __fmul_rn keeps the multiplies in round-to-nearest fp32.
#define YUV_TO_RGB( Y, U, V, R, G, B ) \
do \
{ \
float rTmp = Y + __fmul_rn (1.370705, V); \
float gTmp = Y - __fmul_rn (0.698001, V) - __fmul_rn (0.337633, U); \
float bTmp = Y + __fmul_rn (1.732446, U); \
R = CLAMP_255(rTmp); \
G = CLAMP_255(gTmp); \
B = CLAMP_255(bTmp); \
} while (0)
#define RGB_TO_Y( R, G, B, Y ) \
do \
{ \
int yTmp = __fmul_rn(R, 0.299) + __fmul_rn (0.587, G) + __fmul_rn (0.114, B); \
Y = CLAMP_255(yTmp); \
} while (0)
#define RGB_TO_UV( R, G, B, U, V ) \
do \
{ \
int uTmp = __fmul_rn(B, 0.436) - __fmul_rn (0.289, G) - __fmul_rn (0.147, R); \
int vTmp = __fmul_rn(R, 0.615) - __fmul_rn (0.515, G) - __fmul_rn (0.1, B); \
U = 128 + (CLAMP_int8(uTmp)); \
V = 128 + (CLAMP_int8(vTmp)); \
} while (0)
// Converts 8-bit RGB to normalized HSV, each component clamped to [0, 1].
// NOTE(review): in the achromatic case (nDivisor == 0) the hue section below
// still divides by nDivisor, and the R==V branch then overwrites the
// nH = 0.0F set in the achromatic branch with an Inf/NaN-derived value.
// Downstream HSV_TO_RGB happens to survive because S == 0 takes the grey
// path, but the emitted H is not meaningful for grey inputs — TODO confirm
// and consider guarding the hue computation on nDivisor != 0.
#define RGB_TO_HSV(R, G, B, H, S, V) \
do \
{ \
Npp32f nNormalizedR = __fmul_rn(R, 0.003921569F); /* 255.0F*/ \
Npp32f nNormalizedG = __fmul_rn(G, 0.003921569F); \
Npp32f nNormalizedB = __fmul_rn(B, 0.003921569F); \
Npp32f nS; \
Npp32f nH; \
/* Value*/ \
Npp32f nV = fmaxf(nNormalizedR, nNormalizedG); \
nV = fmaxf(nV, nNormalizedB); \
/*Saturation*/ \
Npp32f nTemp = fminf(nNormalizedR, nNormalizedG); \
nTemp = fminf(nTemp, nNormalizedB); \
Npp32f nDivisor = __fsub_rn(nV, nTemp); \
if (nV == 0.0F) /*achromatics case*/ \
{ \
nS = 0.0F; \
nH = 0.0F; \
} \
else /*chromatics case*/ \
{ \
nS = __fdiv_rn(nDivisor, nV); \
} \
/* Hue:*/ \
Npp32f nCr = __fdiv_rn(__fsub_rn(nV, nNormalizedR), nDivisor); \
Npp32f nCg = __fdiv_rn(__fsub_rn(nV, nNormalizedG), nDivisor); \
Npp32f nCb = __fdiv_rn(__fsub_rn(nV, nNormalizedB), nDivisor); \
if (nNormalizedR == nV) \
nH = nCb - nCg; \
else if (nNormalizedG == nV) \
nH = __fadd_rn(2.0F, __fsub_rn(nCr, nCb)); \
else if (nNormalizedB == nV) \
nH = __fadd_rn(4.0F, __fsub_rn(nCg, nCr)); \
nH = __fmul_rn(nH, 0.166667F); /* 6.0F*/ \
if (nH < 0.0F) \
nH = __fadd_rn(nH, 1.0F); \
H = CLAMP_1(nH); \
S = CLAMP_1(nS); \
V = CLAMP_1(nV); \
\
} while(0)
#define HSV_TO_RGB(nNormalizedH, nNormalizedS, nNormalizedV, R, G, B) \
do \
{ \
Npp32f nR; \
Npp32f nG; \
Npp32f nB; \
if (nNormalizedS == 0.0F) \
{ \
nR = nG = nB = nNormalizedV; \
} \
else \
{ \
if (nNormalizedH == 1.0F) \
nNormalizedH = 0.0F; \
else \
{ \
/* 0.1667F*/ \
nNormalizedH = __fmul_rn(nNormalizedH, 6.0F); \
} \
} \
Npp32f nI = floorf(nNormalizedH); \
Npp32f nF = nNormalizedH - nI; \
Npp32f nM = __fmul_rn(nNormalizedV, __fsub_rn(1.0F, nNormalizedS)); \
Npp32f nN = __fmul_rn(nNormalizedV, __fsub_rn(1.0F, __fmul_rn(nNormalizedS, nF) ) ); \
Npp32f nK = __fmul_rn(nNormalizedV, __fsub_rn(1.0F, __fmul_rn(nNormalizedS, __fsub_rn(1.0F, nF)) ) ); \
if (nI == 0.0F) \
{ \
nR = nNormalizedV; nG = nK; nB = nM; \
} \
else if (nI == 1.0F) \
{ \
nR = nN; nG = nNormalizedV; nB = nM; \
} \
else if (nI == 2.0F) \
{ \
nR = nM; nG = nNormalizedV; nB = nK; \
} \
else if (nI == 3.0F) \
{ \
nR = nM; nG = nN; nB = nNormalizedV; \
} \
else if (nI == 4.0F) \
{ \
nR = nK; nG = nM; nB = nNormalizedV; \
} \
else if (nI == 5.0F) \
{ \
nR = nNormalizedV; nG = nM; nB = nN; \
} \
R = CLAMP_255(__fmul_rn(nR, 255.0F)); \
G = CLAMP_255(__fmul_rn(nG, 255.0F)); \
B = CLAMP_255(__fmul_rn(nB, 255.0F)); \
\
} while(0)
#define RGBHUESATURATIONADJUST(r, g, b, R, G, B, hue, saturation) \
do \
{ \
Npp32f H, S, V; \
RGB_TO_HSV(r, g, b, H, S, V); \
H = CLAMP_1(__fadd_rn(H, hue)); \
S = CLAMP_1(__fmul_rn(S, saturation)); \
HSV_TO_RGB(H, S, V, R, G, B); \
} while(0)
#define BRIGHNESS_CONTRAST(input, output, brightness, contrast) \
do \
{ \
output = __fadd_rn(__fmul_rn(input, contrast), brightness); \
output = CLAMP_255(output); \
} while(0) \
#define BRIGHNESS_CONTRAST_RGB(r, g, b, brightness, contrast) \
do \
{ \
BRIGHNESS_CONTRAST(r, r, brightness, contrast); \
BRIGHNESS_CONTRAST(g, g, brightness, contrast); \
BRIGHNESS_CONTRAST(b, b, brightness, contrast); \
} while(0)
#define YUVHUESATURATIONADJUST_Y(y, u, v, Y, brightness, contrast, hue, saturation) \
do \
{ \
Npp32f r, g, b; \
YUV_TO_RGB(y, u, v, r, g, b); \
BRIGHNESS_CONTRAST_RGB(r, g, b, brightness, contrast); \
Npp8u R, G, B; \
RGBHUESATURATIONADJUST(r, g, b, R, G, B, hue, saturation); \
RGB_TO_Y(R, G, B, Y); \
} while(0)
#define YUVHUESATURATIONADJUST(y, u, v, Y, U, V, brightness, contrast, hue, saturation) \
do \
{ \
Npp32f r, g, b; \
YUV_TO_RGB(y, u, v, r, g, b); \
BRIGHNESS_CONTRAST_RGB(r, g, b, brightness, contrast); \
Npp8u R, G, B; \
RGBHUESATURATIONADJUST(r, g, b, R, G, B, hue, saturation); \
RGB_TO_Y(R, G, B, Y); \
RGB_TO_UV(R, G, B, U, V); \
} while (0)
// YUV420 -> planar RGB where each thread handles 4 horizontally adjacent
// pixels packed in one uchar4 of Y (and writes one uchar4 per output plane).
// U/V planes are read as packed uint32_t words and staged in shared memory.
// `step_y`/`step_uv` are strides in elements (uchar4/uint32_t respectively).
// NOTE(review): the shared-memory index math below (threadIdx.y << 3,
// UINT32_BLOCK_STEP_2, BLOCK_SIZE_4) only lines up for a 32x32 thread block
// as launched by launch_yuv420torgb — TODO confirm / assert blockDim.
__global__ void yuv420torgb(const uchar4* Y, const uint32_t* U, const uint32_t* V, uchar4* r, uchar4* g, uchar4* b, int width, int height, int step_y, int step_uv)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= width || y >= height)
{
return;
}
int offset = y * step_y + x;
__shared__ uint32_t u_data[BLOCK_SIZE_4];
__shared__ uint32_t v_data[BLOCK_SIZE_4];
// read for every 4 frames once
// Only threads with even (x, y) load: one packed U/V word covers a
// 2x2 block of packed-Y positions (4:2:0 subsampling).
if (threadIdx.x % 2 == 0 && threadIdx.y % 2 == 0)
{
// 16*threadIdx.y*0.5 + threadIdx.x*0.5
auto uvThreadOffset = (threadIdx.y << 3) + (threadIdx.x >> 1);
auto uvOffset = (y >> 1) * (step_uv)+(x >> 1);
u_data[uvThreadOffset] = U[uvOffset];
v_data[uvThreadOffset] = V[uvOffset];
}
// Barrier: the staged chroma is read by all threads of the block below.
__syncthreads();
// 32x32x4 y, r, g, b values
// 16x16x4 u, v values
// Re-index the staged words byte-wise; each pair of output pixels
// (x/y then z/w) shares one chroma byte.
auto u_data_uint8 = reinterpret_cast<uint8_t*>(u_data);
auto v_data_uint8 = reinterpret_cast<uint8_t*>(v_data);
auto uvThreadOffset = (threadIdx.y >> 1)*UINT32_BLOCK_STEP_2 + (threadIdx.x << 1);
int u_value = u_data_uint8[uvThreadOffset] - 128;
int v_value = v_data_uint8[uvThreadOffset] - 128;
YUV_TO_RGB(Y[offset].x, u_value, v_value, r[offset].x, g[offset].x, b[offset].x);
YUV_TO_RGB(Y[offset].y, u_value, v_value, r[offset].y, g[offset].y, b[offset].y);
uvThreadOffset += 1;
u_value = u_data_uint8[uvThreadOffset] - 128;
v_value = v_data_uint8[uvThreadOffset] - 128;
YUV_TO_RGB(Y[offset].z, u_value, v_value, r[offset].z, g[offset].z, b[offset].z);
YUV_TO_RGB(Y[offset].w, u_value, v_value, r[offset].w, g[offset].w, b[offset].w);
}
// One thread per pixel: converts a single Y sample plus its shared 2x2
// chroma sample (4:2:0) to the planar r/g/b outputs. `step_y` and
// `step_uv` are per-plane row strides in elements.
__global__ void yuv420torgb_plain2(const uint8_t* Y, const uint8_t* U, const uint8_t* V, uint8_t* r, uint8_t* g, uint8_t* b, int width, int height, int step_y, int step_uv)
{
    const int col = blockDim.x * blockIdx.x + threadIdx.x;
    const int row = blockDim.y * blockIdx.y + threadIdx.y;
    if (col < width && row < height)
    {
        const int lumaIdx = row * step_y + col;
        // Chroma is subsampled 2x in both dimensions.
        const int chromaIdx = (row >> 1) * step_uv + (col >> 1);
        const int uCentered = static_cast<int>(U[chromaIdx]) - 128;
        const int vCentered = static_cast<int>(V[chromaIdx]) - 128;
        YUV_TO_RGB(Y[lumaIdx], uCentered, vCentered, r[lumaIdx], g[lumaIdx], b[lumaIdx]);
    }
}
// YUV420 -> planar RGB where each thread handles 4 horizontally adjacent
// pixels packed in one uchar4 of Y; U/V stay byte-addressed in global
// memory (no shared-memory staging). `step_y` is in uchar4 elements,
// `step_uv` in bytes.
__global__ void yuv420torgb_plain(const uchar4* Y, const uint8_t* U, const uint8_t* V, uchar4* r, uchar4* g, uchar4* b, int width, int height, int step_y, int step_uv)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= width || y >= height)
{
return;
}
int offset = y * step_y + x;
// x << 1 (not >> 1): x counts groups of 4 luma pixels, which span
// 2 chroma samples, so the byte offset into U/V is 2*x.
auto uvOffset = (y >> 1) * (step_uv)+(x << 1);
int u_value = U[uvOffset] - 128;
int v_value = V[uvOffset] - 128;
// First chroma sample covers packed pixels .x and .y ...
YUV_TO_RGB(Y[offset].x, u_value, v_value, r[offset].x, g[offset].x, b[offset].x);
YUV_TO_RGB(Y[offset].y, u_value, v_value, r[offset].y, g[offset].y, b[offset].y);
uvOffset += 1;
u_value = U[uvOffset] - 128;
v_value = V[uvOffset] - 128;
// ... second chroma sample covers .z and .w.
YUV_TO_RGB(Y[offset].z, u_value, v_value, r[offset].z, g[offset].z, b[offset].z);
YUV_TO_RGB(Y[offset].w, u_value, v_value, r[offset].w, g[offset].w, b[offset].w);
}
// Planar RGB -> YUV420. Each thread converts 4 horizontally packed pixels
// to Y; chroma is produced only on even rows, sampling pixels .x and .z of
// the group (no 2x2 averaging). `step_y` is in uchar4 elements, `step_uv`
// in bytes.
__global__ void rgbtoyuv420(const uchar4* R, const uchar4* G, const uchar4* B, uchar4* Y, uint8_t* U, uint8_t* V, int width, int height, int step_y, int step_uv)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= width || y >= height)
{
return;
}
int offset = y * step_y + x;
RGB_TO_Y(R[offset].x, G[offset].x, B[offset].x, Y[offset].x);
RGB_TO_Y(R[offset].y, G[offset].y, B[offset].y, Y[offset].y);
RGB_TO_Y(R[offset].z, G[offset].z, B[offset].z, Y[offset].z);
RGB_TO_Y(R[offset].w, G[offset].w, B[offset].w, Y[offset].w);
// 4:2:0 subsampling: one U/V pair per 2x2 pixel block, taken from the
// top-left pixel of each pair (.x and .z) on even rows only.
if (y % 2 == 0)
{
auto uvOffset = (y >> 1) * (step_uv)+(x << 1);
RGB_TO_UV(R[offset].x, G[offset].x, B[offset].x, U[uvOffset], V[uvOffset]);
uvOffset += 1;
RGB_TO_UV(R[offset].z, G[offset].z, B[offset].z, U[uvOffset], V[uvOffset]);
}
}
// Hue/saturation adjustment on planar RGB, 4 packed pixels per thread:
// RGB -> HSV, add `hue` (wrap handled by clamp in the macro), scale
// saturation, convert back. `step` is the row stride in uchar4 elements.
__global__ void rgbhuesaturation(const uchar4* r, const uchar4* g, const uchar4* b, uchar4* R, uchar4* G, uchar4* B, Npp32f hue, Npp32f saturation, int width, int height, int step)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= width || y >= height)
{
return;
}
int offset = y * step + x;
RGBHUESATURATIONADJUST(r[offset].x, g[offset].x, b[offset].x, R[offset].x, G[offset].x, B[offset].x, hue, saturation);
RGBHUESATURATIONADJUST(r[offset].y, g[offset].y, b[offset].y, R[offset].y, G[offset].y, B[offset].y, hue, saturation);
RGBHUESATURATIONADJUST(r[offset].z, g[offset].z, b[offset].z, R[offset].z, G[offset].z, B[offset].z, hue, saturation);
RGBHUESATURATIONADJUST(r[offset].w, g[offset].w, b[offset].w, R[offset].w, G[offset].w, B[offset].w, hue, saturation);
}
// Brightness/contrast + hue/saturation adjustment directly on YUV420 data:
// each thread processes 4 packed Y pixels, round-trips every pixel through
// RGB/HSV, and rewrites Y always but U/V only on even rows (the 4:2:0
// chroma sites, matching rgbtoyuv420). Odd rows read the same uvOffset,
// i.e. the chroma of the row pair. `step_y` in uchar4, `step_uv` in bytes.
__global__ void yuv420huesaturation(const uchar4* Yold, const uint8_t* Uold, const uint8_t* Vold, uchar4* Y, uint8_t* U, uint8_t* V, Npp32f brightness, Npp32f contrast, Npp32f hue, Npp32f saturation, int width, int height, int step_y, int step_uv)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= width || y >= height)
{
return;
}
int offset = y * step_y + x;
// x << 1: 4 luma pixels span 2 chroma bytes (see yuv420torgb_plain).
auto uvOffset = (y >> 1) * (step_uv)+(x << 1);
int u_value = Uold[uvOffset] - 128;
int v_value = Vold[uvOffset] - 128;
if (y % 2 == 0)
{
// Chroma site: recompute and store U/V along with Y (pixel .x).
YUVHUESATURATIONADJUST(Yold[offset].x, u_value, v_value, Y[offset].x, U[uvOffset], V[uvOffset], brightness, contrast, hue, saturation);
}
else
{
YUVHUESATURATIONADJUST_Y(Yold[offset].x, u_value, v_value, Y[offset].x, brightness, contrast, hue, saturation);
}
YUVHUESATURATIONADJUST_Y(Yold[offset].y, u_value, v_value, Y[offset].y, brightness, contrast, hue, saturation);
uvOffset += 1;
u_value = Uold[uvOffset] - 128;
v_value = Vold[uvOffset] - 128;
if (y % 2 == 0)
{
// Second chroma site of this pixel group (pixel .z).
YUVHUESATURATIONADJUST(Yold[offset].z, u_value, v_value, Y[offset].z, U[uvOffset], V[uvOffset], brightness, contrast, hue, saturation);
}
else
{
YUVHUESATURATIONADJUST_Y(Yold[offset].z, u_value, v_value, Y[offset].z, brightness, contrast, hue, saturation);
}
YUVHUESATURATIONADJUST_Y(Yold[offset].w, u_value, v_value, Y[offset].w, brightness, contrast, hue, saturation);
}
// Host-side dispatcher for the YUV420->RGB kernels. `method` selects the
// implementation: "plain" (uchar4-packed Y, byte chroma), "plain2"
// (one thread per pixel), anything else -> shared-memory variant.
// Incoming steps are byte strides; they are rescaled below to match each
// kernel's element type (>> 2 for uchar4 / uint32_t indexing).
void launch_yuv420torgb(const Npp8u* Y, const Npp8u* U, const Npp8u* V, Npp8u* R, Npp8u* G, Npp8u* B, int step_y, int step_uv, NppiSize size, hipStream_t stream, std::string method)
{
if (method == "plain")
{
// 4 pixels per thread: width and luma step shrink by 4; chroma
// stays byte-addressed so step_uv is untouched.
auto width = size.width >> 2;
step_y = step_y >> 2;
dim3 block(32, 32);
dim3 grid((width + block.x - 1) / block.x, (size.height + block.y - 1) / block.y);
yuv420torgb_plain << <grid, block, 0, stream >> > (reinterpret_cast<const uchar4*>(Y), reinterpret_cast<const uint8_t*>(U), reinterpret_cast<const uint8_t*>(V), reinterpret_cast<uchar4*>(R), reinterpret_cast<uchar4*>(G), reinterpret_cast<uchar4*>(B), width, size.height, step_y, step_uv);
}
else if (method == "plain2")
{
// One thread per pixel; all strides stay in bytes.
auto width = size.width;
dim3 block(32, 32);
dim3 grid((width + block.x - 1) / block.x, (size.height + block.y - 1) / block.y);
yuv420torgb_plain2 << <grid, block, 0, stream >> > (reinterpret_cast<const uint8_t*>(Y), reinterpret_cast<const uint8_t*>(U), reinterpret_cast<const uint8_t*>(V), reinterpret_cast<uint8_t*>(R), reinterpret_cast<uint8_t*>(G), reinterpret_cast<uint8_t*>(B), size.width, size.height, step_y, step_uv);
}
else
{
// Shared-memory variant reads U/V as packed uint32_t, so the chroma
// step is rescaled too. The kernel's index math assumes this exact
// 32x32 block shape.
auto width = size.width >> 2;
step_y = step_y >> 2;
step_uv = step_uv >> 2;
dim3 block(32, 32);
dim3 grid((width + block.x - 1) / block.x, (size.height + block.y - 1) / block.y);
yuv420torgb << <grid, block, 0, stream >> > (reinterpret_cast<const uchar4*>(Y), reinterpret_cast<const uint32_t*>(U), reinterpret_cast<const uint32_t*>(V), reinterpret_cast<uchar4*>(R), reinterpret_cast<uchar4*>(G), reinterpret_cast<uchar4*>(B), width, size.height, step_y, step_uv);
}
}
// Host wrapper for rgbtoyuv420. The kernel consumes uchar4-packed pixels,
// so the logical width and the luma step shrink by 4; the chroma step
// stays in bytes. `method` is currently unused here.
void launch_rgbtoyuv420(const Npp8u* R, const Npp8u* G, const Npp8u* B, Npp8u* Y, Npp8u* U, Npp8u* V, int step_y, int step_uv, NppiSize size, hipStream_t stream, std::string method)
{
    const int packedWidth = size.width >> 2;
    step_y >>= 2;
    const dim3 threads(32, 32);
    const dim3 blocks((packedWidth + threads.x - 1) / threads.x,
                      (size.height + threads.y - 1) / threads.y);
    rgbtoyuv420<<<blocks, threads, 0, stream>>>(
        reinterpret_cast<const uchar4*>(R),
        reinterpret_cast<const uchar4*>(G),
        reinterpret_cast<const uchar4*>(B),
        reinterpret_cast<uchar4*>(Y),
        reinterpret_cast<uint8_t*>(U),
        reinterpret_cast<uint8_t*>(V),
        packedWidth, size.height, step_y, step_uv);
}
// Host wrapper for the (not yet enabled) rgbtohsv kernel.
// NOTE(review): the kernel launch below is commented out, so this function
// is currently a no-op; the width/step/grid computations are kept so the
// launch can be re-enabled as-is. `method` is unused.
void launch_rgbtohsv(const Npp8u* R, const Npp8u* G, const Npp8u* B, Npp8u* H, Npp8u* S, Npp8u* V, int step, NppiSize size, hipStream_t stream, std::string method)
{
auto width = size.width >> 2;
step = step >> 2;
dim3 block(32, 32);
dim3 grid((width + block.x - 1) / block.x, (size.height + block.y - 1) / block.y);
// rgbtohsv << <grid, block, 0, stream >> > (reinterpret_cast<const uchar4*>(R), reinterpret_cast<const uchar4*>(G), reinterpret_cast<const uchar4*>(B), reinterpret_cast<uchar4*>(H), reinterpret_cast<uchar4*>(S), reinterpret_cast<uchar4*>(V), width, size.height, step);
}
// Host wrapper for rgbhuesaturation. Pixels are uchar4-packed, so the
// logical width and the row stride shrink by 4. `method` is unused.
void launch_rgbhuesaturation(const Npp8u* r, const Npp8u* g, const Npp8u* b, Npp8u* R, Npp8u* G, Npp8u* B, Npp32f hue, Npp32f saturation, int step, NppiSize size, hipStream_t stream, std::string method)
{
    const int packedWidth = size.width >> 2;
    step >>= 2;
    const dim3 threads(32, 32);
    const dim3 blocks((packedWidth + threads.x - 1) / threads.x,
                      (size.height + threads.y - 1) / threads.y);
    rgbhuesaturation<<<blocks, threads, 0, stream>>>(
        reinterpret_cast<const uchar4*>(r),
        reinterpret_cast<const uchar4*>(g),
        reinterpret_cast<const uchar4*>(b),
        reinterpret_cast<uchar4*>(R),
        reinterpret_cast<uchar4*>(G),
        reinterpret_cast<uchar4*>(B),
        hue, saturation, packedWidth, size.height, step);
}
void launch_yuv420huesaturation(const Npp8u* y, const Npp8u* u, const Npp8u* v, Npp8u* Y, Npp8u* U, Npp8u* V, Npp32f brightness, Npp32f contrast, Npp32f hue, Npp32f saturation, int step_y, int step_uv, NppiSize size, hipStream_t stream, std::string method)
{
auto width = size.width >> 2;
step_y = step_y >> 2;
dim3 block(32, 32);
dim3 grid((width + block.x - 1) / block.x, (size.height + block.y - 1) / block.y);
yuv420huesaturation << <grid, block, 0, stream >> > (reinterpret_cast<const uchar4*>(y), reinterpret_cast<const uint8_t*>(u), reinterpret_cast<const uint8_t*>(v), reinterpret_cast<uchar4*>(Y), reinterpret_cast<uint8_t*>(U), reinterpret_cast<uint8_t*>(V), brightness, contrast, hue, saturation, width, size.height, step_y, step_uv);
} | dc5d5dcf5dc34e69e5433553a2b3d81744f0ceab.cu | #include "kernels.h"
#define BLOCK_SIZE 1024
#define UINT32_BLOCK_STEP_2 64 // 16X4
#define BLOCK_SIZE_4 256
#define CLAMP_1(x) x < 0 ? 0 : (x > 1 ? 1 : x)
#define CLAMP_255(x) x < 0 ? 0 : (x > 255 ? 255 : x)
#define CLAMP_int8(x) x < -128 ? -128 : (x > 127 ? 127 : x)
// https://en.wikipedia.org/wiki/YUV#Y%E2%80%B2UV420p_(and_Y%E2%80%B2V12_or_YV12)_to_RGB888_conversion
//#define YUV_TO_RGB( Y, U, V, R, G, B ) \
// do \
// { \
// int rTmp = Y + (1.370705 * V); \
// int gTmp = Y - (0.698001 * V) - (0.337633 * U); \
// int bTmp = Y + (1.732446 * U); \
// R = CLAMP_255(rTmp); \
// G = CLAMP_255(gTmp); \
// B = CLAMP_255(bTmp); \
// } while (0)
#define YUV_TO_RGB( Y, U, V, R, G, B ) \
do \
{ \
float rTmp = Y + __fmul_rn (1.370705, V); \
float gTmp = Y - __fmul_rn (0.698001, V) - __fmul_rn (0.337633, U); \
float bTmp = Y + __fmul_rn (1.732446, U); \
R = CLAMP_255(rTmp); \
G = CLAMP_255(gTmp); \
B = CLAMP_255(bTmp); \
} while (0)
#define RGB_TO_Y( R, G, B, Y ) \
do \
{ \
int yTmp = __fmul_rn(R, 0.299) + __fmul_rn (0.587, G) + __fmul_rn (0.114, B); \
Y = CLAMP_255(yTmp); \
} while (0)
#define RGB_TO_UV( R, G, B, U, V ) \
do \
{ \
int uTmp = __fmul_rn(B, 0.436) - __fmul_rn (0.289, G) - __fmul_rn (0.147, R); \
int vTmp = __fmul_rn(R, 0.615) - __fmul_rn (0.515, G) - __fmul_rn (0.1, B); \
U = 128 + (CLAMP_int8(uTmp)); \
V = 128 + (CLAMP_int8(vTmp)); \
} while (0)
#define RGB_TO_HSV(R, G, B, H, S, V) \
do \
{ \
Npp32f nNormalizedR = __fmul_rn(R, 0.003921569F); /* 255.0F*/ \
Npp32f nNormalizedG = __fmul_rn(G, 0.003921569F); \
Npp32f nNormalizedB = __fmul_rn(B, 0.003921569F); \
Npp32f nS; \
Npp32f nH; \
/* Value*/ \
Npp32f nV = fmaxf(nNormalizedR, nNormalizedG); \
nV = fmaxf(nV, nNormalizedB); \
/*Saturation*/ \
Npp32f nTemp = fminf(nNormalizedR, nNormalizedG); \
nTemp = fminf(nTemp, nNormalizedB); \
Npp32f nDivisor = __fsub_rn(nV, nTemp); \
if (nV == 0.0F) /*achromatics case*/ \
{ \
nS = 0.0F; \
nH = 0.0F; \
} \
else /*chromatics case*/ \
{ \
nS = __fdiv_rn(nDivisor, nV); \
} \
/* Hue:*/ \
Npp32f nCr = __fdiv_rn(__fsub_rn(nV, nNormalizedR), nDivisor); \
Npp32f nCg = __fdiv_rn(__fsub_rn(nV, nNormalizedG), nDivisor); \
Npp32f nCb = __fdiv_rn(__fsub_rn(nV, nNormalizedB), nDivisor); \
if (nNormalizedR == nV) \
nH = nCb - nCg; \
else if (nNormalizedG == nV) \
nH = __fadd_rn(2.0F, __fsub_rn(nCr, nCb)); \
else if (nNormalizedB == nV) \
nH = __fadd_rn(4.0F, __fsub_rn(nCg, nCr)); \
nH = __fmul_rn(nH, 0.166667F); /* 6.0F*/ \
if (nH < 0.0F) \
nH = __fadd_rn(nH, 1.0F); \
H = CLAMP_1(nH); \
S = CLAMP_1(nS); \
V = CLAMP_1(nV); \
\
} while(0)
#define HSV_TO_RGB(nNormalizedH, nNormalizedS, nNormalizedV, R, G, B) \
do \
{ \
Npp32f nR; \
Npp32f nG; \
Npp32f nB; \
if (nNormalizedS == 0.0F) \
{ \
nR = nG = nB = nNormalizedV; \
} \
else \
{ \
if (nNormalizedH == 1.0F) \
nNormalizedH = 0.0F; \
else \
{ \
/* 0.1667F*/ \
nNormalizedH = __fmul_rn(nNormalizedH, 6.0F); \
} \
} \
Npp32f nI = floorf(nNormalizedH); \
Npp32f nF = nNormalizedH - nI; \
Npp32f nM = __fmul_rn(nNormalizedV, __fsub_rn(1.0F, nNormalizedS)); \
Npp32f nN = __fmul_rn(nNormalizedV, __fsub_rn(1.0F, __fmul_rn(nNormalizedS, nF) ) ); \
Npp32f nK = __fmul_rn(nNormalizedV, __fsub_rn(1.0F, __fmul_rn(nNormalizedS, __fsub_rn(1.0F, nF)) ) ); \
if (nI == 0.0F) \
{ \
nR = nNormalizedV; nG = nK; nB = nM; \
} \
else if (nI == 1.0F) \
{ \
nR = nN; nG = nNormalizedV; nB = nM; \
} \
else if (nI == 2.0F) \
{ \
nR = nM; nG = nNormalizedV; nB = nK; \
} \
else if (nI == 3.0F) \
{ \
nR = nM; nG = nN; nB = nNormalizedV; \
} \
else if (nI == 4.0F) \
{ \
nR = nK; nG = nM; nB = nNormalizedV; \
} \
else if (nI == 5.0F) \
{ \
nR = nNormalizedV; nG = nM; nB = nN; \
} \
R = CLAMP_255(__fmul_rn(nR, 255.0F)); \
G = CLAMP_255(__fmul_rn(nG, 255.0F)); \
B = CLAMP_255(__fmul_rn(nB, 255.0F)); \
\
} while(0)
#define RGBHUESATURATIONADJUST(r, g, b, R, G, B, hue, saturation) \
do \
{ \
Npp32f H, S, V; \
RGB_TO_HSV(r, g, b, H, S, V); \
H = CLAMP_1(__fadd_rn(H, hue)); \
S = CLAMP_1(__fmul_rn(S, saturation)); \
HSV_TO_RGB(H, S, V, R, G, B); \
} while(0)
#define BRIGHNESS_CONTRAST(input, output, brightness, contrast) \
do \
{ \
output = __fadd_rn(__fmul_rn(input, contrast), brightness); \
output = CLAMP_255(output); \
} while(0) \
#define BRIGHNESS_CONTRAST_RGB(r, g, b, brightness, contrast) \
do \
{ \
BRIGHNESS_CONTRAST(r, r, brightness, contrast); \
BRIGHNESS_CONTRAST(g, g, brightness, contrast); \
BRIGHNESS_CONTRAST(b, b, brightness, contrast); \
} while(0)
#define YUVHUESATURATIONADJUST_Y(y, u, v, Y, brightness, contrast, hue, saturation) \
do \
{ \
Npp32f r, g, b; \
YUV_TO_RGB(y, u, v, r, g, b); \
BRIGHNESS_CONTRAST_RGB(r, g, b, brightness, contrast); \
Npp8u R, G, B; \
RGBHUESATURATIONADJUST(r, g, b, R, G, B, hue, saturation); \
RGB_TO_Y(R, G, B, Y); \
} while(0)
#define YUVHUESATURATIONADJUST(y, u, v, Y, U, V, brightness, contrast, hue, saturation) \
do \
{ \
Npp32f r, g, b; \
YUV_TO_RGB(y, u, v, r, g, b); \
BRIGHNESS_CONTRAST_RGB(r, g, b, brightness, contrast); \
Npp8u R, G, B; \
RGBHUESATURATIONADJUST(r, g, b, R, G, B, hue, saturation); \
RGB_TO_Y(R, G, B, Y); \
RGB_TO_UV(R, G, B, U, V); \
} while (0)
__global__ void yuv420torgb(const uchar4* Y, const uint32_t* U, const uint32_t* V, uchar4* r, uchar4* g, uchar4* b, int width, int height, int step_y, int step_uv)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= width || y >= height)
{
return;
}
int offset = y * step_y + x;
__shared__ uint32_t u_data[BLOCK_SIZE_4];
__shared__ uint32_t v_data[BLOCK_SIZE_4];
// read for every 4 frames once
if (threadIdx.x % 2 == 0 && threadIdx.y % 2 == 0)
{
// 16*threadIdx.y*0.5 + threadIdx.x*0.5
auto uvThreadOffset = (threadIdx.y << 3) + (threadIdx.x >> 1);
auto uvOffset = (y >> 1) * (step_uv)+(x >> 1);
u_data[uvThreadOffset] = U[uvOffset];
v_data[uvThreadOffset] = V[uvOffset];
}
__syncthreads();
// 32x32x4 y, r, g, b values
// 16x16x4 u, v values
auto u_data_uint8 = reinterpret_cast<uint8_t*>(u_data);
auto v_data_uint8 = reinterpret_cast<uint8_t*>(v_data);
auto uvThreadOffset = (threadIdx.y >> 1)*UINT32_BLOCK_STEP_2 + (threadIdx.x << 1);
int u_value = u_data_uint8[uvThreadOffset] - 128;
int v_value = v_data_uint8[uvThreadOffset] - 128;
YUV_TO_RGB(Y[offset].x, u_value, v_value, r[offset].x, g[offset].x, b[offset].x);
YUV_TO_RGB(Y[offset].y, u_value, v_value, r[offset].y, g[offset].y, b[offset].y);
uvThreadOffset += 1;
u_value = u_data_uint8[uvThreadOffset] - 128;
v_value = v_data_uint8[uvThreadOffset] - 128;
YUV_TO_RGB(Y[offset].z, u_value, v_value, r[offset].z, g[offset].z, b[offset].z);
YUV_TO_RGB(Y[offset].w, u_value, v_value, r[offset].w, g[offset].w, b[offset].w);
}
// One thread per pixel: converts a single Y sample plus its shared 2x2
// chroma sample (4:2:0) to the planar r/g/b outputs. `step_y` and
// `step_uv` are per-plane row strides in elements.
__global__ void yuv420torgb_plain2(const uint8_t* Y, const uint8_t* U, const uint8_t* V, uint8_t* r, uint8_t* g, uint8_t* b, int width, int height, int step_y, int step_uv)
{
    const int col = blockDim.x * blockIdx.x + threadIdx.x;
    const int row = blockDim.y * blockIdx.y + threadIdx.y;
    if (col < width && row < height)
    {
        const int lumaIdx = row * step_y + col;
        // Chroma is subsampled 2x in both dimensions.
        const int chromaIdx = (row >> 1) * step_uv + (col >> 1);
        const int uCentered = static_cast<int>(U[chromaIdx]) - 128;
        const int vCentered = static_cast<int>(V[chromaIdx]) - 128;
        YUV_TO_RGB(Y[lumaIdx], uCentered, vCentered, r[lumaIdx], g[lumaIdx], b[lumaIdx]);
    }
}
__global__ void yuv420torgb_plain(const uchar4* Y, const uint8_t* U, const uint8_t* V, uchar4* r, uchar4* g, uchar4* b, int width, int height, int step_y, int step_uv)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= width || y >= height)
{
return;
}
int offset = y * step_y + x;
auto uvOffset = (y >> 1) * (step_uv)+(x << 1);
int u_value = U[uvOffset] - 128;
int v_value = V[uvOffset] - 128;
YUV_TO_RGB(Y[offset].x, u_value, v_value, r[offset].x, g[offset].x, b[offset].x);
YUV_TO_RGB(Y[offset].y, u_value, v_value, r[offset].y, g[offset].y, b[offset].y);
uvOffset += 1;
u_value = U[uvOffset] - 128;
v_value = V[uvOffset] - 128;
YUV_TO_RGB(Y[offset].z, u_value, v_value, r[offset].z, g[offset].z, b[offset].z);
YUV_TO_RGB(Y[offset].w, u_value, v_value, r[offset].w, g[offset].w, b[offset].w);
}
__global__ void rgbtoyuv420(const uchar4* R, const uchar4* G, const uchar4* B, uchar4* Y, uint8_t* U, uint8_t* V, int width, int height, int step_y, int step_uv)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= width || y >= height)
{
return;
}
int offset = y * step_y + x;
RGB_TO_Y(R[offset].x, G[offset].x, B[offset].x, Y[offset].x);
RGB_TO_Y(R[offset].y, G[offset].y, B[offset].y, Y[offset].y);
RGB_TO_Y(R[offset].z, G[offset].z, B[offset].z, Y[offset].z);
RGB_TO_Y(R[offset].w, G[offset].w, B[offset].w, Y[offset].w);
if (y % 2 == 0)
{
auto uvOffset = (y >> 1) * (step_uv)+(x << 1);
RGB_TO_UV(R[offset].x, G[offset].x, B[offset].x, U[uvOffset], V[uvOffset]);
uvOffset += 1;
RGB_TO_UV(R[offset].z, G[offset].z, B[offset].z, U[uvOffset], V[uvOffset]);
}
}
__global__ void rgbhuesaturation(const uchar4* r, const uchar4* g, const uchar4* b, uchar4* R, uchar4* G, uchar4* B, Npp32f hue, Npp32f saturation, int width, int height, int step)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= width || y >= height)
{
return;
}
int offset = y * step + x;
RGBHUESATURATIONADJUST(r[offset].x, g[offset].x, b[offset].x, R[offset].x, G[offset].x, B[offset].x, hue, saturation);
RGBHUESATURATIONADJUST(r[offset].y, g[offset].y, b[offset].y, R[offset].y, G[offset].y, B[offset].y, hue, saturation);
RGBHUESATURATIONADJUST(r[offset].z, g[offset].z, b[offset].z, R[offset].z, G[offset].z, B[offset].z, hue, saturation);
RGBHUESATURATIONADJUST(r[offset].w, g[offset].w, b[offset].w, R[offset].w, G[offset].w, B[offset].w, hue, saturation);
}
__global__ void yuv420huesaturation(const uchar4* Yold, const uint8_t* Uold, const uint8_t* Vold, uchar4* Y, uint8_t* U, uint8_t* V, Npp32f brightness, Npp32f contrast, Npp32f hue, Npp32f saturation, int width, int height, int step_y, int step_uv)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= width || y >= height)
{
return;
}
int offset = y * step_y + x;
auto uvOffset = (y >> 1) * (step_uv)+(x << 1);
int u_value = Uold[uvOffset] - 128;
int v_value = Vold[uvOffset] - 128;
if (y % 2 == 0)
{
YUVHUESATURATIONADJUST(Yold[offset].x, u_value, v_value, Y[offset].x, U[uvOffset], V[uvOffset], brightness, contrast, hue, saturation);
}
else
{
YUVHUESATURATIONADJUST_Y(Yold[offset].x, u_value, v_value, Y[offset].x, brightness, contrast, hue, saturation);
}
YUVHUESATURATIONADJUST_Y(Yold[offset].y, u_value, v_value, Y[offset].y, brightness, contrast, hue, saturation);
uvOffset += 1;
u_value = Uold[uvOffset] - 128;
v_value = Vold[uvOffset] - 128;
if (y % 2 == 0)
{
YUVHUESATURATIONADJUST(Yold[offset].z, u_value, v_value, Y[offset].z, U[uvOffset], V[uvOffset], brightness, contrast, hue, saturation);
}
else
{
YUVHUESATURATIONADJUST_Y(Yold[offset].z, u_value, v_value, Y[offset].z, brightness, contrast, hue, saturation);
}
YUVHUESATURATIONADJUST_Y(Yold[offset].w, u_value, v_value, Y[offset].w, brightness, contrast, hue, saturation);
}
void launch_yuv420torgb(const Npp8u* Y, const Npp8u* U, const Npp8u* V, Npp8u* R, Npp8u* G, Npp8u* B, int step_y, int step_uv, NppiSize size, cudaStream_t stream, std::string method)
{
if (method == "plain")
{
auto width = size.width >> 2;
step_y = step_y >> 2;
dim3 block(32, 32);
dim3 grid((width + block.x - 1) / block.x, (size.height + block.y - 1) / block.y);
yuv420torgb_plain << <grid, block, 0, stream >> > (reinterpret_cast<const uchar4*>(Y), reinterpret_cast<const uint8_t*>(U), reinterpret_cast<const uint8_t*>(V), reinterpret_cast<uchar4*>(R), reinterpret_cast<uchar4*>(G), reinterpret_cast<uchar4*>(B), width, size.height, step_y, step_uv);
}
else if (method == "plain2")
{
auto width = size.width;
dim3 block(32, 32);
dim3 grid((width + block.x - 1) / block.x, (size.height + block.y - 1) / block.y);
yuv420torgb_plain2 << <grid, block, 0, stream >> > (reinterpret_cast<const uint8_t*>(Y), reinterpret_cast<const uint8_t*>(U), reinterpret_cast<const uint8_t*>(V), reinterpret_cast<uint8_t*>(R), reinterpret_cast<uint8_t*>(G), reinterpret_cast<uint8_t*>(B), size.width, size.height, step_y, step_uv);
}
else
{
auto width = size.width >> 2;
step_y = step_y >> 2;
step_uv = step_uv >> 2;
dim3 block(32, 32);
dim3 grid((width + block.x - 1) / block.x, (size.height + block.y - 1) / block.y);
yuv420torgb << <grid, block, 0, stream >> > (reinterpret_cast<const uchar4*>(Y), reinterpret_cast<const uint32_t*>(U), reinterpret_cast<const uint32_t*>(V), reinterpret_cast<uchar4*>(R), reinterpret_cast<uchar4*>(G), reinterpret_cast<uchar4*>(B), width, size.height, step_y, step_uv);
}
}
void launch_rgbtoyuv420(const Npp8u* R, const Npp8u* G, const Npp8u* B, Npp8u* Y, Npp8u* U, Npp8u* V, int step_y, int step_uv, NppiSize size, cudaStream_t stream, std::string method)
{
auto width = size.width >> 2;
step_y = step_y >> 2;
dim3 block(32, 32);
dim3 grid((width + block.x - 1) / block.x, (size.height + block.y - 1) / block.y);
rgbtoyuv420 << <grid, block, 0, stream >> > (reinterpret_cast<const uchar4*>(R), reinterpret_cast<const uchar4*>(G), reinterpret_cast<const uchar4*>(B), reinterpret_cast<uchar4*>(Y), reinterpret_cast<uint8_t*>(U), reinterpret_cast<uint8_t*>(V), width, size.height, step_y, step_uv);
}
void launch_rgbtohsv(const Npp8u* R, const Npp8u* G, const Npp8u* B, Npp8u* H, Npp8u* S, Npp8u* V, int step, NppiSize size, cudaStream_t stream, std::string method)
{
auto width = size.width >> 2;
step = step >> 2;
dim3 block(32, 32);
dim3 grid((width + block.x - 1) / block.x, (size.height + block.y - 1) / block.y);
// rgbtohsv << <grid, block, 0, stream >> > (reinterpret_cast<const uchar4*>(R), reinterpret_cast<const uchar4*>(G), reinterpret_cast<const uchar4*>(B), reinterpret_cast<uchar4*>(H), reinterpret_cast<uchar4*>(S), reinterpret_cast<uchar4*>(V), width, size.height, step);
}
void launch_rgbhuesaturation(const Npp8u* r, const Npp8u* g, const Npp8u* b, Npp8u* R, Npp8u* G, Npp8u* B, Npp32f hue, Npp32f saturation, int step, NppiSize size, cudaStream_t stream, std::string method)
{
auto width = size.width >> 2;
step = step >> 2;
dim3 block(32, 32);
dim3 grid((width + block.x - 1) / block.x, (size.height + block.y - 1) / block.y);
rgbhuesaturation << <grid, block, 0, stream >> > (reinterpret_cast<const uchar4*>(r), reinterpret_cast<const uchar4*>(g), reinterpret_cast<const uchar4*>(b), reinterpret_cast<uchar4*>(R), reinterpret_cast<uchar4*>(G), reinterpret_cast<uchar4*>(B), hue, saturation, width, size.height, step);
}
void launch_yuv420huesaturation(const Npp8u* y, const Npp8u* u, const Npp8u* v, Npp8u* Y, Npp8u* U, Npp8u* V, Npp32f brightness, Npp32f contrast, Npp32f hue, Npp32f saturation, int step_y, int step_uv, NppiSize size, cudaStream_t stream, std::string method)
{
auto width = size.width >> 2;
step_y = step_y >> 2;
dim3 block(32, 32);
dim3 grid((width + block.x - 1) / block.x, (size.height + block.y - 1) / block.y);
yuv420huesaturation << <grid, block, 0, stream >> > (reinterpret_cast<const uchar4*>(y), reinterpret_cast<const uint8_t*>(u), reinterpret_cast<const uint8_t*>(v), reinterpret_cast<uchar4*>(Y), reinterpret_cast<uint8_t*>(U), reinterpret_cast<uint8_t*>(V), brightness, contrast, hue, saturation, width, size.height, step_y, step_uv);
} |
473c170adf73c3e0372f1d421ad1fb634915799f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
%%cu
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
__device__ int d_size;
__global__ void partition (int *arr, int *arr_l, int *arr_h,long int n)
{
int z = blockIdx.x*blockDim.x+threadIdx.x;
d_size = 0;
__syncthreads();
if (z<n)
{
int h = arr_h[z];
int l = arr_l[z];
int x = arr[h];
int i = (l - 1);
int temp;
for (int j = l; j <= h- 1; j++)
{
if (arr[j] <= x)
{
i++;
temp = arr[i];
arr[i] = arr[j];
arr[j] = temp;
}
}
temp = arr[i+1];
arr[i+1] = arr[h];
arr[h] = temp;
int p = (i + 1);
if (p-1 > l)
{
int ind = atomicAdd(&d_size, 1);
arr_l[ind] = l;
arr_h[ind] = p-1;
}
if ( p+1 < h )
{
int ind = atomicAdd(&d_size, 1);
arr_l[ind] = p+1;
arr_h[ind] = h;
}
}
}
void quickSortIterative (int arr[],long int l,long int h)
{
int lstack[ h - l + 1 ], hstack[ h - l + 1];
int *d_d, *d_l, *d_h;
long int top = -1;
lstack[ ++top ] = l;
hstack[ top ] = h;
hipMalloc(&d_d, (h-l+1)*sizeof(int));
hipMemcpy(d_d, arr,(h-l+1)*sizeof(int),hipMemcpyHostToDevice);
hipMalloc(&d_l, (h-l+1)*sizeof(int));
hipMemcpy(d_l, lstack,(h-l+1)*sizeof(int),hipMemcpyHostToDevice);
hipMalloc(&d_h, (h-l+1)*sizeof(int));
hipMemcpy(d_h, hstack,(h-l+1)*sizeof(int),hipMemcpyHostToDevice);
int n_t = 1;
int n_b = 1;
long int n_i = 1;
while ( n_i > 0 )
{
hipLaunchKernelGGL(( partition), dim3(n_b),dim3(n_t), 0, 0, d_d, d_l, d_h, n_i);
int answer;
hipMemcpyFromSymbol(&answer, d_size, sizeof(int), 0, hipMemcpyDeviceToHost);
if (answer < 1024)
{
n_t = answer;
}
else
{
n_t = 1024;
n_b = answer/n_t + (answer%n_t==0?0:1);
}
n_i = answer;
hipMemcpy(arr, d_d,(h-l+1)*sizeof(int),hipMemcpyDeviceToHost);
}
}
int main()
{
long int n=1024*1;
int arr[n];
srand(time(NULL));
for (int i = 0; i<n; i++)
{
arr[i] = rand ()%10000;
}
n = sizeof( arr ) / sizeof( *arr );
hipEvent_t start,end;
hipEventCreate(&start);
hipEventCreate(&end);
hipEventRecord(start);
quickSortIterative( arr, 0, n - 1 );
hipEventRecord(end);
hipEventSynchronize(end);
float milliseconds=0;
hipEventElapsedTime(&milliseconds,start,end);
double timeTaken=(double)milliseconds;
double throughput = (n*sizeof(int))/(timeTaken);
printf("%f,%f",timeTaken/1000,throughput);
return 0;
} | 473c170adf73c3e0372f1d421ad1fb634915799f.cu | %%cu
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
__device__ int d_size;
__global__ void partition (int *arr, int *arr_l, int *arr_h,long int n)
{
int z = blockIdx.x*blockDim.x+threadIdx.x;
d_size = 0;
__syncthreads();
if (z<n)
{
int h = arr_h[z];
int l = arr_l[z];
int x = arr[h];
int i = (l - 1);
int temp;
for (int j = l; j <= h- 1; j++)
{
if (arr[j] <= x)
{
i++;
temp = arr[i];
arr[i] = arr[j];
arr[j] = temp;
}
}
temp = arr[i+1];
arr[i+1] = arr[h];
arr[h] = temp;
int p = (i + 1);
if (p-1 > l)
{
int ind = atomicAdd(&d_size, 1);
arr_l[ind] = l;
arr_h[ind] = p-1;
}
if ( p+1 < h )
{
int ind = atomicAdd(&d_size, 1);
arr_l[ind] = p+1;
arr_h[ind] = h;
}
}
}
void quickSortIterative (int arr[],long int l,long int h)
{
int lstack[ h - l + 1 ], hstack[ h - l + 1];
int *d_d, *d_l, *d_h;
long int top = -1;
lstack[ ++top ] = l;
hstack[ top ] = h;
cudaMalloc(&d_d, (h-l+1)*sizeof(int));
cudaMemcpy(d_d, arr,(h-l+1)*sizeof(int),cudaMemcpyHostToDevice);
cudaMalloc(&d_l, (h-l+1)*sizeof(int));
cudaMemcpy(d_l, lstack,(h-l+1)*sizeof(int),cudaMemcpyHostToDevice);
cudaMalloc(&d_h, (h-l+1)*sizeof(int));
cudaMemcpy(d_h, hstack,(h-l+1)*sizeof(int),cudaMemcpyHostToDevice);
int n_t = 1;
int n_b = 1;
long int n_i = 1;
while ( n_i > 0 )
{
partition<<<n_b,n_t>>>( d_d, d_l, d_h, n_i);
int answer;
cudaMemcpyFromSymbol(&answer, d_size, sizeof(int), 0, cudaMemcpyDeviceToHost);
if (answer < 1024)
{
n_t = answer;
}
else
{
n_t = 1024;
n_b = answer/n_t + (answer%n_t==0?0:1);
}
n_i = answer;
cudaMemcpy(arr, d_d,(h-l+1)*sizeof(int),cudaMemcpyDeviceToHost);
}
}
int main()
{
long int n=1024*1;
int arr[n];
srand(time(NULL));
for (int i = 0; i<n; i++)
{
arr[i] = rand ()%10000;
}
n = sizeof( arr ) / sizeof( *arr );
cudaEvent_t start,end;
cudaEventCreate(&start);
cudaEventCreate(&end);
cudaEventRecord(start);
quickSortIterative( arr, 0, n - 1 );
cudaEventRecord(end);
cudaEventSynchronize(end);
float milliseconds=0;
cudaEventElapsedTime(&milliseconds,start,end);
double timeTaken=(double)milliseconds;
double throughput = (n*sizeof(int))/(timeTaken);
printf("%f,%f",timeTaken/1000,throughput);
return 0;
} |
b1f9d08ef537cb730b284168750adb80028d186b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "NvInfer.h"
#include "bertCommon.h"
#include "common.h"
#include "serialize.hpp"
#include "skipLayerNormPlugin.h"
#include <cassert>
#include <cstring>
#include <vector>
using namespace nvinfer1;
using bert::operator+;
namespace bert
{
template <typename T, int TPB, int VPT, bool hasBias>
__global__ void skipln_vec(
const int ld, const T* input, const T* skip, T* output, const T* beta, const T* gamma, const T* bias)
{
const int idx = ld * blockIdx.x + threadIdx.x * VPT;
// 4 * 1024 * 4 * 2 Bytes = 16KB per block
T in_local[VPT];
T skip_local[VPT];
T bias_local[VPT];
copy<sizeof(T) * VPT>(&input[idx], in_local);
copy<sizeof(T) * VPT>(&skip[idx], skip_local);
copy<sizeof(T) * VPT>(&bias[threadIdx.x * VPT], bias_local);
T local = 0.f;
T local2 = 0.f;
const T rld = T(1) / T(ld);
#pragma unroll
for (int it = 0; it < VPT; it++)
{
in_local[it] += skip_local[it];
if (hasBias)
in_local[it] += bias_local[it];
const T tmp = rld * in_local[it];
local += tmp;
local2 += tmp * in_local[it];
}
copy<sizeof(T) * VPT>(&beta[threadIdx.x * VPT], bias_local);
copy<sizeof(T) * VPT>(&gamma[threadIdx.x * VPT], skip_local);
using BlockReduce = hipcub::BlockReduce<kvp<T>, TPB>;
__shared__ typename BlockReduce::TempStorage temp_storage;
__shared__ T mu; // mean
__shared__ T rsigma; // 1 / std.dev.
const auto sumKV = BlockReduce(temp_storage).Reduce(kvp<T>(local, local2), hipcub::Sum());
if (threadIdx.x == 0)
{
mu = sumKV.key;
rsigma = rsqrt(sumKV.value - mu * mu + T(1e-5));
}
__syncthreads();
#pragma unroll
for (int it = 0; it < VPT; it++)
{
in_local[it] = skip_local[it] * (in_local[it] - mu) * rsigma + bias_local[it];
}
copy<sizeof(T) * VPT>(in_local, &output[idx]);
}
template <typename T, unsigned TPB, bool hasBias>
__global__ void skipLayerNormKernelSmall(
const int ld, const T* input, const T* skip, const T* beta, const T* gamma, T* output, const T* bias)
{
const T rld = T(1) / T(ld);
const int offset = blockIdx.x * ld;
hipcub::Sum pairSum;
// reduce x and x^2
kvp<T> threadData(0, 0);
const int idx = offset + threadIdx.x;
T val = 0;
if (threadIdx.x < ld)
{
val = input[idx] + skip[idx];
if (hasBias)
{
val += bias[threadIdx.x];
}
const T rldval = rld * val;
threadData = pairSum(threadData, kvp<T>(rldval, rldval * val));
}
layerNormSmall<T, T, TPB>(val, threadData, ld, idx, beta, gamma, output);
}
template <typename T, unsigned TPB, bool hasBias>
__global__ void skipLayerNormKernel(
const int ld, const T* input, const T* skip, const T* beta, const T* gamma, T* output, const T* bias)
{
const T rld = T(1) / T(ld);
const int offset = blockIdx.x * ld;
hipcub::Sum pairSum;
// reduce x and x^2
kvp<T> threadData(0, 0);
for (int i = threadIdx.x; i < ld; i += TPB)
{
const int idx = offset + i;
T val = T(input[idx]) + T(skip[idx]);
if (hasBias)
{
val += T(bias[i]);
}
const T rldval = rld * val;
threadData = pairSum(threadData, kvp<T>(rldval, rldval * val));
output[idx] = val;
}
layerNorm<T, T, T, TPB>(threadData, ld, offset, beta, gamma, output);
}
template <typename T, bool hasBias>
int computeSkipLayerNorm(hipStream_t stream, const int ld, const int n, const T* input, const T* skip, const T* beta,
const T* gamma, T* output, const T* bias)
{
// this must be true because n is the total size of the tensor
assert(n % ld == 0);
const int gridSize = n / ld;
constexpr int VPT = 16 / sizeof(T);
if (ld <= 32)
{
constexpr int blockSize = 32;
hipLaunchKernelGGL(( skipLayerNormKernelSmall<T, blockSize, hasBias>)
, dim3(gridSize), dim3(blockSize), 0, stream, ld, input, skip, beta, gamma, output, bias);
}
else if (ld == 768)
{
constexpr int TPB = 768 / VPT;
hipLaunchKernelGGL(( skipln_vec<T, TPB, VPT, hasBias>), dim3(gridSize), dim3(TPB), 0, stream, ld, input, skip, output, beta, gamma, bias);
}
else if (ld == 1024)
{
constexpr int TPB = 1024 / VPT;
hipLaunchKernelGGL(( skipln_vec<T, TPB, VPT, hasBias>), dim3(gridSize), dim3(TPB), 0, stream, ld, input, skip, output, beta, gamma, bias);
}
else
{
constexpr int blockSize = 256;
hipLaunchKernelGGL(( skipLayerNormKernel<T, blockSize, hasBias>)
, dim3(gridSize), dim3(blockSize), 0, stream, ld, input, skip, beta, gamma, output, bias);
}
CHECK(hipPeekAtLastError());
return 0;
}
// Clip plugin specific constants
namespace
{
static const char* SKIP_LAYER_NORM_VERSION{"1"};
static const char* SKIP_LAYER_NORM_NAME{"CustomSkipLayerNormPluginDynamic"};
} // namespace
// Static class fields initialization
PluginFieldCollection SkipLayerNormPluginDynamicCreator::mFC{};
std::vector<PluginField> SkipLayerNormPluginDynamicCreator::mPluginAttributes;
REGISTER_TENSORRT_PLUGIN(SkipLayerNormPluginDynamicCreator);
SkipLayerNormPluginDynamic::SkipLayerNormPluginDynamic(
const std::string name, const DataType type, const int ld, const Weights& beta, const Weights& gamma)
: mLayerName(name)
, mLd(ld)
, mGamma(gamma)
, mBeta(beta)
, mHasBias(false)
, mType(type)
{
mBias.values = nullptr;
mBias.count = 0;
}
SkipLayerNormPluginDynamic::SkipLayerNormPluginDynamic(const std::string name, const DataType type, const int ld,
const Weights& beta, const Weights& gamma, const Weights& bias)
: mLayerName(name)
, mLd(ld)
, mGamma(gamma)
, mBeta(beta)
, mHasBias(true)
, mBias(bias)
, mType(type)
{
}
SkipLayerNormPluginDynamic::SkipLayerNormPluginDynamic(const std::string name, const void* data, size_t length)
: mLayerName(name)
{
gLogVerbose << "Starting to deserialize SkipLayerNorm plugin" << std::endl;
// Deserialize in the same order as serialization
deserialize_value(&data, &length, &mType);
deserialize_value(&data, &length, &mLd);
deserialize_value(&data, &length, &mHasBias);
const char* d = static_cast<const char*>(data);
const size_t wordSize = samplesCommon::getElementSize(mType);
mBetaDev = deserToDev<char>(d, mLd * wordSize);
mGammaDev = deserToDev<char>(d, mLd * wordSize);
if (mHasBias)
{
mBiasDev = deserToDev<char>(d, mLd * wordSize);
}
// this signals init not to allocate/copy
mGamma.count = mLd;
mGamma.values = nullptr;
mBeta.count = mLd;
mBeta.values = nullptr;
mBias.count = mLd;
mBias.values = nullptr;
gLogVerbose << "Finished deserializing SkipLayerNorm plugin" << std::endl;
}
// IPluginV2DynamicExt Methods
IPluginV2DynamicExt* SkipLayerNormPluginDynamic::clone() const
{
if (mHasBias)
{
return new SkipLayerNormPluginDynamic(mLayerName, mType, mLd, mBeta, mGamma, mBias);
}
return new SkipLayerNormPluginDynamic(mLayerName, mType, mLd, mBeta, mGamma);
}
DimsExprs SkipLayerNormPluginDynamic::getOutputDimensions(
int outputIndex, const DimsExprs* inputs, int nbInputs, IExprBuilder& exprBuilder)
{
assert(nbInputs == 2);
assert(outputIndex == 0);
assert(inputs[0].nbDims == inputs[1].nbDims);
return inputs[0];
}
bool SkipLayerNormPluginDynamic::supportsFormatCombination(
int pos, const PluginTensorDesc* inOut, int nbInputs, int nbOutputs)
{
assert(nbInputs == 2);
assert(nbOutputs == 1);
const PluginTensorDesc& in = inOut[pos];
if (pos == 0)
{
return (in.type == mType) && (in.format == TensorFormat::kLINEAR);
}
const PluginTensorDesc& prev = inOut[pos - 1];
if (pos == 1)
{
return in.type == prev.type && in.format == prev.format;
}
// output
return in.type == prev.type && in.format == prev.format;
}
void SkipLayerNormPluginDynamic::configurePlugin(
const DynamicPluginTensorDesc* inputs, int nbInputs, const DynamicPluginTensorDesc* outputs, int nbOutputs)
{
// Validate input arguments
assert(nbOutputs == 1);
assert(nbInputs == 2);
assert(mType == inputs[0].desc.type);
assert(mType == inputs[1].desc.type);
const auto& inDims0 = inputs[0].desc.dims;
const auto& inDims1 = inputs[1].desc.dims;
TRT_UNUSED inDims1;
assert(inDims0.nbDims == inDims1.nbDims);
assert(std::equal(inDims0.d, inDims0.d + inDims0.nbDims, inDims1.d));
assert(inDims0.nbDims == 5);
mLd = inDims0.d[HDIM]; // hiddensize
assert(inDims0.d[3] == 1);
assert(inDims0.d[4] == 1);
}
size_t SkipLayerNormPluginDynamic::getWorkspaceSize(
const PluginTensorDesc* inputs, int nbInputs, const PluginTensorDesc* outputs, int nbOutputs) const
{
return 0;
}
int SkipLayerNormPluginDynamic::enqueue(const PluginTensorDesc* inputDesc, const PluginTensorDesc* outputDesc,
const void* const* inputs, void* const* outputs, void* workspace, hipStream_t stream)
{
const int inputVolume = volume(inputDesc[0].dims);
int status = -1;
// Our plugin outputs only one tensor
// Launch CUDA kernel wrapper and save its return value
if (mType == DataType::kFLOAT)
{
const float* input = static_cast<const float*>(inputs[0]);
const float* skip = static_cast<const float*>(inputs[1]);
float* output = static_cast<float*>(outputs[0]);
float* bias = reinterpret_cast<float*>(mBiasDev);
const float* beta = static_cast<const float*>(mBetaDev);
const float* gamma = static_cast<const float*>(mGammaDev);
if (mHasBias)
{
status
= computeSkipLayerNorm<float, true>(stream, mLd, inputVolume, input, skip, beta, gamma, output, bias);
}
else
{
status
= computeSkipLayerNorm<float, false>(stream, mLd, inputVolume, input, skip, beta, gamma, output, bias);
}
}
else if (mType == DataType::kHALF)
{
const half* input = static_cast<const half*>(inputs[0]);
const half* skip = static_cast<const half*>(inputs[1]);
half* output = static_cast<half*>(outputs[0]);
half* bias = reinterpret_cast<half*>(mBiasDev);
const half* beta = static_cast<const half*>(mBetaDev);
const half* gamma = static_cast<const half*>(mGammaDev);
if (mHasBias)
{
status = computeSkipLayerNorm<half, true>(stream, mLd, inputVolume, input, skip, beta, gamma, output, bias);
}
else
{
status
= computeSkipLayerNorm<half, false>(stream, mLd, inputVolume, input, skip, beta, gamma, output, bias);
}
}
else
{
gLogError << "Unsupported Type\n";
assert(false);
}
return status;
}
// IPluginV2Ext Methods
DataType SkipLayerNormPluginDynamic::getOutputDataType(int index, const DataType* inputTypes, int nbInputs) const
{
assert(index == 0);
assert(nbInputs == 2);
assert(inputTypes[0] == DataType::kFLOAT || inputTypes[0] == DataType::kHALF);
assert(inputTypes[0] == inputTypes[1]);
return inputTypes[0];
}
// IPluginV2 Methods
const char* SkipLayerNormPluginDynamic::getPluginType() const
{
return SKIP_LAYER_NORM_NAME;
}
const char* SkipLayerNormPluginDynamic::getPluginVersion() const
{
return SKIP_LAYER_NORM_VERSION;
}
int SkipLayerNormPluginDynamic::getNbOutputs() const
{
return 1;
}
int SkipLayerNormPluginDynamic::initialize()
{
const size_t wordSize = samplesCommon::getElementSize(mType);
if (mGamma.values)
{
CHECK(hipMalloc(&mGammaDev, sizeof(float) * mGamma.count));
// target size
const size_t nbBytes = mGamma.count * wordSize;
CHECK(hipMalloc(&mGammaDev, nbBytes));
if (mType == DataType::kFLOAT)
{
convertAndCopyToDevice(mGamma, static_cast<float*>(mGammaDev));
}
else
{
convertAndCopyToDevice(mGamma, static_cast<half*>(mGammaDev));
}
}
if (mBeta.values)
{
CHECK(hipMalloc(&mBetaDev, sizeof(float) * mBeta.count));
const size_t nbBytes = mBeta.count * wordSize;
CHECK(hipMalloc(&mBetaDev, nbBytes));
if (mType == DataType::kFLOAT)
{
convertAndCopyToDevice(mBeta, static_cast<float*>(mBetaDev));
}
else
{
convertAndCopyToDevice(mBeta, static_cast<half*>(mBetaDev));
}
}
if (mHasBias && mBias.values)
{
// target size
const size_t nbBytes = mBias.count * wordSize;
CHECK(hipMalloc(&mBiasDev, nbBytes));
if (mType == DataType::kFLOAT)
{
convertAndCopyToDevice(mBias, reinterpret_cast<float*>(mBiasDev));
}
else
{
convertAndCopyToDevice(mBias, reinterpret_cast<half*>(mBiasDev));
}
}
return 0;
}
void SkipLayerNormPluginDynamic::terminate()
{
gLogVerbose << "SKIPLN terminate start" << std::endl;
hipFree(mGammaDev);
hipFree(mBetaDev);
if (mHasBias)
{
hipFree(mBiasDev);
}
gLogVerbose << "SKIPLN terminate done" << std::endl;
}
size_t SkipLayerNormPluginDynamic::getSerializationSize() const
{
const size_t wordSize = samplesCommon::getElementSize(mType);
const size_t biasSize = mHasBias ? (mLd * wordSize) : 0;
return 2 * wordSize * mLd + sizeof(DataType) + sizeof(mLd) + biasSize + sizeof(mHasBias);
}
void SkipLayerNormPluginDynamic::serialize(void* buffer) const
{
serialize_value(&buffer, mType);
serialize_value(&buffer, mLd);
serialize_value(&buffer, mHasBias);
const size_t wordSize = samplesCommon::getElementSize(mType);
char* d = static_cast<char*>(buffer);
serFromDev(d, static_cast<char*>(mBetaDev), mLd * wordSize);
serFromDev(d, static_cast<char*>(mGammaDev), mLd * wordSize);
if (mHasBias)
{
const size_t wordSize = samplesCommon::getElementSize(mType);
serFromDev(d, mBiasDev, mLd * wordSize);
}
}
void SkipLayerNormPluginDynamic::destroy()
{
// This gets called when the network containing plugin is destroyed
delete this;
}
void SkipLayerNormPluginDynamic::setPluginNamespace(const char* libNamespace)
{
mNamespace = libNamespace;
}
const char* SkipLayerNormPluginDynamic::getPluginNamespace() const
{
return mNamespace.c_str();
}
/////////////////////////////////////////////////////////
SkipLayerNormPluginDynamicCreator::SkipLayerNormPluginDynamicCreator()
{
mFC.nbFields = mPluginAttributes.size();
mFC.fields = mPluginAttributes.data();
}
const char* SkipLayerNormPluginDynamicCreator::getPluginName() const
{
return SKIP_LAYER_NORM_NAME;
}
const char* SkipLayerNormPluginDynamicCreator::getPluginVersion() const
{
return SKIP_LAYER_NORM_VERSION;
}
const PluginFieldCollection* SkipLayerNormPluginDynamicCreator::getFieldNames()
{
return &mFC;
}
IPluginV2* SkipLayerNormPluginDynamicCreator::createPlugin(const char* name, const PluginFieldCollection* fc)
{
gLogVerbose << "Creating SkipLayerNormPluginDynamicCreator...\n";
int ld = 0;
Weights beta{DataType::kFLOAT, nullptr, 0};
Weights gamma{DataType::kFLOAT, nullptr, 0};
Weights bias{DataType::kFLOAT, nullptr, 0};
int typeId = -1;
for (int i = 0; i < fc->nbFields; i++)
{
std::string field_name(fc->fields[i].name);
if (field_name.compare("ld") == 0)
{
ld = *static_cast<const int*>(fc->fields[i].data);
gLogVerbose << "Building ld: " << ld << std::endl;
}
if (field_name.compare("type_id") == 0)
{
typeId = *static_cast<const int*>(fc->fields[i].data);
gLogVerbose << "Building typeId: " << typeId << std::endl;
}
if (field_name.compare("beta") == 0)
{
gLogVerbose << "Building beta...\n";
beta.values = fc->fields[i].data;
beta.count = fc->fields[i].length;
beta.type = fieldTypeToDataType(fc->fields[i].type);
}
if (field_name.compare("gamma") == 0)
{
gLogVerbose << "Building gamma...\n";
gamma.values = fc->fields[i].data;
gamma.count = fc->fields[i].length;
gamma.type = fieldTypeToDataType(fc->fields[i].type);
}
if (field_name.compare("bias") == 0)
{
gLogVerbose << "Building bias...\n";
bias.values = fc->fields[i].data;
bias.count = fc->fields[i].length;
bias.type = fieldTypeToDataType(fc->fields[i].type);
}
}
if (typeId < 0 || typeId > 3)
{
gLogError << "SkipLayerNorm: Invalid type ID: " << typeId << std::endl;
}
if (beta.count <= 0 || beta.values == nullptr)
{
gLogError << "SkipLayerNorm: invalid beta" << std::endl;
}
if (gamma.count <= 0 || gamma.values == nullptr)
{
gLogError << "SkipLayerNorm: invalid gamma" << std::endl;
}
DataType type = static_cast<DataType>(typeId);
if (bias.values == nullptr)
{
return new SkipLayerNormPluginDynamic(name, type, ld, beta, gamma);
}
return new SkipLayerNormPluginDynamic(name, type, ld, beta, gamma, bias);
}
IPluginV2* SkipLayerNormPluginDynamicCreator::deserializePlugin(
const char* name, const void* serialData, size_t serialLength)
{
// This object will be deleted when the network is destroyed, which will
// call SkipLayerNormPluginDynamic::destroy()
return new SkipLayerNormPluginDynamic(name, serialData, serialLength);
}
void SkipLayerNormPluginDynamicCreator::setPluginNamespace(const char* libNamespace)
{
mNamespace = libNamespace;
}
const char* SkipLayerNormPluginDynamicCreator::getPluginNamespace() const
{
return mNamespace.c_str();
}
} // namespace bert
| b1f9d08ef537cb730b284168750adb80028d186b.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "NvInfer.h"
#include "bertCommon.h"
#include "common.h"
#include "serialize.hpp"
#include "skipLayerNormPlugin.h"
#include <cassert>
#include <cstring>
#include <vector>
using namespace nvinfer1;
using bert::operator+;
namespace bert
{
template <typename T, int TPB, int VPT, bool hasBias>
__global__ void skipln_vec(
const int ld, const T* input, const T* skip, T* output, const T* beta, const T* gamma, const T* bias)
{
const int idx = ld * blockIdx.x + threadIdx.x * VPT;
// 4 * 1024 * 4 * 2 Bytes = 16KB per block
T in_local[VPT];
T skip_local[VPT];
T bias_local[VPT];
copy<sizeof(T) * VPT>(&input[idx], in_local);
copy<sizeof(T) * VPT>(&skip[idx], skip_local);
copy<sizeof(T) * VPT>(&bias[threadIdx.x * VPT], bias_local);
T local = 0.f;
T local2 = 0.f;
const T rld = T(1) / T(ld);
#pragma unroll
for (int it = 0; it < VPT; it++)
{
in_local[it] += skip_local[it];
if (hasBias)
in_local[it] += bias_local[it];
const T tmp = rld * in_local[it];
local += tmp;
local2 += tmp * in_local[it];
}
copy<sizeof(T) * VPT>(&beta[threadIdx.x * VPT], bias_local);
copy<sizeof(T) * VPT>(&gamma[threadIdx.x * VPT], skip_local);
using BlockReduce = cub::BlockReduce<kvp<T>, TPB>;
__shared__ typename BlockReduce::TempStorage temp_storage;
__shared__ T mu; // mean
__shared__ T rsigma; // 1 / std.dev.
const auto sumKV = BlockReduce(temp_storage).Reduce(kvp<T>(local, local2), cub::Sum());
if (threadIdx.x == 0)
{
mu = sumKV.key;
rsigma = rsqrt(sumKV.value - mu * mu + T(1e-5));
}
__syncthreads();
#pragma unroll
for (int it = 0; it < VPT; it++)
{
in_local[it] = skip_local[it] * (in_local[it] - mu) * rsigma + bias_local[it];
}
copy<sizeof(T) * VPT>(in_local, &output[idx]);
}
template <typename T, unsigned TPB, bool hasBias>
__global__ void skipLayerNormKernelSmall(
const int ld, const T* input, const T* skip, const T* beta, const T* gamma, T* output, const T* bias)
{
const T rld = T(1) / T(ld);
const int offset = blockIdx.x * ld;
cub::Sum pairSum;
// reduce x and x^2
kvp<T> threadData(0, 0);
const int idx = offset + threadIdx.x;
T val = 0;
if (threadIdx.x < ld)
{
val = input[idx] + skip[idx];
if (hasBias)
{
val += bias[threadIdx.x];
}
const T rldval = rld * val;
threadData = pairSum(threadData, kvp<T>(rldval, rldval * val));
}
layerNormSmall<T, T, TPB>(val, threadData, ld, idx, beta, gamma, output);
}
template <typename T, unsigned TPB, bool hasBias>
__global__ void skipLayerNormKernel(
const int ld, const T* input, const T* skip, const T* beta, const T* gamma, T* output, const T* bias)
{
const T rld = T(1) / T(ld);
const int offset = blockIdx.x * ld;
cub::Sum pairSum;
// reduce x and x^2
kvp<T> threadData(0, 0);
for (int i = threadIdx.x; i < ld; i += TPB)
{
const int idx = offset + i;
T val = T(input[idx]) + T(skip[idx]);
if (hasBias)
{
val += T(bias[i]);
}
const T rldval = rld * val;
threadData = pairSum(threadData, kvp<T>(rldval, rldval * val));
output[idx] = val;
}
layerNorm<T, T, T, TPB>(threadData, ld, offset, beta, gamma, output);
}
template <typename T, bool hasBias>
int computeSkipLayerNorm(cudaStream_t stream, const int ld, const int n, const T* input, const T* skip, const T* beta,
const T* gamma, T* output, const T* bias)
{
// this must be true because n is the total size of the tensor
assert(n % ld == 0);
const int gridSize = n / ld;
constexpr int VPT = 16 / sizeof(T);
if (ld <= 32)
{
constexpr int blockSize = 32;
skipLayerNormKernelSmall<T, blockSize, hasBias>
<<<gridSize, blockSize, 0, stream>>>(ld, input, skip, beta, gamma, output, bias);
}
else if (ld == 768)
{
constexpr int TPB = 768 / VPT;
skipln_vec<T, TPB, VPT, hasBias><<<gridSize, TPB, 0, stream>>>(ld, input, skip, output, beta, gamma, bias);
}
else if (ld == 1024)
{
constexpr int TPB = 1024 / VPT;
skipln_vec<T, TPB, VPT, hasBias><<<gridSize, TPB, 0, stream>>>(ld, input, skip, output, beta, gamma, bias);
}
else
{
constexpr int blockSize = 256;
skipLayerNormKernel<T, blockSize, hasBias>
<<<gridSize, blockSize, 0, stream>>>(ld, input, skip, beta, gamma, output, bias);
}
CHECK(cudaPeekAtLastError());
return 0;
}
// Clip plugin specific constants
namespace
{
static const char* SKIP_LAYER_NORM_VERSION{"1"};
static const char* SKIP_LAYER_NORM_NAME{"CustomSkipLayerNormPluginDynamic"};
} // namespace
// Static class fields initialization
PluginFieldCollection SkipLayerNormPluginDynamicCreator::mFC{};
std::vector<PluginField> SkipLayerNormPluginDynamicCreator::mPluginAttributes;
REGISTER_TENSORRT_PLUGIN(SkipLayerNormPluginDynamicCreator);
SkipLayerNormPluginDynamic::SkipLayerNormPluginDynamic(
const std::string name, const DataType type, const int ld, const Weights& beta, const Weights& gamma)
: mLayerName(name)
, mLd(ld)
, mGamma(gamma)
, mBeta(beta)
, mHasBias(false)
, mType(type)
{
mBias.values = nullptr;
mBias.count = 0;
}
SkipLayerNormPluginDynamic::SkipLayerNormPluginDynamic(const std::string name, const DataType type, const int ld,
const Weights& beta, const Weights& gamma, const Weights& bias)
: mLayerName(name)
, mLd(ld)
, mGamma(gamma)
, mBeta(beta)
, mHasBias(true)
, mBias(bias)
, mType(type)
{
}
SkipLayerNormPluginDynamic::SkipLayerNormPluginDynamic(const std::string name, const void* data, size_t length)
: mLayerName(name)
{
gLogVerbose << "Starting to deserialize SkipLayerNorm plugin" << std::endl;
// Deserialize in the same order as serialization
deserialize_value(&data, &length, &mType);
deserialize_value(&data, &length, &mLd);
deserialize_value(&data, &length, &mHasBias);
const char* d = static_cast<const char*>(data);
const size_t wordSize = samplesCommon::getElementSize(mType);
mBetaDev = deserToDev<char>(d, mLd * wordSize);
mGammaDev = deserToDev<char>(d, mLd * wordSize);
if (mHasBias)
{
mBiasDev = deserToDev<char>(d, mLd * wordSize);
}
// this signals init not to allocate/copy
mGamma.count = mLd;
mGamma.values = nullptr;
mBeta.count = mLd;
mBeta.values = nullptr;
mBias.count = mLd;
mBias.values = nullptr;
gLogVerbose << "Finished deserializing SkipLayerNorm plugin" << std::endl;
}
// IPluginV2DynamicExt Methods
IPluginV2DynamicExt* SkipLayerNormPluginDynamic::clone() const
{
if (mHasBias)
{
return new SkipLayerNormPluginDynamic(mLayerName, mType, mLd, mBeta, mGamma, mBias);
}
return new SkipLayerNormPluginDynamic(mLayerName, mType, mLd, mBeta, mGamma);
}
DimsExprs SkipLayerNormPluginDynamic::getOutputDimensions(
int outputIndex, const DimsExprs* inputs, int nbInputs, IExprBuilder& exprBuilder)
{
assert(nbInputs == 2);
assert(outputIndex == 0);
assert(inputs[0].nbDims == inputs[1].nbDims);
return inputs[0];
}
bool SkipLayerNormPluginDynamic::supportsFormatCombination(
int pos, const PluginTensorDesc* inOut, int nbInputs, int nbOutputs)
{
assert(nbInputs == 2);
assert(nbOutputs == 1);
const PluginTensorDesc& in = inOut[pos];
if (pos == 0)
{
return (in.type == mType) && (in.format == TensorFormat::kLINEAR);
}
const PluginTensorDesc& prev = inOut[pos - 1];
if (pos == 1)
{
return in.type == prev.type && in.format == prev.format;
}
// output
return in.type == prev.type && in.format == prev.format;
}
// Validates the build-time assumptions and records the hidden size from the
// input shape. Expects two identically-shaped 5-D inputs whose trailing two
// dimensions are 1 (BERT-style layout); HDIM indexes the hidden dimension.
void SkipLayerNormPluginDynamic::configurePlugin(
    const DynamicPluginTensorDesc* inputs, int nbInputs, const DynamicPluginTensorDesc* outputs, int nbOutputs)
{
    // Validate input arguments
    assert(nbOutputs == 1);
    assert(nbInputs == 2);
    assert(mType == inputs[0].desc.type);
    assert(mType == inputs[1].desc.type);
    const auto& inDims0 = inputs[0].desc.dims;
    const auto& inDims1 = inputs[1].desc.dims;
    TRT_UNUSED inDims1; // only referenced inside asserts; silence release-build warning
    assert(inDims0.nbDims == inDims1.nbDims);
    assert(std::equal(inDims0.d, inDims0.d + inDims0.nbDims, inDims1.d));
    assert(inDims0.nbDims == 5);
    mLd = inDims0.d[HDIM]; // hiddensize
    assert(inDims0.d[3] == 1);
    assert(inDims0.d[4] == 1);
}
// This plugin requests no scratch workspace from TensorRT.
size_t SkipLayerNormPluginDynamic::getWorkspaceSize(
    const PluginTensorDesc* inputs, int nbInputs, const PluginTensorDesc* outputs, int nbOutputs) const
{
    return 0;
}
// Runs the skip-layernorm computation on `stream`, dispatching on the plugin's
// data type (float or half) and on whether a bias buffer is present.
// Returns the status from computeSkipLayerNorm, or -1 for an unsupported data
// type (after asserting in debug builds).
int SkipLayerNormPluginDynamic::enqueue(const PluginTensorDesc* inputDesc, const PluginTensorDesc* outputDesc,
    const void* const* inputs, void* const* outputs, void* workspace, cudaStream_t stream)
{
    // Total element count of input 0; mLd is the inner (hidden) length.
    const int inputVolume = volume(inputDesc[0].dims);
    int status = -1;
    // Our plugin outputs only one tensor
    // Launch CUDA kernel wrapper and save its return value
    if (mType == DataType::kFLOAT)
    {
        const float* input = static_cast<const float*>(inputs[0]);
        const float* skip = static_cast<const float*>(inputs[1]);
        float* output = static_cast<float*>(outputs[0]);
        // bias may be null when mHasBias is false; only the <.., true>
        // instantiation below receives a valid buffer.
        float* bias = reinterpret_cast<float*>(mBiasDev);
        const float* beta = static_cast<const float*>(mBetaDev);
        const float* gamma = static_cast<const float*>(mGammaDev);
        if (mHasBias)
        {
            status
                = computeSkipLayerNorm<float, true>(stream, mLd, inputVolume, input, skip, beta, gamma, output, bias);
        }
        else
        {
            status
                = computeSkipLayerNorm<float, false>(stream, mLd, inputVolume, input, skip, beta, gamma, output, bias);
        }
    }
    else if (mType == DataType::kHALF)
    {
        // Same dispatch as above, with half-precision buffers.
        const half* input = static_cast<const half*>(inputs[0]);
        const half* skip = static_cast<const half*>(inputs[1]);
        half* output = static_cast<half*>(outputs[0]);
        half* bias = reinterpret_cast<half*>(mBiasDev);
        const half* beta = static_cast<const half*>(mBetaDev);
        const half* gamma = static_cast<const half*>(mGammaDev);
        if (mHasBias)
        {
            status = computeSkipLayerNorm<half, true>(stream, mLd, inputVolume, input, skip, beta, gamma, output, bias);
        }
        else
        {
            status
                = computeSkipLayerNorm<half, false>(stream, mLd, inputVolume, input, skip, beta, gamma, output, bias);
        }
    }
    else
    {
        gLogError << "Unsupported Type\n";
        assert(false);
    }
    return status;
}
// IPluginV2Ext Methods
// IPluginV2Ext Methods

// Output type mirrors the (float or half) input type; both inputs must match.
DataType SkipLayerNormPluginDynamic::getOutputDataType(int index, const DataType* inputTypes, int nbInputs) const
{
    assert(index == 0);
    assert(nbInputs == 2);
    assert(inputTypes[0] == DataType::kFLOAT || inputTypes[0] == DataType::kHALF);
    assert(inputTypes[0] == inputTypes[1]);
    return inputTypes[0];
}
// IPluginV2 Methods
// IPluginV2 Methods

// Registry type string used to match this plugin with its creator.
const char* SkipLayerNormPluginDynamic::getPluginType() const
{
    return SKIP_LAYER_NORM_NAME;
}
// Registry version string paired with getPluginType().
const char* SkipLayerNormPluginDynamic::getPluginVersion() const
{
    return SKIP_LAYER_NORM_VERSION;
}
// This plugin always produces exactly one output tensor.
int SkipLayerNormPluginDynamic::getNbOutputs() const
{
    return 1;
}
// Allocates device buffers for gamma/beta (and bias, when present) and uploads
// the host weights, converting them to mType (float or half) on the way.
// When a weight's `values` pointer is null (the deserialization path), its
// device buffer was already populated by the constructor and nothing is done.
// Returns 0 on success; CHECK aborts on CUDA errors.
//
// Fix: the original issued two cudaMalloc calls into the same pointer for
// gamma and beta (a sizeof(float)-sized one immediately overwritten by the
// wordSize-sized one), leaking the first allocation on every initialize().
int SkipLayerNormPluginDynamic::initialize()
{
    const size_t wordSize = samplesCommon::getElementSize(mType);
    if (mGamma.values)
    {
        // target size in the plugin's data type
        const size_t nbBytes = mGamma.count * wordSize;
        CHECK(cudaMalloc(&mGammaDev, nbBytes));
        if (mType == DataType::kFLOAT)
        {
            convertAndCopyToDevice(mGamma, static_cast<float*>(mGammaDev));
        }
        else
        {
            convertAndCopyToDevice(mGamma, static_cast<half*>(mGammaDev));
        }
    }
    if (mBeta.values)
    {
        const size_t nbBytes = mBeta.count * wordSize;
        CHECK(cudaMalloc(&mBetaDev, nbBytes));
        if (mType == DataType::kFLOAT)
        {
            convertAndCopyToDevice(mBeta, static_cast<float*>(mBetaDev));
        }
        else
        {
            convertAndCopyToDevice(mBeta, static_cast<half*>(mBetaDev));
        }
    }
    if (mHasBias && mBias.values)
    {
        // target size
        const size_t nbBytes = mBias.count * wordSize;
        CHECK(cudaMalloc(&mBiasDev, nbBytes));
        if (mType == DataType::kFLOAT)
        {
            convertAndCopyToDevice(mBias, reinterpret_cast<float*>(mBiasDev));
        }
        else
        {
            convertAndCopyToDevice(mBias, reinterpret_cast<half*>(mBiasDev));
        }
    }
    return 0;
}
// Releases the device-side weight buffers allocated in initialize() (or by
// the deserializing constructor). Pointers are nulled after freeing so a
// repeated terminate()/initialize() cycle cannot double-free; cudaFree(nullptr)
// is a documented no-op.
void SkipLayerNormPluginDynamic::terminate()
{
    gLogVerbose << "SKIPLN terminate start" << std::endl;
    cudaFree(mGammaDev);
    mGammaDev = nullptr;
    cudaFree(mBetaDev);
    mBetaDev = nullptr;
    if (mHasBias)
    {
        cudaFree(mBiasDev);
        mBiasDev = nullptr;
    }
    gLogVerbose << "SKIPLN terminate done" << std::endl;
}
// Total byte count serialize() will write: the three scalar fields plus the
// beta and gamma blobs (and the optional bias blob) in the plugin's data type.
size_t SkipLayerNormPluginDynamic::getSerializationSize() const
{
    const size_t wordSize = samplesCommon::getElementSize(mType);
    size_t total = sizeof(DataType) + sizeof(mLd) + sizeof(mHasBias);
    total += 2 * wordSize * mLd; // beta + gamma
    if (mHasBias)
    {
        total += wordSize * mLd; // bias
    }
    return total;
}
void SkipLayerNormPluginDynamic::serialize(void* buffer) const
{
serialize_value(&buffer, mType);
serialize_value(&buffer, mLd);
serialize_value(&buffer, mHasBias);
const size_t wordSize = samplesCommon::getElementSize(mType);
char* d = static_cast<char*>(buffer);
serFromDev(d, static_cast<char*>(mBetaDev), mLd * wordSize);
serFromDev(d, static_cast<char*>(mGammaDev), mLd * wordSize);
if (mHasBias)
{
const size_t wordSize = samplesCommon::getElementSize(mType);
serFromDev(d, mBiasDev, mLd * wordSize);
}
}
// Self-deletion entry point required by the IPluginV2 ownership model.
void SkipLayerNormPluginDynamic::destroy()
{
    // This gets called when the network containing plugin is destroyed
    delete this;
}
// Stores the namespace TensorRT assigns to this plugin instance.
void SkipLayerNormPluginDynamic::setPluginNamespace(const char* libNamespace)
{
    mNamespace = libNamespace;
}
// Returns the namespace previously set via setPluginNamespace().
const char* SkipLayerNormPluginDynamic::getPluginNamespace() const
{
    return mNamespace.c_str();
}
/////////////////////////////////////////////////////////
// Wires the class's attribute list (declared elsewhere) into the field
// collection returned by getFieldNames().
SkipLayerNormPluginDynamicCreator::SkipLayerNormPluginDynamicCreator()
{
    mFC.nbFields = mPluginAttributes.size();
    mFC.fields = mPluginAttributes.data();
}
// Type string under which this creator registers its plugin.
const char* SkipLayerNormPluginDynamicCreator::getPluginName() const
{
    return SKIP_LAYER_NORM_NAME;
}
// Version string paired with getPluginName() in the plugin registry.
const char* SkipLayerNormPluginDynamicCreator::getPluginVersion() const
{
    return SKIP_LAYER_NORM_VERSION;
}
// Exposes the attribute schema populated in the constructor.
const PluginFieldCollection* SkipLayerNormPluginDynamicCreator::getFieldNames()
{
    return &mFC;
}
// Builds a plugin instance from network-supplied attributes (ld, type_id,
// beta, gamma, optional bias).
// Fix: the original logged invalid type/beta/gamma but then constructed the
// plugin from the bad values anyway. Per the IPluginCreator contract we now
// return nullptr on validation failure so the caller can handle the error.
IPluginV2* SkipLayerNormPluginDynamicCreator::createPlugin(const char* name, const PluginFieldCollection* fc)
{
    gLogVerbose << "Creating SkipLayerNormPluginDynamicCreator...\n";
    int ld = 0;
    Weights beta{DataType::kFLOAT, nullptr, 0};
    Weights gamma{DataType::kFLOAT, nullptr, 0};
    Weights bias{DataType::kFLOAT, nullptr, 0};
    int typeId = -1;
    for (int i = 0; i < fc->nbFields; i++)
    {
        std::string field_name(fc->fields[i].name);
        if (field_name.compare("ld") == 0)
        {
            ld = *static_cast<const int*>(fc->fields[i].data);
            gLogVerbose << "Building ld: " << ld << std::endl;
        }
        if (field_name.compare("type_id") == 0)
        {
            typeId = *static_cast<const int*>(fc->fields[i].data);
            gLogVerbose << "Building typeId: " << typeId << std::endl;
        }
        if (field_name.compare("beta") == 0)
        {
            gLogVerbose << "Building beta...\n";
            beta.values = fc->fields[i].data;
            beta.count = fc->fields[i].length;
            beta.type = fieldTypeToDataType(fc->fields[i].type);
        }
        if (field_name.compare("gamma") == 0)
        {
            gLogVerbose << "Building gamma...\n";
            gamma.values = fc->fields[i].data;
            gamma.count = fc->fields[i].length;
            gamma.type = fieldTypeToDataType(fc->fields[i].type);
        }
        if (field_name.compare("bias") == 0)
        {
            gLogVerbose << "Building bias...\n";
            bias.values = fc->fields[i].data;
            bias.count = fc->fields[i].length;
            bias.type = fieldTypeToDataType(fc->fields[i].type);
        }
    }
    // Validate before constructing; a null return tells TensorRT creation failed.
    if (typeId < 0 || typeId > 3)
    {
        gLogError << "SkipLayerNorm: Invalid type ID: " << typeId << std::endl;
        return nullptr;
    }
    if (beta.count <= 0 || beta.values == nullptr)
    {
        gLogError << "SkipLayerNorm: invalid beta" << std::endl;
        return nullptr;
    }
    if (gamma.count <= 0 || gamma.values == nullptr)
    {
        gLogError << "SkipLayerNorm: invalid gamma" << std::endl;
        return nullptr;
    }
    DataType type = static_cast<DataType>(typeId);
    if (bias.values == nullptr)
    {
        return new SkipLayerNormPluginDynamic(name, type, ld, beta, gamma);
    }
    return new SkipLayerNormPluginDynamic(name, type, ld, beta, gamma, bias);
}
// Reconstructs a plugin from a byte stream produced by serialize().
IPluginV2* SkipLayerNormPluginDynamicCreator::deserializePlugin(
    const char* name, const void* serialData, size_t serialLength)
{
    // This object will be deleted when the network is destroyed, which will
    // call SkipLayerNormPluginDynamic::destroy()
    return new SkipLayerNormPluginDynamic(name, serialData, serialLength);
}
// Stores the namespace TensorRT assigns to this creator.
void SkipLayerNormPluginDynamicCreator::setPluginNamespace(const char* libNamespace)
{
    mNamespace = libNamespace;
}
// Returns the namespace previously set via setPluginNamespace().
const char* SkipLayerNormPluginDynamicCreator::getPluginNamespace() const
{
    return mNamespace.c_str();
}
} // namespace bert
|
a5c5aa0d03a08b28f9f47ed2ff6b9d817f74c16c.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// Shuffle intrinsics CUDA Sample
// This sample demonstrates the use of the shuffle intrinsic
// First, a simple example of a prefix sum using the shuffle to
// perform a scan operation is provided.
// Secondly, a more involved example of computing an integral image
// using the shuffle intrinsic is provided, where the shuffle
// scan operation and shuffle xor operations are used
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
#include <helper_functions.h>
#include "shfl_integral_image.cuh"
// Scan using shfl - takes log2(n) steps
// This function demonstrates basic use of the shuffle intrinsic, __shfl_up,
// to perform a scan operation across a block.
// First, it performs a scan (prefix sum in this case) inside a warp
// Then to continue the scan operation across the block,
// each warp's sum is placed into shared memory. A single warp
// then performs a shuffle scan on that shared memory. The results
// are then uniformly added to each warp's threads.
// This pyramid type approach is continued by placing each block's
// final sum in global memory and prefix summing that via another kernel call,
// then uniformly adding across the input data via the uniform_add<<<>>> kernel.
// Block-wide inclusive scan (prefix sum) of `data` using warp shuffles.
//   Phase 1: each warp scans its values with __shfl_up_sync.
//   Phase 2: the last lane of each warp deposits its warp total into shared
//            memory, and warp 0 scans those totals.
//   Phase 3: every thread adds the total of all preceding warps.
// `width` is the shuffle segment width (32 in this sample). If `partial_sums`
// is non-null, partial_sums[blockIdx.x] receives the block total for a
// follow-up scan pass.
// NOTE(review): there is no `id < n` bounds guard, so the launch must cover
// the data exactly; dynamic shared memory must hold one int per warp.
__global__ void shfl_scan_test(int *data, int width, int *partial_sums = NULL) {
  extern __shared__ int sums[];
  int id = ((blockIdx.x * blockDim.x) + threadIdx.x);
  int lane_id = id % warpSize;
  // determine a warp_id within a block
  int warp_id = threadIdx.x / warpSize;
  // Below is the basic structure of using a shfl instruction
  // for a scan.
  // Record "value" as a variable - we accumulate it along the way
  int value = data[id];
  // Now accumulate in log steps up the chain
  // compute sums, with another thread's value who is
  // distance delta away (i).  Note
  // those threads where the thread 'i' away would have
  // been out of bounds of the warp are unaffected.  This
  // creates the scan sum.
#pragma unroll
  for (int i = 1; i <= width; i *= 2) {
    unsigned int mask = 0xffffffff;
    int n = __shfl_up_sync(mask, value, i, width);
    if (lane_id >= i) value += n;
  }
  // value now holds the scan value for the individual thread
  // next sum the largest values for each warp
  // write the sum of the warp to smem
  if (threadIdx.x % warpSize == warpSize - 1) {
    sums[warp_id] = value;
  }
  __syncthreads();
  //
  // scan sum the warp sums
  // the same shfl scan operation, but performed on warp sums
  //
  if (warp_id == 0 && lane_id < (blockDim.x / warpSize)) {
    int warp_sum = sums[lane_id];
    // participation mask: one bit per warp-total lane
    int mask = (1 << (blockDim.x / warpSize)) - 1;
    for (int i = 1; i <= (blockDim.x / warpSize); i *= 2) {
      int n = __shfl_up_sync(mask, warp_sum, i, (blockDim.x / warpSize));
      if (lane_id >= i) warp_sum += n;
    }
    sums[lane_id] = warp_sum;
  }
  __syncthreads();
  // perform a uniform add across warps in the block
  // read neighbouring warp's sum and add it to threads value
  int blockSum = 0;
  if (warp_id > 0) {
    blockSum = sums[warp_id - 1];
  }
  value += blockSum;
  // Now write out our result
  data[id] = value;
  // last thread has sum, write write out the block's sum
  if (partial_sums != NULL && threadIdx.x == blockDim.x - 1) {
    partial_sums[blockIdx.x] = value;
  }
}
// Uniform add: add partial sums array
// Uniform add: every thread in block b adds partial_sums[b] to its element.
// (The host launches this one block "late" on data + blockSize, so block b
// receives the scanned total of the blocks before it.)
// Fixes vs. original: the guard was `id > len`, which let id == len write one
// element past the end; and the early `return` sat before __syncthreads(),
// which is undefined when only some threads of a block exit. The load into
// shared memory and the barrier now run unconditionally, and the bounds check
// guards only the global write.
__global__ void uniform_add(int *data, int *partial_sums, int len) {
  __shared__ int buf;
  int id = ((blockIdx.x * blockDim.x) + threadIdx.x);
  if (threadIdx.x == 0) {
    buf = partial_sums[blockIdx.x];
  }
  __syncthreads();
  if (id < len) {
    data[id] += buf;
  }
}
// Integer ceiling division: the smallest k such that k * divisor >= dividend.
static unsigned int iDivUp(unsigned int dividend, unsigned int divisor) {
  unsigned int quotient = dividend / divisor;
  return (dividend % divisor != 0) ? quotient + 1 : quotient;
}
// This function verifies the shuffle scan result, for the simple
// prefix sum case.
// Verifies the GPU scan result against a CPU in-place prefix sum of h_data,
// then times 100 naive CPU passes for a speed comparison.
// NOTE: h_data is transformed in place into its own prefix sum; the timing
// loop keeps re-summing the already-transformed buffer, so it measures cost
// only, not a meaningful result.
// NOTE(review): `diff` is a signed sum of per-element differences, so errors
// of opposite sign could cancel; a sum of absolute differences would be
// stricter.
bool CPUverify(int *h_data, int *h_result, int n_elements) {
  // cpu verify
  for (int i = 0; i < n_elements - 1; i++) {
    h_data[i + 1] = h_data[i] + h_data[i + 1];
  }
  int diff = 0;
  for (int i = 0; i < n_elements; i++) {
    diff += h_data[i] - h_result[i];
  }
  printf("CPU verify result diff (GPUvsCPU) = %d\n", diff);
  bool bTestResult = false;
  if (diff == 0) bTestResult = true;
  StopWatchInterface *hTimer = NULL;
  sdkCreateTimer(&hTimer);
  sdkResetTimer(&hTimer);
  sdkStartTimer(&hTimer);
  // benchmark-only loop: repeat the naive prefix sum 100 times
  for (int j = 0; j < 100; j++)
    for (int i = 0; i < n_elements - 1; i++) {
      h_data[i + 1] = h_data[i] + h_data[i + 1];
    }
  sdkStopTimer(&hTimer);
  double cput = sdkGetTimerValue(&hTimer);
  printf("CPU sum (naive) took %f ms\n", cput / 100);
  return bTestResult;
}
// this verifies the row scan result for synthetic data of all 1's
// Checks a row-wise inclusive prefix sum of an all-ones w x h image:
// entry (row, col) must equal col + 1. Returns the sum of absolute
// deviations, so 0 means every row scanned correctly.
unsigned int verifyDataRowSums(unsigned int *h_image, int w, int h) {
  unsigned int totalError = 0;
  for (int row = 0; row < h; row++) {
    const unsigned int *rowData = h_image + row * w;
    for (int col = 0; col < w; col++) {
      int expected = col + 1;
      totalError += abs(expected - static_cast<int>(rowData[col]));
    }
  }
  return totalError;
}
// End-to-end prefix-sum test on 65536 ones: block-level scan, scan of the
// per-block partial sums, then a uniform add to propagate them.
// Returns true when the GPU result matches the CPU reference.
bool shuffle_simple_test(int argc, char **argv) {
  int *h_data, *h_partial_sums, *h_result;
  int *d_data, *d_partial_sums;
  const int n_elements = 65536;
  int sz = sizeof(int) * n_elements;
  int cuda_device = 0;
  printf("Starting shfl_scan\n");
  // use command-line specified CUDA device, otherwise use device with highest
  // Gflops/s
  cuda_device = findCudaDevice(argc, (const char **)argv);
  hipDeviceProp_t deviceProp;
  checkCudaErrors(hipGetDevice(&cuda_device));
  checkCudaErrors(hipGetDeviceProperties(&deviceProp, cuda_device));
  printf("> Detected Compute SM %d.%d hardware with %d multi-processors\n",
         deviceProp.major, deviceProp.minor, deviceProp.multiProcessorCount);
  // __shfl intrinsic needs SM 3.0 or higher
  if (deviceProp.major < 3) {
    printf("> __shfl() intrinsic requires device SM 3.0+\n");
    printf("> Waiving test.\n");
    exit(EXIT_WAIVED);
  }
  // pinned host buffers for fast transfers
  checkCudaErrors(hipHostMalloc(reinterpret_cast<void **>(&h_data),
                                sizeof(int) * n_elements));
  checkCudaErrors(hipHostMalloc(reinterpret_cast<void **>(&h_result),
                                sizeof(int) * n_elements));
  // initialize data:
  printf("Computing Simple Sum test\n");
  printf("---------------------------------------------------\n");
  printf("Initialize test data [1, 1, 1...]\n");
  for (int i = 0; i < n_elements; i++) {
    h_data[i] = 1;
  }
  int blockSize = 256;
  int gridSize = n_elements / blockSize;
  int nWarps = blockSize / 32;
  // one int of dynamic shared memory per warp (warp totals)
  int shmem_sz = nWarps * sizeof(int);
  int n_partialSums = n_elements / blockSize;
  int partial_sz = n_partialSums * sizeof(int);
  printf("Scan summation for %d elements, %d partial sums\n", n_elements,
         n_elements / blockSize);
  int p_blockSize = min(n_partialSums, blockSize);
  int p_gridSize = iDivUp(n_partialSums, p_blockSize);
  printf("Partial summing %d elements with %d blocks of size %d\n",
         n_partialSums, p_gridSize, p_blockSize);
  // initialize a timer
  hipEvent_t start, stop;
  checkCudaErrors(hipEventCreate(&start));
  checkCudaErrors(hipEventCreate(&stop));
  float et = 0;
  float inc = 0;
  checkCudaErrors(hipMalloc(reinterpret_cast<void **>(&d_data), sz));
  checkCudaErrors(
      hipMalloc(reinterpret_cast<void **>(&d_partial_sums), partial_sz));
  checkCudaErrors(hipMemset(d_partial_sums, 0, partial_sz));
  checkCudaErrors(
      hipHostMalloc(reinterpret_cast<void **>(&h_partial_sums), partial_sz));
  checkCudaErrors(hipMemcpy(d_data, h_data, sz, hipMemcpyHostToDevice));
  checkCudaErrors(hipEventRecord(start, 0));
  // pass 1: per-block scan; pass 2: scan the block totals; pass 3: uniform
  // add, offset by one block since block 0 needs no correction.
  hipLaunchKernelGGL(( shfl_scan_test), dim3(gridSize), dim3(blockSize), shmem_sz, 0, d_data, 32, d_partial_sums);
  hipLaunchKernelGGL(( shfl_scan_test), dim3(p_gridSize), dim3(p_blockSize), shmem_sz, 0, d_partial_sums, 32);
  hipLaunchKernelGGL(( uniform_add), dim3(gridSize - 1), dim3(blockSize), 0, 0, d_data + blockSize, d_partial_sums,
                                            n_elements);
  checkCudaErrors(hipEventRecord(stop, 0));
  checkCudaErrors(hipEventSynchronize(stop));
  checkCudaErrors(hipEventElapsedTime(&inc, start, stop));
  et += inc;
  checkCudaErrors(hipMemcpy(h_result, d_data, sz, hipMemcpyDeviceToHost));
  checkCudaErrors(hipMemcpy(h_partial_sums, d_partial_sums, partial_sz,
                            hipMemcpyDeviceToHost));
  printf("Test Sum: %d\n", h_partial_sums[n_partialSums - 1]);
  printf("Time (ms): %f\n", et);
  printf("%d elements scanned in %f ms -> %f MegaElements/s\n", n_elements, et,
         n_elements / (et / 1000.0f) / 1000000.0f);
  bool bTestResult = CPUverify(h_data, h_result, n_elements);
  checkCudaErrors(hipHostFree(h_data));
  checkCudaErrors(hipHostFree(h_result));
  checkCudaErrors(hipHostFree(h_partial_sums));
  checkCudaErrors(hipFree(d_data));
  checkCudaErrors(hipFree(d_partial_sums));
  return bTestResult;
}
// This function tests creation of an integral image using
// synthetic data, of size 1920x1080 pixels greyscale.
// Builds an integral image of a synthetic 1920x1080 all-ones image in two
// passes (row-wise scan, then column-wise scan) and checks the bottom-right
// value equals w*h.
// NOTE(review): d_integral_image is allocated with n_elements*sizeof(int)*4
// but only `sz` bytes are cleared/verified — presumably extra scratch for the
// uint4-based kernel; confirm against shfl_integral_image.cuh. The hip events
// are never destroyed (minor resource leak in a one-shot test).
bool shuffle_integral_image_test() {
  char *d_data;
  unsigned int *h_image;
  unsigned int *d_integral_image;
  int w = 1920;
  int h = 1080;
  int n_elements = w * h;
  int sz = sizeof(unsigned int) * n_elements;
  printf("\nComputing Integral Image Test on size %d x %d synthetic data\n", w,
         h);
  printf("---------------------------------------------------\n");
  checkCudaErrors(hipHostMalloc(reinterpret_cast<void **>(&h_image), sz));
  // fill test "image" with synthetic 1's data
  memset(h_image, 0, sz);
  // each thread handles 16 values, use 1 block/row
  int blockSize = iDivUp(w, 16);
  // launch 1 block / row
  int gridSize = h;
  // Create a synthetic image for testing: hipMemset writes byte value 1 into
  // d_data, which the kernel consumes as packed bytes.
  checkCudaErrors(hipMalloc(reinterpret_cast<void **>(&d_data), sz));
  checkCudaErrors(hipMalloc(reinterpret_cast<void **>(&d_integral_image),
                            n_elements * sizeof(int) * 4));
  checkCudaErrors(hipMemset(d_data, 1, sz));
  checkCudaErrors(hipMemset(d_integral_image, 0, sz));
  hipEvent_t start, stop;
  hipEventCreate(&start);
  hipEventCreate(&stop);
  float et = 0;
  unsigned int err;
  // Execute scan line prefix sum kernel, and time it
  hipEventRecord(start);
  hipLaunchKernelGGL(( shfl_intimage_rows), dim3(gridSize), dim3(blockSize), 0, 0,
      reinterpret_cast<uint4 *>(d_data),
      reinterpret_cast<uint4 *>(d_integral_image));
  hipEventRecord(stop);
  checkCudaErrors(hipEventSynchronize(stop));
  checkCudaErrors(hipEventElapsedTime(&et, start, stop));
  printf("Method: Fast  Time (GPU Timer): %f ms ", et);
  // verify the scan line results
  checkCudaErrors(
      hipMemcpy(h_image, d_integral_image, sz, hipMemcpyDeviceToHost));
  err = verifyDataRowSums(h_image, w, h);
  printf("Diff = %d\n", err);
  // Execute column prefix sum kernel and time it
  dim3 blockSz(32, 8);
  dim3 testGrid(w / blockSz.x, 1);
  hipEventRecord(start);
  hipLaunchKernelGGL(( shfl_vertical_shfl), dim3(testGrid), dim3(blockSz), 0, 0, (unsigned int *)d_integral_image, w,
                                             h);
  hipEventRecord(stop);
  checkCudaErrors(hipEventSynchronize(stop));
  checkCudaErrors(hipEventElapsedTime(&et, start, stop));
  printf("Method: Vertical Scan  Time (GPU Timer): %f ms ", et);
  // Verify the column results
  checkCudaErrors(
      hipMemcpy(h_image, d_integral_image, sz, hipMemcpyDeviceToHost));
  printf("\n");
  int finalSum = h_image[w * h - 1];
  printf("CheckSum: %d, (expect %dx%d=%d)\n", finalSum, w, h, w * h);
  checkCudaErrors(hipFree(d_data));
  checkCudaErrors(hipFree(d_integral_image));
  checkCudaErrors(hipHostFree(h_image));
  // verify final sum: if the final value in the corner is the same as the size
  // of the buffer (all 1's) then the integral image was generated successfully
  return (finalSum == w * h) ? true : false;
}
// Entry point: waives the run on pre-SM3.0 hardware (no shuffle support),
// then runs both sub-tests and reports combined pass/fail via the exit code.
int main(int argc, char *argv[]) {
  // Initialization.  The shuffle intrinsic is not available on SM < 3.0
  // so waive the test if the hardware is not present.
  int cuda_device = 0;
  printf("Starting shfl_scan\n");
  // use command-line specified CUDA device, otherwise use device with highest
  // Gflops/s
  cuda_device = findCudaDevice(argc, (const char **)argv);
  hipDeviceProp_t deviceProp;
  checkCudaErrors(hipGetDevice(&cuda_device));
  checkCudaErrors(hipGetDeviceProperties(&deviceProp, cuda_device));
  printf("> Detected Compute SM %d.%d hardware with %d multi-processors\n",
         deviceProp.major, deviceProp.minor, deviceProp.multiProcessorCount);
  // __shfl intrinsic needs SM 3.0 or higher
  if (deviceProp.major < 3) {
    printf("> __shfl() intrinsic requires device SM 3.0+\n");
    printf("> Waiving test.\n");
    exit(EXIT_WAIVED);
  }
  bool bTestResult = true;
  bool simpleTest = shuffle_simple_test(argc, argv);
  bool intTest = shuffle_integral_image_test();
  // bitwise AND of bools: both tests must pass
  bTestResult = simpleTest & intTest;
  exit((bTestResult) ? EXIT_SUCCESS : EXIT_FAILURE);
}
| a5c5aa0d03a08b28f9f47ed2ff6b9d817f74c16c.cu | /* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// Shuffle intrinsics CUDA Sample
// This sample demonstrates the use of the shuffle intrinsic
// First, a simple example of a prefix sum using the shuffle to
// perform a scan operation is provided.
// Secondly, a more involved example of computing an integral image
// using the shuffle intrinsic is provided, where the shuffle
// scan operation and shuffle xor operations are used
#include <stdio.h>
#include <cuda_runtime.h>
#include <helper_cuda.h>
#include <helper_functions.h>
#include "shfl_integral_image.cuh"
// Scan using shfl - takes log2(n) steps
// This function demonstrates basic use of the shuffle intrinsic, __shfl_up,
// to perform a scan operation across a block.
// First, it performs a scan (prefix sum in this case) inside a warp
// Then to continue the scan operation across the block,
// each warp's sum is placed into shared memory. A single warp
// then performs a shuffle scan on that shared memory. The results
// are then uniformly added to each warp's threads.
// This pyramid type approach is continued by placing each block's
// final sum in global memory and prefix summing that via another kernel call,
// then uniformly adding across the input data via the uniform_add<<<>>> kernel.
// Block-wide inclusive scan (prefix sum) of `data` using warp shuffles.
//   Phase 1: each warp scans its values with __shfl_up_sync.
//   Phase 2: the last lane of each warp deposits its warp total into shared
//            memory, and warp 0 scans those totals.
//   Phase 3: every thread adds the total of all preceding warps.
// `width` is the shuffle segment width (32 in this sample). If `partial_sums`
// is non-null, partial_sums[blockIdx.x] receives the block total for a
// follow-up scan pass.
// NOTE(review): there is no `id < n` bounds guard, so the launch must cover
// the data exactly; dynamic shared memory must hold one int per warp.
__global__ void shfl_scan_test(int *data, int width, int *partial_sums = NULL) {
  extern __shared__ int sums[];
  int id = ((blockIdx.x * blockDim.x) + threadIdx.x);
  int lane_id = id % warpSize;
  // determine a warp_id within a block
  int warp_id = threadIdx.x / warpSize;
  // Below is the basic structure of using a shfl instruction
  // for a scan.
  // Record "value" as a variable - we accumulate it along the way
  int value = data[id];
  // Now accumulate in log steps up the chain
  // compute sums, with another thread's value who is
  // distance delta away (i).  Note
  // those threads where the thread 'i' away would have
  // been out of bounds of the warp are unaffected.  This
  // creates the scan sum.
#pragma unroll
  for (int i = 1; i <= width; i *= 2) {
    unsigned int mask = 0xffffffff;
    int n = __shfl_up_sync(mask, value, i, width);
    if (lane_id >= i) value += n;
  }
  // value now holds the scan value for the individual thread
  // next sum the largest values for each warp
  // write the sum of the warp to smem
  if (threadIdx.x % warpSize == warpSize - 1) {
    sums[warp_id] = value;
  }
  __syncthreads();
  //
  // scan sum the warp sums
  // the same shfl scan operation, but performed on warp sums
  //
  if (warp_id == 0 && lane_id < (blockDim.x / warpSize)) {
    int warp_sum = sums[lane_id];
    // participation mask: one bit per warp-total lane
    int mask = (1 << (blockDim.x / warpSize)) - 1;
    for (int i = 1; i <= (blockDim.x / warpSize); i *= 2) {
      int n = __shfl_up_sync(mask, warp_sum, i, (blockDim.x / warpSize));
      if (lane_id >= i) warp_sum += n;
    }
    sums[lane_id] = warp_sum;
  }
  __syncthreads();
  // perform a uniform add across warps in the block
  // read neighbouring warp's sum and add it to threads value
  int blockSum = 0;
  if (warp_id > 0) {
    blockSum = sums[warp_id - 1];
  }
  value += blockSum;
  // Now write out our result
  data[id] = value;
  // last thread has sum, write write out the block's sum
  if (partial_sums != NULL && threadIdx.x == blockDim.x - 1) {
    partial_sums[blockIdx.x] = value;
  }
}
// Uniform add: add partial sums array
// Uniform add: every thread in block b adds partial_sums[b] to its element.
// (The host launches this one block "late" on data + blockSize, so block b
// receives the scanned total of the blocks before it.)
// Fixes vs. original: the guard was `id > len`, which let id == len write one
// element past the end; and the early `return` sat before __syncthreads(),
// which is undefined when only some threads of a block exit. The load into
// shared memory and the barrier now run unconditionally, and the bounds check
// guards only the global write.
__global__ void uniform_add(int *data, int *partial_sums, int len) {
  __shared__ int buf;
  int id = ((blockIdx.x * blockDim.x) + threadIdx.x);
  if (threadIdx.x == 0) {
    buf = partial_sums[blockIdx.x];
  }
  __syncthreads();
  if (id < len) {
    data[id] += buf;
  }
}
// Integer ceiling division: the smallest k such that k * divisor >= dividend.
static unsigned int iDivUp(unsigned int dividend, unsigned int divisor) {
  unsigned int quotient = dividend / divisor;
  return (dividend % divisor != 0) ? quotient + 1 : quotient;
}
// This function verifies the shuffle scan result, for the simple
// prefix sum case.
// Verifies the GPU scan result against a CPU in-place prefix sum of h_data,
// then times 100 naive CPU passes for a speed comparison.
// NOTE: h_data is transformed in place into its own prefix sum; the timing
// loop keeps re-summing the already-transformed buffer, so it measures cost
// only, not a meaningful result.
// NOTE(review): `diff` is a signed sum of per-element differences, so errors
// of opposite sign could cancel; a sum of absolute differences would be
// stricter.
bool CPUverify(int *h_data, int *h_result, int n_elements) {
  // cpu verify
  for (int i = 0; i < n_elements - 1; i++) {
    h_data[i + 1] = h_data[i] + h_data[i + 1];
  }
  int diff = 0;
  for (int i = 0; i < n_elements; i++) {
    diff += h_data[i] - h_result[i];
  }
  printf("CPU verify result diff (GPUvsCPU) = %d\n", diff);
  bool bTestResult = false;
  if (diff == 0) bTestResult = true;
  StopWatchInterface *hTimer = NULL;
  sdkCreateTimer(&hTimer);
  sdkResetTimer(&hTimer);
  sdkStartTimer(&hTimer);
  // benchmark-only loop: repeat the naive prefix sum 100 times
  for (int j = 0; j < 100; j++)
    for (int i = 0; i < n_elements - 1; i++) {
      h_data[i + 1] = h_data[i] + h_data[i + 1];
    }
  sdkStopTimer(&hTimer);
  double cput = sdkGetTimerValue(&hTimer);
  printf("CPU sum (naive) took %f ms\n", cput / 100);
  return bTestResult;
}
// this verifies the row scan result for synthetic data of all 1's
// Checks a row-wise inclusive prefix sum of an all-ones w x h image:
// entry (row, col) must equal col + 1. Returns the sum of absolute
// deviations, so 0 means every row scanned correctly.
unsigned int verifyDataRowSums(unsigned int *h_image, int w, int h) {
  unsigned int totalError = 0;
  for (int row = 0; row < h; row++) {
    const unsigned int *rowData = h_image + row * w;
    for (int col = 0; col < w; col++) {
      int expected = col + 1;
      totalError += abs(expected - static_cast<int>(rowData[col]));
    }
  }
  return totalError;
}
// End-to-end prefix-sum test on 65536 ones: block-level scan, scan of the
// per-block partial sums, then a uniform add to propagate them.
// Returns true when the GPU result matches the CPU reference.
bool shuffle_simple_test(int argc, char **argv) {
  int *h_data, *h_partial_sums, *h_result;
  int *d_data, *d_partial_sums;
  const int n_elements = 65536;
  int sz = sizeof(int) * n_elements;
  int cuda_device = 0;
  printf("Starting shfl_scan\n");
  // use command-line specified CUDA device, otherwise use device with highest
  // Gflops/s
  cuda_device = findCudaDevice(argc, (const char **)argv);
  cudaDeviceProp deviceProp;
  checkCudaErrors(cudaGetDevice(&cuda_device));
  checkCudaErrors(cudaGetDeviceProperties(&deviceProp, cuda_device));
  printf("> Detected Compute SM %d.%d hardware with %d multi-processors\n",
         deviceProp.major, deviceProp.minor, deviceProp.multiProcessorCount);
  // __shfl intrinsic needs SM 3.0 or higher
  if (deviceProp.major < 3) {
    printf("> __shfl() intrinsic requires device SM 3.0+\n");
    printf("> Waiving test.\n");
    exit(EXIT_WAIVED);
  }
  // pinned host buffers for fast transfers
  checkCudaErrors(cudaMallocHost(reinterpret_cast<void **>(&h_data),
                                 sizeof(int) * n_elements));
  checkCudaErrors(cudaMallocHost(reinterpret_cast<void **>(&h_result),
                                 sizeof(int) * n_elements));
  // initialize data:
  printf("Computing Simple Sum test\n");
  printf("---------------------------------------------------\n");
  printf("Initialize test data [1, 1, 1...]\n");
  for (int i = 0; i < n_elements; i++) {
    h_data[i] = 1;
  }
  int blockSize = 256;
  int gridSize = n_elements / blockSize;
  int nWarps = blockSize / 32;
  // one int of dynamic shared memory per warp (warp totals)
  int shmem_sz = nWarps * sizeof(int);
  int n_partialSums = n_elements / blockSize;
  int partial_sz = n_partialSums * sizeof(int);
  printf("Scan summation for %d elements, %d partial sums\n", n_elements,
         n_elements / blockSize);
  int p_blockSize = min(n_partialSums, blockSize);
  int p_gridSize = iDivUp(n_partialSums, p_blockSize);
  printf("Partial summing %d elements with %d blocks of size %d\n",
         n_partialSums, p_gridSize, p_blockSize);
  // initialize a timer
  cudaEvent_t start, stop;
  checkCudaErrors(cudaEventCreate(&start));
  checkCudaErrors(cudaEventCreate(&stop));
  float et = 0;
  float inc = 0;
  checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&d_data), sz));
  checkCudaErrors(
      cudaMalloc(reinterpret_cast<void **>(&d_partial_sums), partial_sz));
  checkCudaErrors(cudaMemset(d_partial_sums, 0, partial_sz));
  checkCudaErrors(
      cudaMallocHost(reinterpret_cast<void **>(&h_partial_sums), partial_sz));
  checkCudaErrors(cudaMemcpy(d_data, h_data, sz, cudaMemcpyHostToDevice));
  checkCudaErrors(cudaEventRecord(start, 0));
  // pass 1: per-block scan; pass 2: scan the block totals; pass 3: uniform
  // add, offset by one block since block 0 needs no correction.
  shfl_scan_test<<<gridSize, blockSize, shmem_sz>>>(d_data, 32, d_partial_sums);
  shfl_scan_test<<<p_gridSize, p_blockSize, shmem_sz>>>(d_partial_sums, 32);
  uniform_add<<<gridSize - 1, blockSize>>>(d_data + blockSize, d_partial_sums,
                                           n_elements);
  checkCudaErrors(cudaEventRecord(stop, 0));
  checkCudaErrors(cudaEventSynchronize(stop));
  checkCudaErrors(cudaEventElapsedTime(&inc, start, stop));
  et += inc;
  checkCudaErrors(cudaMemcpy(h_result, d_data, sz, cudaMemcpyDeviceToHost));
  checkCudaErrors(cudaMemcpy(h_partial_sums, d_partial_sums, partial_sz,
                             cudaMemcpyDeviceToHost));
  printf("Test Sum: %d\n", h_partial_sums[n_partialSums - 1]);
  printf("Time (ms): %f\n", et);
  printf("%d elements scanned in %f ms -> %f MegaElements/s\n", n_elements, et,
         n_elements / (et / 1000.0f) / 1000000.0f);
  bool bTestResult = CPUverify(h_data, h_result, n_elements);
  checkCudaErrors(cudaFreeHost(h_data));
  checkCudaErrors(cudaFreeHost(h_result));
  checkCudaErrors(cudaFreeHost(h_partial_sums));
  checkCudaErrors(cudaFree(d_data));
  checkCudaErrors(cudaFree(d_partial_sums));
  return bTestResult;
}
// Tests creation of an integral image from synthetic 1920x1080 greyscale
// data. A row-wise prefix-sum kernel runs first, then a column-wise scan;
// the test passes when the bottom-right corner of the integral image equals
// w*h (every input byte is set to 1 via the byte-wise cudaMemset below).
// Returns true on success.
bool shuffle_integral_image_test() {
  char *d_data;
  unsigned int *h_image;
  unsigned int *d_integral_image;
  int w = 1920;
  int h = 1080;

  int n_elements = w * h;
  int sz = sizeof(unsigned int) * n_elements;

  printf("\nComputing Integral Image Test on size %d x %d synthetic data\n", w,
         h);
  printf("---------------------------------------------------\n");
  checkCudaErrors(cudaMallocHost(reinterpret_cast<void **>(&h_image), sz));
  // fill test "image" with synthetic 1's data
  memset(h_image, 0, sz);

  // each thread handles 16 values, use 1 block/row
  int blockSize = iDivUp(w, 16);
  // launch 1 block / row
  int gridSize = h;

  // Create a synthetic image for testing
  checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&d_data), sz));
  // NOTE: the integral-image buffer is allocated 4x larger than sz because
  // the row kernel accesses it through uint4 pointers; only the first sz
  // bytes are cleared here.
  checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&d_integral_image),
                             n_elements * sizeof(int) * 4));
  checkCudaErrors(cudaMemset(d_data, 1, sz));
  checkCudaErrors(cudaMemset(d_integral_image, 0, sz));

  cudaEvent_t start, stop;
  // Fix: event creation/record calls were previously unchecked while every
  // other CUDA call in this test goes through checkCudaErrors.
  checkCudaErrors(cudaEventCreate(&start));
  checkCudaErrors(cudaEventCreate(&stop));
  float et = 0;
  unsigned int err;

  // Execute scan line prefix sum kernel, and time it
  checkCudaErrors(cudaEventRecord(start));
  shfl_intimage_rows<<<gridSize, blockSize>>>(
      reinterpret_cast<uint4 *>(d_data),
      reinterpret_cast<uint4 *>(d_integral_image));
  checkCudaErrors(cudaEventRecord(stop));
  checkCudaErrors(cudaEventSynchronize(stop));
  checkCudaErrors(cudaEventElapsedTime(&et, start, stop));
  printf("Method: Fast Time (GPU Timer): %f ms ", et);

  // verify the scan line results
  checkCudaErrors(
      cudaMemcpy(h_image, d_integral_image, sz, cudaMemcpyDeviceToHost));
  err = verifyDataRowSums(h_image, w, h);
  printf("Diff = %d\n", err);

  // Execute column prefix sum kernel and time it
  dim3 blockSz(32, 8);
  dim3 testGrid(w / blockSz.x, 1);

  checkCudaErrors(cudaEventRecord(start));
  shfl_vertical_shfl<<<testGrid, blockSz>>>((unsigned int *)d_integral_image, w,
                                            h);
  checkCudaErrors(cudaEventRecord(stop));
  checkCudaErrors(cudaEventSynchronize(stop));
  checkCudaErrors(cudaEventElapsedTime(&et, start, stop));
  printf("Method: Vertical Scan Time (GPU Timer): %f ms ", et);

  // Verify the column results
  checkCudaErrors(
      cudaMemcpy(h_image, d_integral_image, sz, cudaMemcpyDeviceToHost));
  printf("\n");

  int finalSum = h_image[w * h - 1];
  printf("CheckSum: %d, (expect %dx%d=%d)\n", finalSum, w, h, w * h);

  // Fix: destroy the timing events (previously leaked every run).
  checkCudaErrors(cudaEventDestroy(start));
  checkCudaErrors(cudaEventDestroy(stop));
  checkCudaErrors(cudaFree(d_data));
  checkCudaErrors(cudaFree(d_integral_image));
  checkCudaErrors(cudaFreeHost(h_image));

  // verify final sum: if the final value in the corner is the same as the size
  // of the buffer (all 1's) then the integral image was generated successfully
  return (finalSum == w * h) ? true : false;
}
// Entry point: verifies the device supports the __shfl intrinsic (SM 3.0+),
// then runs the simple scan test and the integral-image test.
// Exits with EXIT_SUCCESS only when both tests pass; waives the test on
// pre-SM3.0 hardware.
int main(int argc, char *argv[]) {
  // Initialization. The shuffle intrinsic is not available on SM < 3.0
  // so waive the test if the hardware is not present.
  int cuda_device = 0;

  printf("Starting shfl_scan\n");

  // use command-line specified CUDA device, otherwise use device with highest
  // Gflops/s
  cuda_device = findCudaDevice(argc, (const char **)argv);

  cudaDeviceProp deviceProp;
  checkCudaErrors(cudaGetDevice(&cuda_device));
  checkCudaErrors(cudaGetDeviceProperties(&deviceProp, cuda_device));

  printf("> Detected Compute SM %d.%d hardware with %d multi-processors\n",
         deviceProp.major, deviceProp.minor, deviceProp.multiProcessorCount);

  // __shfl intrinsic needs SM 3.0 or higher
  if (deviceProp.major < 3) {
    printf("> __shfl() intrinsic requires device SM 3.0+\n");
    printf("> Waiving test.\n");
    exit(EXIT_WAIVED);
  }

  bool bTestResult = true;
  bool simpleTest = shuffle_simple_test(argc, argv);
  bool intTest = shuffle_integral_image_test();

  // Fix: use logical && rather than bitwise & on booleans — identical result
  // here, but && states the intent and is the conventional idiom.
  bTestResult = simpleTest && intTest;

  exit((bTestResult) ? EXIT_SUCCESS : EXIT_FAILURE);
}
|
collatz_hyb.hip | // !!! This is a file automatically generated by hipify!!!
/*
Collatz code for CS 4380 / CS 5351
Copyright (c) 2019 Texas State University. All rights reserved.
Redistribution in source or binary form, with or without modification,
is *not* permitted. Use in source and binary forms, with or without
modification, is only permitted for academic use in CS 4380 or CS 5351
at Texas State University.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Author: Martin Burtscher
*/
#include <cstdio>
#include <hip/hip_runtime.h>
static const int ThreadsPerBlock = 512;
static int* d_maxlen;
// Kernel: one thread per odd value in [start, stop] (start is assumed odd).
// Each thread computes the length of the Collatz sequence for its value and
// folds the maximum into *maxlen via atomicMax.
static __global__ void collatz(const long start, const long stop, int* const maxlen)
{
  // todo: process odd values from start (assume start to be odd) to stop (inclusively if stop is odd) with one thread per value (based on code from previous project)
  // Thread t handles start + 2*t; the (long) cast keeps the index arithmetic
  // in 64 bits for large grids.
  const long i = start + 2*(threadIdx.x + blockIdx.x * (long) blockDim.x);
  if(i <= stop){
    long val = i;
    int len = 1;  // sequence length includes the starting value
    while (val != 1) {
      len++;
      if ((val % 2) == 0) {
        val = val / 2; // even
      } else {
        val = 3 * val + 1; // odd
      }
    }
    // Cheap non-atomic pre-check: *maxlen only ever grows (written solely by
    // atomicMax here, initialized to 0 in GPU_Init), so a stale read is never
    // larger than the true maximum and the guard cannot wrongly skip an update.
    if(len > *maxlen)
      atomicMax(maxlen, len);
  }
}
// Allocates the device-side result cell d_maxlen and initializes it to 0.
// Terminates the process on any HIP allocation/copy failure.
void GPU_Init()
{
  int maxlen = 0;
  if (hipSuccess != hipMalloc((void **)&d_maxlen, sizeof(int))) {fprintf(stderr, "ERROR: could not allocate memory\n"); exit(-1);}
  if (hipSuccess != hipMemcpy(d_maxlen, &maxlen, sizeof(int), hipMemcpyHostToDevice)) {fprintf(stderr, "ERROR: copying to device failed\n"); exit(-1);}
}
// Launches the collatz kernel over the odd values in [start, stop].
// (stop - start + 2) / 2 is the number of odd values covered; the grid is
// the ceiling of that count over ThreadsPerBlock. No-op when start > stop.
void GPU_Exec(const long start, const long stop)
{
  if (start <= stop) {
    hipLaunchKernelGGL(( collatz), dim3(((stop - start + 2) / 2 + ThreadsPerBlock - 1) / ThreadsPerBlock), dim3(ThreadsPerBlock), 0, 0, start, stop, d_maxlen);
  }
}
// Copies the maximum sequence length back to the host, frees the device
// cell, and returns the result. Exits on copy failure; hipFree's return
// value is not checked at shutdown.
int GPU_Fini()
{
  int maxlen;
  // todo: copy the result from the device to the host and free the device memory
  if (hipSuccess != hipMemcpy(&maxlen, d_maxlen, sizeof(int), hipMemcpyDeviceToHost)) {fprintf(stderr, "ERROR: copying from device failed\n"); exit(-1);}
  hipFree(d_maxlen);
  return maxlen;
}
| collatz_hyb.cu | /*
Collatz code for CS 4380 / CS 5351
Copyright (c) 2019 Texas State University. All rights reserved.
Redistribution in source or binary form, with or without modification,
is *not* permitted. Use in source and binary forms, with or without
modification, is only permitted for academic use in CS 4380 or CS 5351
at Texas State University.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Author: Martin Burtscher
*/
#include <cstdio>
#include <cuda.h>
static const int ThreadsPerBlock = 512;
static int* d_maxlen;
// Kernel: one thread per odd value in [start, stop] (start is assumed odd).
// Each thread walks the Collatz sequence of its value and merges the
// sequence length into *maxlen with atomicMax.
static __global__ void collatz(const long start, const long stop, int* const maxlen)
{
  // Thread t handles start + 2*t; keep index math in 64 bits for big grids.
  const long value = start + 2 * (threadIdx.x + blockIdx.x * (long)blockDim.x);
  if (value > stop) return;  // guard against the grid tail past stop

  long cur = value;
  int steps = 1;  // length counts the starting value itself
  while (cur != 1) {
    cur = ((cur % 2) == 0) ? cur / 2     // even: halve
                           : 3 * cur + 1; // odd: 3n+1
    steps++;
  }

  // Non-atomic pre-filter: *maxlen only ever grows (written solely by
  // atomicMax, initialized to 0), so a stale read never skips a real update.
  if (steps > *maxlen) {
    atomicMax(maxlen, steps);
  }
}
// Allocates the device-side result cell d_maxlen and initializes it to 0.
// Terminates the process on any CUDA allocation/copy failure.
void GPU_Init()
{
  int maxlen = 0;
  if (cudaSuccess != cudaMalloc((void **)&d_maxlen, sizeof(int))) {fprintf(stderr, "ERROR: could not allocate memory\n"); exit(-1);}
  if (cudaSuccess != cudaMemcpy(d_maxlen, &maxlen, sizeof(int), cudaMemcpyHostToDevice)) {fprintf(stderr, "ERROR: copying to device failed\n"); exit(-1);}
}
// Launches the collatz kernel over the odd values in [start, stop].
// (stop - start + 2) / 2 is the number of odd values covered; the grid is
// the ceiling of that count over ThreadsPerBlock. No-op when start > stop.
void GPU_Exec(const long start, const long stop)
{
  if (start <= stop) {
    collatz<<<((stop - start + 2) / 2 + ThreadsPerBlock - 1) / ThreadsPerBlock, ThreadsPerBlock>>>(start, stop, d_maxlen);
  }
}
// Copies the maximum sequence length back to the host, frees the device
// cell, and returns the result. Exits on copy failure; cudaFree's return
// value is not checked at shutdown.
int GPU_Fini()
{
  int maxlen;
  // todo: copy the result from the device to the host and free the device memory
  if (cudaSuccess != cudaMemcpy(&maxlen, d_maxlen, sizeof(int), cudaMemcpyDeviceToHost)) {fprintf(stderr, "ERROR: copying from device failed\n"); exit(-1);}
  cudaFree(d_maxlen);
  return maxlen;
}
|
0dde6e2a2939b47942770c527e8f7d5f61023e6c.hip | // !!! This is a file automatically generated by hipify!!!
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "opencv2/opencv_modules.hpp"
#ifndef HAVE_OPENCV_CUDEV
#error "opencv_cudev is required"
#else
#include "opencv2/core/gpu.hpp"
#include "opencv2/cudev.hpp"
using namespace cv;
using namespace cv::gpu;
using namespace cv::cudev;
/////////////////////////////////////////////////////
/// create
// Allocates device storage for a _rows x _cols matrix of the given type.
// Reuses the current buffer when shape/type already match; otherwise releases
// the old data and allocates fresh memory — pitched for true 2D matrices,
// linear for single-row/column matrices (which must be continuous).
void cv::gpu::GpuMat::create(int _rows, int _cols, int _type)
{
    CV_DbgAssert( _rows >= 0 && _cols >= 0 );

    _type &= Mat::TYPE_MASK;

    // Fast path: the matrix already has exactly this shape and type.
    if (rows == _rows && cols == _cols && type() == _type && data)
        return;

    if (data)
        release();

    if (_rows > 0 && _cols > 0)
    {
        flags = Mat::MAGIC_VAL + _type;
        rows = _rows;
        cols = _cols;

        size_t esz = elemSize();

        void* devPtr;

        if (rows > 1 && cols > 1)
        {
            // Pitched allocation: step is set to the (possibly padded) row pitch.
            CV_CUDEV_SAFE_CALL( hipMallocPitch(&devPtr, &step, esz * cols, rows) );
        }
        else
        {
            // Single row or single column must be continuous
            CV_CUDEV_SAFE_CALL( hipMalloc(&devPtr, esz * cols * rows) );
            step = esz * cols;
        }

        // Mark continuous layouts (no per-row padding).
        if (esz * cols == step)
            flags |= Mat::CONTINUOUS_FLAG;

        int64 _nettosize = static_cast<int64>(step) * rows;
        size_t nettosize = static_cast<size_t>(_nettosize);

        datastart = data = static_cast<uchar*>(devPtr);
        dataend = data + nettosize;

        // Host-allocated manual reference count shared by shallow copies.
        refcount = static_cast<int*>(fastMalloc(sizeof(*refcount)));
        *refcount = 1;
    }
}
/////////////////////////////////////////////////////
/// release
// Drops one reference; frees the device buffer and the refcount cell when
// this was the last owner. Always resets this header to the empty state.
void cv::gpu::GpuMat::release()
{
    // CV_XADD returns the previous value, so 1 means we held the last reference.
    if (refcount && CV_XADD(refcount, -1) == 1)
    {
        hipFree(datastart);
        fastFree(refcount);
    }

    data = datastart = dataend = 0;
    step = rows = cols = 0;
    refcount = 0;
}
/////////////////////////////////////////////////////
/// upload
// Synchronous host -> device copy; (re)allocates this GpuMat to match arr.
void cv::gpu::GpuMat::upload(InputArray arr)
{
    Mat mat = arr.getMat();

    CV_DbgAssert( !mat.empty() );

    create(mat.size(), mat.type());

    CV_CUDEV_SAFE_CALL( hipMemcpy2D(data, step, mat.data, mat.step, cols * elemSize(), rows, hipMemcpyHostToDevice) );
}

// Asynchronous host -> device copy enqueued on the given stream.
// NOTE(review): the async copy only enqueues work, so the source Mat must
// stay alive until the stream completes — confirm callers guarantee this.
void cv::gpu::GpuMat::upload(InputArray arr, Stream& _stream)
{
    Mat mat = arr.getMat();

    CV_DbgAssert( !mat.empty() );

    create(mat.size(), mat.type());

    hipStream_t stream = StreamAccessor::getStream(_stream);
    CV_CUDEV_SAFE_CALL( hipMemcpy2DAsync(data, step, mat.data, mat.step, cols * elemSize(), rows, hipMemcpyHostToDevice, stream) );
}

/////////////////////////////////////////////////////
/// download

// Synchronous device -> host copy into _dst (created with matching shape/type).
void cv::gpu::GpuMat::download(OutputArray _dst) const
{
    CV_DbgAssert( !empty() );

    _dst.create(size(), type());
    Mat dst = _dst.getMat();

    CV_CUDEV_SAFE_CALL( hipMemcpy2D(dst.data, dst.step, data, step, cols * elemSize(), rows, hipMemcpyDeviceToHost) );
}

// Asynchronous device -> host copy enqueued on the given stream.
void cv::gpu::GpuMat::download(OutputArray _dst, Stream& _stream) const
{
    CV_DbgAssert( !empty() );

    _dst.create(size(), type());
    Mat dst = _dst.getMat();

    hipStream_t stream = StreamAccessor::getStream(_stream);
    CV_CUDEV_SAFE_CALL( hipMemcpy2DAsync(dst.data, dst.step, data, step, cols * elemSize(), rows, hipMemcpyDeviceToHost, stream) );
}
/////////////////////////////////////////////////////
/// copyTo
// Synchronous device -> device deep copy into _dst.
void cv::gpu::GpuMat::copyTo(OutputArray _dst) const
{
    CV_DbgAssert( !empty() );

    _dst.create(size(), type());
    GpuMat dst = _dst.getGpuMat();

    CV_CUDEV_SAFE_CALL( hipMemcpy2D(dst.data, dst.step, data, step, cols * elemSize(), rows, hipMemcpyDeviceToDevice) );
}

// Asynchronous device -> device deep copy enqueued on the given stream.
void cv::gpu::GpuMat::copyTo(OutputArray _dst, Stream& _stream) const
{
    CV_DbgAssert( !empty() );

    _dst.create(size(), type());
    GpuMat dst = _dst.getGpuMat();

    hipStream_t stream = StreamAccessor::getStream(_stream);
    CV_CUDEV_SAFE_CALL( hipMemcpy2DAsync(dst.data, dst.step, data, step, cols * elemSize(), rows, hipMemcpyDeviceToDevice, stream) );
}
namespace
{
    // Transform policy specialized by element size in bytes.
    // NOTE(review): 'shift' presumably selects the per-thread vector width
    // used by gridTransform_ — confirm against cudev's DefaultTransformPolicy.
    template <size_t size> struct CopyToPolicy : DefaultTransformPolicy
    {
    };
    template <> struct CopyToPolicy<4> : DefaultTransformPolicy
    {
        enum {
            shift = 2
        };
    };
    template <> struct CopyToPolicy<8> : DefaultTransformPolicy
    {
        enum {
            shift = 1
        };
    };

    // Element-wise copy of src into dst under a uchar mask, dispatched
    // through cudev's masked grid transform with the identity functor.
    template <typename T>
    void copyWithMask(const GpuMat& src, const GpuMat& dst, const GpuMat& mask, Stream& stream)
    {
        gridTransform_< CopyToPolicy<sizeof(typename VecTraits<T>::elem_type)> >(globPtr<T>(src), globPtr<T>(dst), identity<T>(), globPtr<uchar>(mask), stream);
    }
}
// Masked deep copy: dst elements are overwritten only where the mask allows.
// Supports element sizes 1/2/4/8 bytes and up to 4 channels; the mask must
// be CV_8U with either one channel or the same channel count as *this.
void cv::gpu::GpuMat::copyTo(OutputArray _dst, InputArray _mask, Stream& stream) const
{
    CV_DbgAssert( !empty() );
    CV_DbgAssert( depth() <= CV_64F && channels() <= 4 );

    GpuMat mask = _mask.getGpuMat();
    CV_DbgAssert( size() == mask.size() && mask.depth() == CV_8U && (mask.channels() == 1 || mask.channels() == channels()) );

    _dst.create(size(), type());
    GpuMat dst = _dst.getGpuMat();

    typedef void (*func_t)(const GpuMat& src, const GpuMat& dst, const GpuMat& mask, Stream& stream);
    // Dispatch table indexed by [element size in bytes][channels - 1];
    // rows for sizes 0,3,5,6,7 have no matching type and stay null.
    static const func_t funcs[9][4] =
    {
        {0,0,0,0},
        {copyWithMask<uchar>, copyWithMask<uchar2>, copyWithMask<uchar3>, copyWithMask<uchar4>},
        {copyWithMask<ushort>, copyWithMask<ushort2>, copyWithMask<ushort3>, copyWithMask<ushort4>},
        {0,0,0,0},
        {copyWithMask<int>, copyWithMask<int2>, copyWithMask<int3>, copyWithMask<int4>},
        {0,0,0,0},
        {0,0,0,0},
        {0,0,0,0},
        {copyWithMask<double>, copyWithMask<double2>, copyWithMask<double3>, copyWithMask<double4>}
    };

    if (mask.channels() == channels())
    {
        // Per-channel mask: flatten src, dst and mask to single-channel views
        // and reuse the 1-channel kernel element by element.
        const func_t func = funcs[elemSize1()][0];
        CV_DbgAssert( func != 0 );
        func(reshape(1), dst.reshape(1), mask.reshape(1), stream);
    }
    else
    {
        // Single-channel mask gating whole multi-channel elements.
        const func_t func = funcs[elemSize1()][channels() - 1];
        CV_DbgAssert( func != 0 );
        func(*this, dst, mask, stream);
    }
}
/////////////////////////////////////////////////////
/// setTo
namespace
{
    // Fills mat with the scalar (broadcast via a constant pointer source and
    // the identity functor).
    template <typename T>
    void setToWithOutMask(const GpuMat& mat, Scalar _scalar, Stream& stream)
    {
        Scalar_<typename VecTraits<T>::elem_type> scalar = _scalar;
        gridTransform(constantPtr(VecTraits<T>::make(scalar.val), mat.rows, mat.cols), globPtr<T>(mat), identity<T>(), stream);
    }

    // Same as above but only where the uchar mask allows.
    template <typename T>
    void setToWithMask(const GpuMat& mat, const GpuMat& mask, Scalar _scalar, Stream& stream)
    {
        Scalar_<typename VecTraits<T>::elem_type> scalar = _scalar;
        gridTransform(constantPtr(VecTraits<T>::make(scalar.val), mat.rows, mat.cols), globPtr<T>(mat), identity<T>(), globPtr<uchar>(mask), stream);
    }
}
// Fills the whole matrix with 'value'. Fast paths use byte-wise memset:
// all-zero scalars for any type, and uniform scalars for CV_8U; everything
// else dispatches to a per-type fill kernel.
GpuMat& cv::gpu::GpuMat::setTo(Scalar value, Stream& stream)
{
    CV_DbgAssert( !empty() );
    CV_DbgAssert( depth() <= CV_64F && channels() <= 4 );

    if (value[0] == 0.0 && value[1] == 0.0 && value[2] == 0.0 && value[3] == 0.0)
    {
        // Zero fill
        if (stream)
            CV_CUDEV_SAFE_CALL( hipMemset2DAsync(data, step, 0, cols * elemSize(), rows, StreamAccessor::getStream(stream)) );
        else
            CV_CUDEV_SAFE_CALL( hipMemset2D(data, step, 0, cols * elemSize(), rows) );

        return *this;
    }

    if (depth() == CV_8U)
    {
        const int cn = channels();

        // memset works for 8-bit data when all channels share one value.
        if (cn == 1
            || (cn == 2 && value[0] == value[1])
            || (cn == 3 && value[0] == value[1] && value[0] == value[2])
            || (cn == 4 && value[0] == value[1] && value[0] == value[2] && value[0] == value[3]))
        {
            const int val = cv::saturate_cast<uchar>(value[0]);

            if (stream)
                CV_CUDEV_SAFE_CALL( hipMemset2DAsync(data, step, val, cols * elemSize(), rows, StreamAccessor::getStream(stream)) );
            else
                CV_CUDEV_SAFE_CALL( hipMemset2D(data, step, val, cols * elemSize(), rows) );

            return *this;
        }
    }

    typedef void (*func_t)(const GpuMat& mat, Scalar scalar, Stream& stream);
    // Dispatch table indexed by [depth][channels - 1].
    static const func_t funcs[7][4] =
    {
        {setToWithOutMask<uchar>,setToWithOutMask<uchar2>,setToWithOutMask<uchar3>,setToWithOutMask<uchar4>},
        {setToWithOutMask<schar>,setToWithOutMask<char2>,setToWithOutMask<char3>,setToWithOutMask<char4>},
        {setToWithOutMask<ushort>,setToWithOutMask<ushort2>,setToWithOutMask<ushort3>,setToWithOutMask<ushort4>},
        {setToWithOutMask<short>,setToWithOutMask<short2>,setToWithOutMask<short3>,setToWithOutMask<short4>},
        {setToWithOutMask<int>,setToWithOutMask<int2>,setToWithOutMask<int3>,setToWithOutMask<int4>},
        {setToWithOutMask<float>,setToWithOutMask<float2>,setToWithOutMask<float3>,setToWithOutMask<float4>},
        {setToWithOutMask<double>,setToWithOutMask<double2>,setToWithOutMask<double3>,setToWithOutMask<double4>}
    };

    funcs[depth()][channels() - 1](*this, value, stream);

    return *this;
}
// Fills elements selected by a CV_8UC1 mask with 'value', dispatching to a
// per-type masked fill kernel.
GpuMat& cv::gpu::GpuMat::setTo(Scalar value, InputArray _mask, Stream& stream)
{
    CV_DbgAssert( !empty() );
    CV_DbgAssert( depth() <= CV_64F && channels() <= 4 );

    GpuMat mask = _mask.getGpuMat();

    CV_DbgAssert( size() == mask.size() && mask.type() == CV_8UC1 );

    typedef void (*func_t)(const GpuMat& mat, const GpuMat& mask, Scalar scalar, Stream& stream);
    // Dispatch table indexed by [depth][channels - 1].
    static const func_t funcs[7][4] =
    {
        {setToWithMask<uchar>,setToWithMask<uchar2>,setToWithMask<uchar3>,setToWithMask<uchar4>},
        {setToWithMask<schar>,setToWithMask<char2>,setToWithMask<char3>,setToWithMask<char4>},
        {setToWithMask<ushort>,setToWithMask<ushort2>,setToWithMask<ushort3>,setToWithMask<ushort4>},
        {setToWithMask<short>,setToWithMask<short2>,setToWithMask<short3>,setToWithMask<short4>},
        {setToWithMask<int>,setToWithMask<int2>,setToWithMask<int3>,setToWithMask<int4>},
        {setToWithMask<float>,setToWithMask<float2>,setToWithMask<float3>,setToWithMask<float4>},
        {setToWithMask<double>,setToWithMask<double2>,setToWithMask<double3>,setToWithMask<double4>}
    };

    funcs[depth()][channels() - 1](*this, mask, value, stream);

    return *this;
}
/////////////////////////////////////////////////////
/// convertTo
namespace
{
    // Transform policy for conversions; double gets a reduced shift.
    // NOTE(review): 'shift' presumably controls per-thread vectorization —
    // confirm against cudev's DefaultTransformPolicy.
    template <typename T> struct ConvertToPolicy : DefaultTransformPolicy
    {
    };
    template <> struct ConvertToPolicy<double> : DefaultTransformPolicy
    {
        enum {
            shift = 1
        };
    };

    // Element-wise saturating conversion T -> D with no scaling.
    template <typename T, typename D>
    void convertToNoScale(const GpuMat& src, const GpuMat& dst, Stream& stream)
    {
        typedef typename VecTraits<T>::elem_type src_elem_type;
        typedef typename VecTraits<D>::elem_type dst_elem_type;
        // larger_elem_type is unused here; scalar_type only selects the policy.
        typedef typename LargerType<src_elem_type, float>::type larger_elem_type;
        typedef typename LargerType<float, dst_elem_type>::type scalar_type;

        gridTransform_< ConvertToPolicy<scalar_type> >(globPtr<T>(src), globPtr<D>(dst), saturate_cast_func<T, D>(), stream);
    }

    // Device functor computing saturate_cast<D>(alpha * src + beta).
    template <typename T, typename D, typename S> struct Convertor : unary_function<T, D>
    {
        S alpha;
        S beta;

        __device__ __forceinline__ D operator ()(typename TypeTraits<T>::parameter_type src) const
        {
            return cudev::saturate_cast<D>(alpha * src + beta);
        }
    };

    // Element-wise scaled conversion T -> D: dst = saturate(alpha*src + beta).
    template <typename T, typename D>
    void convertToScale(const GpuMat& src, const GpuMat& dst, double alpha, double beta, Stream& stream)
    {
        typedef typename VecTraits<T>::elem_type src_elem_type;
        typedef typename VecTraits<D>::elem_type dst_elem_type;
        typedef typename LargerType<src_elem_type, float>::type larger_elem_type;
        typedef typename LargerType<float, dst_elem_type>::type scalar_type;

        Convertor<T, D, scalar_type> op;
        op.alpha = cv::saturate_cast<scalar_type>(alpha);
        op.beta = cv::saturate_cast<scalar_type>(beta);

        gridTransform_< ConvertToPolicy<scalar_type> >(globPtr<T>(src), globPtr<D>(dst), op, stream);
    }
}
// Converts this matrix to the requested depth (channel count preserved).
// Same-depth conversions degenerate to a plain copy; otherwise a per-pair
// saturating conversion kernel runs on flattened single-channel views.
void cv::gpu::GpuMat::convertTo(OutputArray _dst, int rtype, Stream& stream) const
{
    if (rtype < 0)
        rtype = type();
    else
        rtype = CV_MAKE_TYPE(CV_MAT_DEPTH(rtype), channels());

    const int sdepth = depth();
    const int ddepth = CV_MAT_DEPTH(rtype);
    if (sdepth == ddepth)
    {
        // No depth change: delegate to copyTo (handles the diagonal of the
        // dispatch table below, which is deliberately null).
        if (stream)
            copyTo(_dst, stream);
        else
            copyTo(_dst);

        return;
    }

    CV_DbgAssert( sdepth <= CV_64F && ddepth <= CV_64F );

    GpuMat src = *this;

    _dst.create(size(), rtype);
    GpuMat dst = _dst.getGpuMat();

    typedef void (*func_t)(const GpuMat& src, const GpuMat& dst, Stream& stream);
    // Dispatch table indexed by [source depth][destination depth].
    static const func_t funcs[7][7] =
    {
        {0, convertToNoScale<uchar, schar>, convertToNoScale<uchar, ushort>, convertToNoScale<uchar, short>, convertToNoScale<uchar, int>, convertToNoScale<uchar, float>, convertToNoScale<uchar, double>},
        {convertToNoScale<schar, uchar>, 0, convertToNoScale<schar, ushort>, convertToNoScale<schar, short>, convertToNoScale<schar, int>, convertToNoScale<schar, float>, convertToNoScale<schar, double>},
        {convertToNoScale<ushort, uchar>, convertToNoScale<ushort, schar>, 0, convertToNoScale<ushort, short>, convertToNoScale<ushort, int>, convertToNoScale<ushort, float>, convertToNoScale<ushort, double>},
        {convertToNoScale<short, uchar>, convertToNoScale<short, schar>, convertToNoScale<short, ushort>, 0, convertToNoScale<short, int>, convertToNoScale<short, float>, convertToNoScale<short, double>},
        {convertToNoScale<int, uchar>, convertToNoScale<int, schar>, convertToNoScale<int, ushort>, convertToNoScale<int, short>, 0, convertToNoScale<int, float>, convertToNoScale<int, double>},
        {convertToNoScale<float, uchar>, convertToNoScale<float, schar>, convertToNoScale<float, ushort>, convertToNoScale<float, short>, convertToNoScale<float, int>, 0, convertToNoScale<float, double>},
        {convertToNoScale<double, uchar>, convertToNoScale<double, schar>, convertToNoScale<double, ushort>, convertToNoScale<double, short>, convertToNoScale<double, int>, convertToNoScale<double, float>, 0}
    };

    funcs[sdepth][ddepth](reshape(1), dst.reshape(1), stream);
}
// Converts with per-element scaling: dst = saturate(alpha * src + beta),
// possibly changing depth (channel count preserved). Unlike the unscaled
// overload, same-depth pairs are handled by the table's diagonal.
void cv::gpu::GpuMat::convertTo(OutputArray _dst, int rtype, double alpha, double beta, Stream& stream) const
{
    if (rtype < 0)
        rtype = type();
    else
        rtype = CV_MAKETYPE(CV_MAT_DEPTH(rtype), channels());

    const int sdepth = depth();
    const int ddepth = CV_MAT_DEPTH(rtype);

    GpuMat src = *this;

    _dst.create(size(), rtype);
    GpuMat dst = _dst.getGpuMat();

    typedef void (*func_t)(const GpuMat& src, const GpuMat& dst, double alpha, double beta, Stream& stream);
    // Dispatch table indexed by [source depth][destination depth].
    static const func_t funcs[7][7] =
    {
        {convertToScale<uchar, uchar>, convertToScale<uchar, schar>, convertToScale<uchar, ushort>, convertToScale<uchar, short>, convertToScale<uchar, int>, convertToScale<uchar, float>, convertToScale<uchar, double>},
        {convertToScale<schar, uchar>, convertToScale<schar, schar>, convertToScale<schar, ushort>, convertToScale<schar, short>, convertToScale<schar, int>, convertToScale<schar, float>, convertToScale<schar, double>},
        {convertToScale<ushort, uchar>, convertToScale<ushort, schar>, convertToScale<ushort, ushort>, convertToScale<ushort, short>, convertToScale<ushort, int>, convertToScale<ushort, float>, convertToScale<ushort, double>},
        {convertToScale<short, uchar>, convertToScale<short, schar>, convertToScale<short, ushort>, convertToScale<short, short>, convertToScale<short, int>, convertToScale<short, float>, convertToScale<short, double>},
        {convertToScale<int, uchar>, convertToScale<int, schar>, convertToScale<int, ushort>, convertToScale<int, short>, convertToScale<int, int>, convertToScale<int, float>, convertToScale<int, double>},
        {convertToScale<float, uchar>, convertToScale<float, schar>, convertToScale<float, ushort>, convertToScale<float, short>, convertToScale<float, int>, convertToScale<float, float>, convertToScale<float, double>},
        {convertToScale<double, uchar>, convertToScale<double, schar>, convertToScale<double, ushort>, convertToScale<double, short>, convertToScale<double, int>, convertToScale<double, float>, convertToScale<double, double>}
    };

    funcs[sdepth][ddepth](reshape(1), dst.reshape(1), alpha, beta, stream);
}
#endif
| 0dde6e2a2939b47942770c527e8f7d5f61023e6c.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "opencv2/opencv_modules.hpp"
#ifndef HAVE_OPENCV_CUDEV
#error "opencv_cudev is required"
#else
#include "opencv2/core/gpu.hpp"
#include "opencv2/cudev.hpp"
using namespace cv;
using namespace cv::gpu;
using namespace cv::cudev;
/////////////////////////////////////////////////////
/// create
// Allocates device storage for a _rows x _cols matrix of the given type.
// Reuses the current buffer when shape/type already match; otherwise releases
// the old data and allocates fresh memory — pitched for true 2D matrices,
// linear for single-row/column matrices (which must be continuous).
void cv::gpu::GpuMat::create(int _rows, int _cols, int _type)
{
    CV_DbgAssert( _rows >= 0 && _cols >= 0 );

    _type &= Mat::TYPE_MASK;

    // Fast path: the matrix already has exactly this shape and type.
    if (rows == _rows && cols == _cols && type() == _type && data)
        return;

    if (data)
        release();

    if (_rows > 0 && _cols > 0)
    {
        flags = Mat::MAGIC_VAL + _type;
        rows = _rows;
        cols = _cols;

        size_t esz = elemSize();

        void* devPtr;

        if (rows > 1 && cols > 1)
        {
            // Pitched allocation: step is set to the (possibly padded) row pitch.
            CV_CUDEV_SAFE_CALL( cudaMallocPitch(&devPtr, &step, esz * cols, rows) );
        }
        else
        {
            // Single row or single column must be continuous
            CV_CUDEV_SAFE_CALL( cudaMalloc(&devPtr, esz * cols * rows) );
            step = esz * cols;
        }

        // Mark continuous layouts (no per-row padding).
        if (esz * cols == step)
            flags |= Mat::CONTINUOUS_FLAG;

        int64 _nettosize = static_cast<int64>(step) * rows;
        size_t nettosize = static_cast<size_t>(_nettosize);

        datastart = data = static_cast<uchar*>(devPtr);
        dataend = data + nettosize;

        // Host-allocated manual reference count shared by shallow copies.
        refcount = static_cast<int*>(fastMalloc(sizeof(*refcount)));
        *refcount = 1;
    }
}
/////////////////////////////////////////////////////
/// release
// Drops one reference; frees the device buffer and the refcount cell when
// this was the last owner. Always resets this header to the empty state.
void cv::gpu::GpuMat::release()
{
    // CV_XADD returns the previous value, so 1 means we held the last reference.
    if (refcount && CV_XADD(refcount, -1) == 1)
    {
        cudaFree(datastart);
        fastFree(refcount);
    }

    data = datastart = dataend = 0;
    step = rows = cols = 0;
    refcount = 0;
}
/////////////////////////////////////////////////////
/// upload
// Synchronous host -> device copy; (re)allocates this GpuMat to match arr.
void cv::gpu::GpuMat::upload(InputArray arr)
{
    Mat mat = arr.getMat();

    CV_DbgAssert( !mat.empty() );

    create(mat.size(), mat.type());

    CV_CUDEV_SAFE_CALL( cudaMemcpy2D(data, step, mat.data, mat.step, cols * elemSize(), rows, cudaMemcpyHostToDevice) );
}

// Asynchronous host -> device copy enqueued on the given stream.
// NOTE(review): the async copy only enqueues work, so the source Mat must
// stay alive until the stream completes — confirm callers guarantee this.
void cv::gpu::GpuMat::upload(InputArray arr, Stream& _stream)
{
    Mat mat = arr.getMat();

    CV_DbgAssert( !mat.empty() );

    create(mat.size(), mat.type());

    cudaStream_t stream = StreamAccessor::getStream(_stream);
    CV_CUDEV_SAFE_CALL( cudaMemcpy2DAsync(data, step, mat.data, mat.step, cols * elemSize(), rows, cudaMemcpyHostToDevice, stream) );
}

/////////////////////////////////////////////////////
/// download

// Synchronous device -> host copy into _dst (created with matching shape/type).
void cv::gpu::GpuMat::download(OutputArray _dst) const
{
    CV_DbgAssert( !empty() );

    _dst.create(size(), type());
    Mat dst = _dst.getMat();

    CV_CUDEV_SAFE_CALL( cudaMemcpy2D(dst.data, dst.step, data, step, cols * elemSize(), rows, cudaMemcpyDeviceToHost) );
}

// Asynchronous device -> host copy enqueued on the given stream.
void cv::gpu::GpuMat::download(OutputArray _dst, Stream& _stream) const
{
    CV_DbgAssert( !empty() );

    _dst.create(size(), type());
    Mat dst = _dst.getMat();

    cudaStream_t stream = StreamAccessor::getStream(_stream);
    CV_CUDEV_SAFE_CALL( cudaMemcpy2DAsync(dst.data, dst.step, data, step, cols * elemSize(), rows, cudaMemcpyDeviceToHost, stream) );
}
/////////////////////////////////////////////////////
/// copyTo
// Synchronous device -> device deep copy into _dst.
void cv::gpu::GpuMat::copyTo(OutputArray _dst) const
{
    CV_DbgAssert( !empty() );

    _dst.create(size(), type());
    GpuMat dst = _dst.getGpuMat();

    CV_CUDEV_SAFE_CALL( cudaMemcpy2D(dst.data, dst.step, data, step, cols * elemSize(), rows, cudaMemcpyDeviceToDevice) );
}

// Asynchronous device -> device deep copy enqueued on the given stream.
void cv::gpu::GpuMat::copyTo(OutputArray _dst, Stream& _stream) const
{
    CV_DbgAssert( !empty() );

    _dst.create(size(), type());
    GpuMat dst = _dst.getGpuMat();

    cudaStream_t stream = StreamAccessor::getStream(_stream);
    CV_CUDEV_SAFE_CALL( cudaMemcpy2DAsync(dst.data, dst.step, data, step, cols * elemSize(), rows, cudaMemcpyDeviceToDevice, stream) );
}
namespace
{
    // Transform policy specialized by element size in bytes.
    // NOTE(review): 'shift' presumably selects the per-thread vector width
    // used by gridTransform_ — confirm against cudev's DefaultTransformPolicy.
    template <size_t size> struct CopyToPolicy : DefaultTransformPolicy
    {
    };
    template <> struct CopyToPolicy<4> : DefaultTransformPolicy
    {
        enum {
            shift = 2
        };
    };
    template <> struct CopyToPolicy<8> : DefaultTransformPolicy
    {
        enum {
            shift = 1
        };
    };

    // Element-wise copy of src into dst under a uchar mask, dispatched
    // through cudev's masked grid transform with the identity functor.
    template <typename T>
    void copyWithMask(const GpuMat& src, const GpuMat& dst, const GpuMat& mask, Stream& stream)
    {
        gridTransform_< CopyToPolicy<sizeof(typename VecTraits<T>::elem_type)> >(globPtr<T>(src), globPtr<T>(dst), identity<T>(), globPtr<uchar>(mask), stream);
    }
}
// Masked deep copy: dst elements are overwritten only where the mask allows.
// Supports element sizes 1/2/4/8 bytes and up to 4 channels; the mask must
// be CV_8U with either one channel or the same channel count as *this.
void cv::gpu::GpuMat::copyTo(OutputArray _dst, InputArray _mask, Stream& stream) const
{
    CV_DbgAssert( !empty() );
    CV_DbgAssert( depth() <= CV_64F && channels() <= 4 );

    GpuMat mask = _mask.getGpuMat();
    CV_DbgAssert( size() == mask.size() && mask.depth() == CV_8U && (mask.channels() == 1 || mask.channels() == channels()) );

    _dst.create(size(), type());
    GpuMat dst = _dst.getGpuMat();

    typedef void (*func_t)(const GpuMat& src, const GpuMat& dst, const GpuMat& mask, Stream& stream);
    // Dispatch table indexed by [element size in bytes][channels - 1];
    // rows for sizes 0,3,5,6,7 have no matching type and stay null.
    static const func_t funcs[9][4] =
    {
        {0,0,0,0},
        {copyWithMask<uchar>, copyWithMask<uchar2>, copyWithMask<uchar3>, copyWithMask<uchar4>},
        {copyWithMask<ushort>, copyWithMask<ushort2>, copyWithMask<ushort3>, copyWithMask<ushort4>},
        {0,0,0,0},
        {copyWithMask<int>, copyWithMask<int2>, copyWithMask<int3>, copyWithMask<int4>},
        {0,0,0,0},
        {0,0,0,0},
        {0,0,0,0},
        {copyWithMask<double>, copyWithMask<double2>, copyWithMask<double3>, copyWithMask<double4>}
    };

    if (mask.channels() == channels())
    {
        // Per-channel mask: flatten src, dst and mask to single-channel views
        // and reuse the 1-channel kernel element by element.
        const func_t func = funcs[elemSize1()][0];
        CV_DbgAssert( func != 0 );
        func(reshape(1), dst.reshape(1), mask.reshape(1), stream);
    }
    else
    {
        // Single-channel mask gating whole multi-channel elements.
        const func_t func = funcs[elemSize1()][channels() - 1];
        CV_DbgAssert( func != 0 );
        func(*this, dst, mask, stream);
    }
}
/////////////////////////////////////////////////////
/// setTo
namespace
{
template <typename T>
void setToWithOutMask(const GpuMat& mat, Scalar _scalar, Stream& stream)
{
Scalar_<typename VecTraits<T>::elem_type> scalar = _scalar;
gridTransform(constantPtr(VecTraits<T>::make(scalar.val), mat.rows, mat.cols), globPtr<T>(mat), identity<T>(), stream);
}
template <typename T>
void setToWithMask(const GpuMat& mat, const GpuMat& mask, Scalar _scalar, Stream& stream)
{
Scalar_<typename VecTraits<T>::elem_type> scalar = _scalar;
gridTransform(constantPtr(VecTraits<T>::make(scalar.val), mat.rows, mat.cols), globPtr<T>(mat), identity<T>(), globPtr<uchar>(mask), stream);
}
}
GpuMat& cv::gpu::GpuMat::setTo(Scalar value, Stream& stream)
{
CV_DbgAssert( !empty() );
CV_DbgAssert( depth() <= CV_64F && channels() <= 4 );
if (value[0] == 0.0 && value[1] == 0.0 && value[2] == 0.0 && value[3] == 0.0)
{
// Zero fill
if (stream)
CV_CUDEV_SAFE_CALL( cudaMemset2DAsync(data, step, 0, cols * elemSize(), rows, StreamAccessor::getStream(stream)) );
else
CV_CUDEV_SAFE_CALL( cudaMemset2D(data, step, 0, cols * elemSize(), rows) );
return *this;
}
if (depth() == CV_8U)
{
const int cn = channels();
if (cn == 1
|| (cn == 2 && value[0] == value[1])
|| (cn == 3 && value[0] == value[1] && value[0] == value[2])
|| (cn == 4 && value[0] == value[1] && value[0] == value[2] && value[0] == value[3]))
{
const int val = cv::saturate_cast<uchar>(value[0]);
if (stream)
CV_CUDEV_SAFE_CALL( cudaMemset2DAsync(data, step, val, cols * elemSize(), rows, StreamAccessor::getStream(stream)) );
else
CV_CUDEV_SAFE_CALL( cudaMemset2D(data, step, val, cols * elemSize(), rows) );
return *this;
}
}
typedef void (*func_t)(const GpuMat& mat, Scalar scalar, Stream& stream);
static const func_t funcs[7][4] =
{
{setToWithOutMask<uchar>,setToWithOutMask<uchar2>,setToWithOutMask<uchar3>,setToWithOutMask<uchar4>},
{setToWithOutMask<schar>,setToWithOutMask<char2>,setToWithOutMask<char3>,setToWithOutMask<char4>},
{setToWithOutMask<ushort>,setToWithOutMask<ushort2>,setToWithOutMask<ushort3>,setToWithOutMask<ushort4>},
{setToWithOutMask<short>,setToWithOutMask<short2>,setToWithOutMask<short3>,setToWithOutMask<short4>},
{setToWithOutMask<int>,setToWithOutMask<int2>,setToWithOutMask<int3>,setToWithOutMask<int4>},
{setToWithOutMask<float>,setToWithOutMask<float2>,setToWithOutMask<float3>,setToWithOutMask<float4>},
{setToWithOutMask<double>,setToWithOutMask<double2>,setToWithOutMask<double3>,setToWithOutMask<double4>}
};
funcs[depth()][channels() - 1](*this, value, stream);
return *this;
}
GpuMat& cv::gpu::GpuMat::setTo(Scalar value, InputArray _mask, Stream& stream)
{
CV_DbgAssert( !empty() );
CV_DbgAssert( depth() <= CV_64F && channels() <= 4 );
GpuMat mask = _mask.getGpuMat();
CV_DbgAssert( size() == mask.size() && mask.type() == CV_8UC1 );
typedef void (*func_t)(const GpuMat& mat, const GpuMat& mask, Scalar scalar, Stream& stream);
static const func_t funcs[7][4] =
{
{setToWithMask<uchar>,setToWithMask<uchar2>,setToWithMask<uchar3>,setToWithMask<uchar4>},
{setToWithMask<schar>,setToWithMask<char2>,setToWithMask<char3>,setToWithMask<char4>},
{setToWithMask<ushort>,setToWithMask<ushort2>,setToWithMask<ushort3>,setToWithMask<ushort4>},
{setToWithMask<short>,setToWithMask<short2>,setToWithMask<short3>,setToWithMask<short4>},
{setToWithMask<int>,setToWithMask<int2>,setToWithMask<int3>,setToWithMask<int4>},
{setToWithMask<float>,setToWithMask<float2>,setToWithMask<float3>,setToWithMask<float4>},
{setToWithMask<double>,setToWithMask<double2>,setToWithMask<double3>,setToWithMask<double4>}
};
funcs[depth()][channels() - 1](*this, mask, value, stream);
return *this;
}
/////////////////////////////////////////////////////
/// convertTo
namespace
{
template <typename T> struct ConvertToPolicy : DefaultTransformPolicy
{
};
template <> struct ConvertToPolicy<double> : DefaultTransformPolicy
{
enum {
shift = 1
};
};
template <typename T, typename D>
void convertToNoScale(const GpuMat& src, const GpuMat& dst, Stream& stream)
{
typedef typename VecTraits<T>::elem_type src_elem_type;
typedef typename VecTraits<D>::elem_type dst_elem_type;
typedef typename LargerType<src_elem_type, float>::type larger_elem_type;
typedef typename LargerType<float, dst_elem_type>::type scalar_type;
gridTransform_< ConvertToPolicy<scalar_type> >(globPtr<T>(src), globPtr<D>(dst), saturate_cast_func<T, D>(), stream);
}
template <typename T, typename D, typename S> struct Convertor : unary_function<T, D>
{
S alpha;
S beta;
__device__ __forceinline__ D operator ()(typename TypeTraits<T>::parameter_type src) const
{
return cudev::saturate_cast<D>(alpha * src + beta);
}
};
template <typename T, typename D>
void convertToScale(const GpuMat& src, const GpuMat& dst, double alpha, double beta, Stream& stream)
{
typedef typename VecTraits<T>::elem_type src_elem_type;
typedef typename VecTraits<D>::elem_type dst_elem_type;
typedef typename LargerType<src_elem_type, float>::type larger_elem_type;
typedef typename LargerType<float, dst_elem_type>::type scalar_type;
Convertor<T, D, scalar_type> op;
op.alpha = cv::saturate_cast<scalar_type>(alpha);
op.beta = cv::saturate_cast<scalar_type>(beta);
gridTransform_< ConvertToPolicy<scalar_type> >(globPtr<T>(src), globPtr<D>(dst), op, stream);
}
}
void cv::gpu::GpuMat::convertTo(OutputArray _dst, int rtype, Stream& stream) const
{
if (rtype < 0)
rtype = type();
else
rtype = CV_MAKE_TYPE(CV_MAT_DEPTH(rtype), channels());
const int sdepth = depth();
const int ddepth = CV_MAT_DEPTH(rtype);
if (sdepth == ddepth)
{
if (stream)
copyTo(_dst, stream);
else
copyTo(_dst);
return;
}
CV_DbgAssert( sdepth <= CV_64F && ddepth <= CV_64F );
GpuMat src = *this;
_dst.create(size(), rtype);
GpuMat dst = _dst.getGpuMat();
typedef void (*func_t)(const GpuMat& src, const GpuMat& dst, Stream& stream);
static const func_t funcs[7][7] =
{
{0, convertToNoScale<uchar, schar>, convertToNoScale<uchar, ushort>, convertToNoScale<uchar, short>, convertToNoScale<uchar, int>, convertToNoScale<uchar, float>, convertToNoScale<uchar, double>},
{convertToNoScale<schar, uchar>, 0, convertToNoScale<schar, ushort>, convertToNoScale<schar, short>, convertToNoScale<schar, int>, convertToNoScale<schar, float>, convertToNoScale<schar, double>},
{convertToNoScale<ushort, uchar>, convertToNoScale<ushort, schar>, 0, convertToNoScale<ushort, short>, convertToNoScale<ushort, int>, convertToNoScale<ushort, float>, convertToNoScale<ushort, double>},
{convertToNoScale<short, uchar>, convertToNoScale<short, schar>, convertToNoScale<short, ushort>, 0, convertToNoScale<short, int>, convertToNoScale<short, float>, convertToNoScale<short, double>},
{convertToNoScale<int, uchar>, convertToNoScale<int, schar>, convertToNoScale<int, ushort>, convertToNoScale<int, short>, 0, convertToNoScale<int, float>, convertToNoScale<int, double>},
{convertToNoScale<float, uchar>, convertToNoScale<float, schar>, convertToNoScale<float, ushort>, convertToNoScale<float, short>, convertToNoScale<float, int>, 0, convertToNoScale<float, double>},
{convertToNoScale<double, uchar>, convertToNoScale<double, schar>, convertToNoScale<double, ushort>, convertToNoScale<double, short>, convertToNoScale<double, int>, convertToNoScale<double, float>, 0}
};
funcs[sdepth][ddepth](reshape(1), dst.reshape(1), stream);
}
void cv::gpu::GpuMat::convertTo(OutputArray _dst, int rtype, double alpha, double beta, Stream& stream) const
{
if (rtype < 0)
rtype = type();
else
rtype = CV_MAKETYPE(CV_MAT_DEPTH(rtype), channels());
const int sdepth = depth();
const int ddepth = CV_MAT_DEPTH(rtype);
GpuMat src = *this;
_dst.create(size(), rtype);
GpuMat dst = _dst.getGpuMat();
typedef void (*func_t)(const GpuMat& src, const GpuMat& dst, double alpha, double beta, Stream& stream);
static const func_t funcs[7][7] =
{
{convertToScale<uchar, uchar>, convertToScale<uchar, schar>, convertToScale<uchar, ushort>, convertToScale<uchar, short>, convertToScale<uchar, int>, convertToScale<uchar, float>, convertToScale<uchar, double>},
{convertToScale<schar, uchar>, convertToScale<schar, schar>, convertToScale<schar, ushort>, convertToScale<schar, short>, convertToScale<schar, int>, convertToScale<schar, float>, convertToScale<schar, double>},
{convertToScale<ushort, uchar>, convertToScale<ushort, schar>, convertToScale<ushort, ushort>, convertToScale<ushort, short>, convertToScale<ushort, int>, convertToScale<ushort, float>, convertToScale<ushort, double>},
{convertToScale<short, uchar>, convertToScale<short, schar>, convertToScale<short, ushort>, convertToScale<short, short>, convertToScale<short, int>, convertToScale<short, float>, convertToScale<short, double>},
{convertToScale<int, uchar>, convertToScale<int, schar>, convertToScale<int, ushort>, convertToScale<int, short>, convertToScale<int, int>, convertToScale<int, float>, convertToScale<int, double>},
{convertToScale<float, uchar>, convertToScale<float, schar>, convertToScale<float, ushort>, convertToScale<float, short>, convertToScale<float, int>, convertToScale<float, float>, convertToScale<float, double>},
{convertToScale<double, uchar>, convertToScale<double, schar>, convertToScale<double, ushort>, convertToScale<double, short>, convertToScale<double, int>, convertToScale<double, float>, convertToScale<double, double>}
};
funcs[sdepth][ddepth](reshape(1), dst.reshape(1), alpha, beta, stream);
}
#endif
|
39a91631d11187d0a171fc7ffef092d2d31cbbe1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "THHUNN.h"
#include "THHTensor.hpp"
#include "common.h"
#include "THHDeviceTensor.cuh"
#include "THHDeviceTensorUtils.cuh"
#include "THHDeviceUtils.cuh"
#include "THHReduceApplyUtils.cuh"
#include <THH/THHApply.cuh>
#include "TH/THHalf.h"
#include "THHHalfAutoNumerics.cuh"
#include "THHAtomics.cuh"
template<typename Dtype>
__global__ void SpatialReflectionPadding_updateOutput(
THCDeviceTensor<Dtype, 4> input,
THCDeviceTensor<Dtype, 4> output,
int padT, int padB, int padL, int padR) {
int outputPointId = threadIdx.x + blockIdx.x * blockDim.x;
int plane = blockIdx.y;
int batch = blockIdx.z;
if (outputPointId >= output.getSize(2) * output.getSize(3)) {
return;
}
int outputPointX = outputPointId % output.getSize(3);
int outputPointY = outputPointId / output.getSize(3);
int iStartX = max(0, -padL);
int iStartY = max(0, -padT);
int oStartX = max(0, padL);
int oStartY = max(0, padT);
int inputPointX = abs(outputPointX - padL)
- abs(outputPointX - (input.getSize(3) + padL - 1))
- outputPointX
+ 2 * padL + input.getSize(3) - 1
- oStartX + iStartX;
int inputPointY = abs(outputPointY - padT)
- abs(outputPointY - (input.getSize(2) + padT - 1))
- outputPointY
+ 2 * padT + input.getSize(2) - 1
- oStartY + iStartY;
Dtype valueToCopy = input[batch][plane][inputPointY][inputPointX];
output[batch][plane][outputPointY][outputPointX] = valueToCopy;
}
template <typename Dtype>
__global__ void SpatialReflectionPadding_updateGradInput(
THCDeviceTensor<Dtype, 4> gradInput,
THCDeviceTensor<Dtype, 4> gradOutput,
int padT, int padB, int padL, int padR) {
int outputPointId = threadIdx.x + blockIdx.x * blockDim.x;
int plane = blockIdx.y;
int batch = blockIdx.z;
if (outputPointId >= gradOutput.getSize(2) * gradOutput.getSize(3)) {
return;
}
int outputPointX = outputPointId % gradOutput.getSize(3);
int outputPointY = outputPointId / gradOutput.getSize(3);
int iStartX = max(0, -padL);
int iStartY = max(0, -padT);
int oStartX = max(0, padL);
int oStartY = max(0, padT);
int inputPointX = abs(outputPointX - padL)
- abs(outputPointX - (gradInput.getSize(3) + padL - 1))
- outputPointX
+ 2 * padL + gradInput.getSize(3) - 1
- oStartX + iStartX;
int inputPointY = abs(outputPointY - padT)
- abs(outputPointY - (gradInput.getSize(2) + padT - 1))
- outputPointY
+ 2 * padT + gradInput.getSize(2) - 1
- oStartY + iStartY;
Dtype valueToCopy = gradOutput[batch][plane][outputPointY][outputPointX];
atomicAdd(&gradInput[batch][plane][inputPointY][inputPointX], valueToCopy);
}
#include "generic/SpatialReflectionPadding.cu"
#include "THHGenerateFloatTypes.h"
| 39a91631d11187d0a171fc7ffef092d2d31cbbe1.cu | #include "THCUNN.h"
#include "THCTensor.hpp"
#include "common.h"
#include "THCDeviceTensor.cuh"
#include "THCDeviceTensorUtils.cuh"
#include "THCDeviceUtils.cuh"
#include "THCReduceApplyUtils.cuh"
#include <THC/THCApply.cuh>
#include "TH/THHalf.h"
#include "THCHalfAutoNumerics.cuh"
#include "THCAtomics.cuh"
template<typename Dtype>
__global__ void SpatialReflectionPadding_updateOutput(
THCDeviceTensor<Dtype, 4> input,
THCDeviceTensor<Dtype, 4> output,
int padT, int padB, int padL, int padR) {
int outputPointId = threadIdx.x + blockIdx.x * blockDim.x;
int plane = blockIdx.y;
int batch = blockIdx.z;
if (outputPointId >= output.getSize(2) * output.getSize(3)) {
return;
}
int outputPointX = outputPointId % output.getSize(3);
int outputPointY = outputPointId / output.getSize(3);
int iStartX = max(0, -padL);
int iStartY = max(0, -padT);
int oStartX = max(0, padL);
int oStartY = max(0, padT);
int inputPointX = abs(outputPointX - padL)
- abs(outputPointX - (input.getSize(3) + padL - 1))
- outputPointX
+ 2 * padL + input.getSize(3) - 1
- oStartX + iStartX;
int inputPointY = abs(outputPointY - padT)
- abs(outputPointY - (input.getSize(2) + padT - 1))
- outputPointY
+ 2 * padT + input.getSize(2) - 1
- oStartY + iStartY;
Dtype valueToCopy = input[batch][plane][inputPointY][inputPointX];
output[batch][plane][outputPointY][outputPointX] = valueToCopy;
}
template <typename Dtype>
__global__ void SpatialReflectionPadding_updateGradInput(
THCDeviceTensor<Dtype, 4> gradInput,
THCDeviceTensor<Dtype, 4> gradOutput,
int padT, int padB, int padL, int padR) {
int outputPointId = threadIdx.x + blockIdx.x * blockDim.x;
int plane = blockIdx.y;
int batch = blockIdx.z;
if (outputPointId >= gradOutput.getSize(2) * gradOutput.getSize(3)) {
return;
}
int outputPointX = outputPointId % gradOutput.getSize(3);
int outputPointY = outputPointId / gradOutput.getSize(3);
int iStartX = max(0, -padL);
int iStartY = max(0, -padT);
int oStartX = max(0, padL);
int oStartY = max(0, padT);
int inputPointX = abs(outputPointX - padL)
- abs(outputPointX - (gradInput.getSize(3) + padL - 1))
- outputPointX
+ 2 * padL + gradInput.getSize(3) - 1
- oStartX + iStartX;
int inputPointY = abs(outputPointY - padT)
- abs(outputPointY - (gradInput.getSize(2) + padT - 1))
- outputPointY
+ 2 * padT + gradInput.getSize(2) - 1
- oStartY + iStartY;
Dtype valueToCopy = gradOutput[batch][plane][outputPointY][outputPointX];
atomicAdd(&gradInput[batch][plane][inputPointY][inputPointX], valueToCopy);
}
#include "generic/SpatialReflectionPadding.cu"
#include "THCGenerateFloatTypes.h"
|
e92bdf4d4bb2e3dac2678275bcbd99b99e920089.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/group_norm_kernel.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/common/layout.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/gpu/group_norm_utils.h"
#include "paddle/phi/common/data_type.h"
#include "paddle/phi/common/float16.h"
#include "paddle/phi/core/device_context.h"
namespace phi {
template <typename T, typename AccT>
__global__ void GroupNormForwardGetMeanAndVar(const T* x,
int N,
int C,
int W,
int imsize,
int groups,
int group_size,
AccT* mean,
AccT* var) {
int gid = blockIdx.y;
int cid = blockIdx.x;
int bid = blockIdx.z;
int H = imsize / W;
int number = min(group_size, static_cast<int>(C - gid * group_size));
int ccid = gid * group_size + cid;
if (ccid >= C) return;
AccT x_mean = static_cast<AccT>(0);
AccT x_var = static_cast<AccT>(0);
for (int imid = threadIdx.x; imid < imsize; imid += blockDim.x) {
AccT val;
int hid = imid / W;
int wid = imid % W;
val = static_cast<AccT>(x[(bid * H + hid) * W * C + wid * C + ccid]);
x_mean += val;
x_var += val * val;
}
x_mean /= number * imsize;
x_var /= number * imsize;
CudaAtomicAddWithWarp(&mean[bid * groups + gid], x_mean);
CudaAtomicAddWithWarp(&var[bid * groups + gid], x_var);
}
template <typename T, typename AccT, int flags>
__global__ void GroupNormForward(const T* x,
const AccT* mean,
const AccT* var,
const T* scale,
const T* bias,
int N,
int C,
int W,
int imsize,
int groups,
int group_size,
AccT epsilon,
T* y,
AccT* real_var,
const DataLayout data_layout) {
int gid = blockIdx.y;
int cid = blockIdx.x;
int bid = blockIdx.z;
int H = imsize / W;
int ccid = gid * group_size + cid;
if (ccid >= C) return;
auto ng = bid * groups + gid;
AccT x_mean = mean[ng];
AccT x_var = var[ng];
x_var = x_var - x_mean * x_mean;
AccT var_inv = rsqrt(x_var + epsilon);
if (cid == 0 && threadIdx.x == 0) {
real_var[ng] = x_var;
}
for (int imid = threadIdx.x; imid < imsize; imid += blockDim.x) {
AccT val;
int hid, wid;
int index = (bid * C + ccid) * imsize + imid;
if (data_layout == DataLayout::kNCHW) {
val = static_cast<AccT>(x[index]);
} else {
hid = imid / W;
wid = imid % W;
val = static_cast<AccT>(x[(bid * H + hid) * W * C + wid * C + ccid]);
}
val = (val - x_mean) * var_inv;
if (flags & kHasScale) {
val *= static_cast<AccT>(scale[ccid]);
}
if (flags & kHasBias) {
val += static_cast<AccT>(bias[ccid]);
}
if (data_layout == DataLayout::kNCHW) {
y[index] = static_cast<T>(val);
} else {
y[(bid * H + hid) * W * C + wid * C + ccid] = static_cast<T>(val);
}
}
}
template <typename T, typename Context>
void GroupNormKernel(const Context& dev_ctx,
const DenseTensor& x,
const paddle::optional<DenseTensor>& scale,
const paddle::optional<DenseTensor>& bias,
float epsilon,
int groups,
const std::string& data_layout_str,
DenseTensor* y,
DenseTensor* mean,
DenseTensor* var) {
using AccT = typename phi::dtype::MPTypeTrait<T>::Type;
const DataLayout data_layout = phi::StringToDataLayout(data_layout_str);
const auto scale_ptr = scale.get_ptr();
const auto bias_ptr = bias.get_ptr();
const auto x_dims = x.dims();
const int C = (data_layout == DataLayout::kNCHW ? x_dims[1]
: x_dims[x_dims.size() - 1]);
const int group_size = C / groups;
const int W = (data_layout == DataLayout::kNCHW ? x_dims[x_dims.size() - 1]
: x_dims[x_dims.size() - 2]);
dev_ctx.template Alloc<T>(y);
dev_ctx.template Alloc<AccT>(mean);
dev_ctx.template Alloc<AccT>(var);
// temp_var is used to calculate the mean^2
DenseTensor temp_var;
temp_var.Resize(var->dims());
dev_ctx.template Alloc<AccT>(&temp_var);
phi::funcs::SetConstant<GPUContext, T> set_zero;
phi::funcs::SetConstant<GPUContext, AccT> set_zero_AccT;
auto* x_data = x.data<T>();
auto* y_data = y->data<T>();
auto* mean_data = mean->data<AccT>();
auto* var_data = var->data<AccT>();
auto* temp_var_data = temp_var.data<AccT>();
const T* scale_data = nullptr;
if (scale_ptr) scale_data = scale_ptr->data<T>();
const T* bias_data = nullptr;
if (bias_ptr) bias_data = bias_ptr->data<T>();
int imsize = 1;
if (data_layout == DataLayout::kNCHW) {
for (int i = 2; i < x_dims.size(); ++i) {
imsize *= x_dims[i];
}
} else {
for (int i = 1; i < x_dims.size() - 1; ++i) {
imsize *= x_dims[i];
}
}
#ifdef __HIPCC__
int block_size = ::max(::min(256, imsize), 64);
#else
int block_size = ::min(1024, imsize);
#endif
dim3 grid(group_size, groups, x_dims[0]);
dim3 threads(block_size, 1, 1);
if (data_layout == DataLayout::kNCHW) {
constexpr int vec_size = sizeof(float4) / sizeof(T);
int size = group_size * imsize;
const int max_num_threads = 1024;
int max_block_size = ::min(size / vec_size, max_num_threads);
int block_size_nchw = 1;
while (block_size_nchw < max_block_size) {
block_size_nchw *= 2;
}
block_size_nchw = ::max(block_size_nchw, kps::details::kWarpSize);
dim3 grids(x_dims[0] * groups);
dim3 blocks(block_size_nchw);
if (size < vec_size * block_size_nchw) {
hipLaunchKernelGGL(( ScalarGetMeanAndVarNCHW<T, AccT>), dim3(grids), dim3(blocks), 0, dev_ctx.stream(),
x_data, mean_data, temp_var_data, size);
} else {
hipLaunchKernelGGL(( VectorizedGetMeanAndVarNCHW<T, AccT, vec_size>)
, dim3(grids), dim3(blocks), 0, dev_ctx.stream(),
x_data, mean_data, temp_var_data, size);
}
} else {
set_zero_AccT(dev_ctx, mean, static_cast<AccT>(0));
set_zero_AccT(dev_ctx, &temp_var, static_cast<AccT>(0));
hipLaunchKernelGGL(( GroupNormForwardGetMeanAndVar<T, AccT>)
, dim3(grid), dim3(threads), 0, dev_ctx.stream(), x_data,
x_dims[0],
C,
W,
imsize,
groups,
group_size,
mean_data,
temp_var_data);
}
int flags =
(scale_data != nullptr) * kHasScale + (bias_data != nullptr) * kHasBias;
UNROLL_ALL_CASES(flags,
GroupNormForward,
x_data,
mean_data,
temp_var_data,
scale_data,
bias_data,
x_dims[0],
C,
W,
imsize,
groups,
group_size,
static_cast<AccT>(epsilon),
y_data,
var_data,
data_layout);
}
template <typename T, typename AccT>
void GroupNormDirectCUDAFunctor<T, AccT>::operator()(
gpuStream_t stream,
const T* input,
std::vector<int> input_shape,
const T* bias,
const T* scale,
AccT* temp_variance,
int groups,
float eps,
T* output,
AccT* mean,
AccT* variance,
const DataLayout data_layout) {
const auto input_ddim = phi::make_ddim(input_shape);
const int C =
(data_layout == DataLayout::kNCHW ? input_ddim[1]
: input_ddim[input_ddim.size() - 1]);
const int group_size = C / groups;
const int W =
(data_layout == DataLayout::kNCHW ? input_ddim[input_ddim.size() - 1]
: input_ddim[input_ddim.size() - 2]);
int image_size = 1;
if (data_layout == DataLayout::kNCHW) {
for (int i = 2; i < input_ddim.size(); ++i) {
image_size *= input_ddim[i];
}
} else {
for (int i = 1; i < input_ddim.size() - 1; ++i) {
image_size *= input_ddim[i];
}
}
#ifdef __HIPCC__
int block_size = ::max(::min(256, image_size), 64);
#else
int block_size = ::min(1024, image_size);
#endif
dim3 grid(group_size, groups, input_ddim[0]);
dim3 threads(block_size, 1, 1);
if (data_layout == DataLayout::kNCHW) {
constexpr int vec_size = sizeof(float4) / sizeof(T);
int size = group_size * image_size; // group element size
const int max_num_threads = 1024;
int max_block_size = ::min(size / vec_size, max_num_threads);
int block_size_nchw = 1;
while (block_size_nchw < max_block_size) {
block_size_nchw *= 2;
}
block_size_nchw = ::max(block_size_nchw, phi::kps::details::kWarpSize);
dim3 grids(input_ddim[0] * groups);
dim3 blocks(block_size_nchw);
if (size < vec_size * block_size_nchw) {
hipLaunchKernelGGL(( phi::ScalarGetMeanAndVarNCHW<T, AccT>)
, dim3(grids), dim3(blocks), 0, stream, input, mean, temp_variance, size);
} else {
hipLaunchKernelGGL(( phi::VectorizedGetMeanAndVarNCHW<T, AccT, vec_size>)
, dim3(grids), dim3(blocks), 0, stream, input, mean, temp_variance, size);
}
} else {
#ifdef PADDLE_WITH_HIP
hipMemset(mean, 0, sizeof(AccT) * input_ddim[0] * groups);
hipMemset(temp_variance, 0, sizeof(AccT) * input_ddim[0] * groups);
#else
hipMemset(mean, 0, sizeof(AccT) * input_ddim[0] * groups);
hipMemset(temp_variance, 0, sizeof(AccT) * input_ddim[0] * groups);
#endif
hipLaunchKernelGGL(( phi::GroupNormForwardGetMeanAndVar<T, AccT>)
, dim3(grid), dim3(threads), 0, stream, input,
input_ddim[0],
C,
W,
image_size,
groups,
group_size,
mean,
temp_variance);
}
hipLaunchKernelGGL(( GroupNormForward<T, AccT, 3>)
, dim3(grid), dim3(threads), 0, stream, input,
mean,
temp_variance,
scale,
bias,
input_ddim[0],
C,
W,
image_size,
groups,
group_size,
static_cast<AccT>(eps),
output,
variance,
data_layout);
}
template class GroupNormDirectCUDAFunctor<float, float>;
#if defined(PADDLE_WITH_CUDA) && !defined(PADDLE_WITH_HIP)
template class GroupNormDirectCUDAFunctor<half, float>;
#endif
} // namespace phi
PD_REGISTER_KERNEL(group_norm,
GPU,
ALL_LAYOUT,
phi::GroupNormKernel,
float,
double,
phi::dtype::bfloat16,
phi::dtype::float16) {
if (kernel_key.dtype() == phi::DataType::BFLOAT16 ||
kernel_key.dtype() == phi::DataType::FLOAT16) {
kernel->OutputAt(1).SetDataType(phi::DataType::FLOAT32);
kernel->OutputAt(2).SetDataType(phi::DataType::FLOAT32);
}
}
| e92bdf4d4bb2e3dac2678275bcbd99b99e920089.cu | // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/group_norm_kernel.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/common/layout.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/gpu/group_norm_utils.h"
#include "paddle/phi/common/data_type.h"
#include "paddle/phi/common/float16.h"
#include "paddle/phi/core/device_context.h"
namespace phi {
template <typename T, typename AccT>
__global__ void GroupNormForwardGetMeanAndVar(const T* x,
int N,
int C,
int W,
int imsize,
int groups,
int group_size,
AccT* mean,
AccT* var) {
int gid = blockIdx.y;
int cid = blockIdx.x;
int bid = blockIdx.z;
int H = imsize / W;
int number = min(group_size, static_cast<int>(C - gid * group_size));
int ccid = gid * group_size + cid;
if (ccid >= C) return;
AccT x_mean = static_cast<AccT>(0);
AccT x_var = static_cast<AccT>(0);
for (int imid = threadIdx.x; imid < imsize; imid += blockDim.x) {
AccT val;
int hid = imid / W;
int wid = imid % W;
val = static_cast<AccT>(x[(bid * H + hid) * W * C + wid * C + ccid]);
x_mean += val;
x_var += val * val;
}
x_mean /= number * imsize;
x_var /= number * imsize;
CudaAtomicAddWithWarp(&mean[bid * groups + gid], x_mean);
CudaAtomicAddWithWarp(&var[bid * groups + gid], x_var);
}
template <typename T, typename AccT, int flags>
__global__ void GroupNormForward(const T* x,
const AccT* mean,
const AccT* var,
const T* scale,
const T* bias,
int N,
int C,
int W,
int imsize,
int groups,
int group_size,
AccT epsilon,
T* y,
AccT* real_var,
const DataLayout data_layout) {
int gid = blockIdx.y;
int cid = blockIdx.x;
int bid = blockIdx.z;
int H = imsize / W;
int ccid = gid * group_size + cid;
if (ccid >= C) return;
auto ng = bid * groups + gid;
AccT x_mean = mean[ng];
AccT x_var = var[ng];
x_var = x_var - x_mean * x_mean;
AccT var_inv = rsqrt(x_var + epsilon);
if (cid == 0 && threadIdx.x == 0) {
real_var[ng] = x_var;
}
for (int imid = threadIdx.x; imid < imsize; imid += blockDim.x) {
AccT val;
int hid, wid;
int index = (bid * C + ccid) * imsize + imid;
if (data_layout == DataLayout::kNCHW) {
val = static_cast<AccT>(x[index]);
} else {
hid = imid / W;
wid = imid % W;
val = static_cast<AccT>(x[(bid * H + hid) * W * C + wid * C + ccid]);
}
val = (val - x_mean) * var_inv;
if (flags & kHasScale) {
val *= static_cast<AccT>(scale[ccid]);
}
if (flags & kHasBias) {
val += static_cast<AccT>(bias[ccid]);
}
if (data_layout == DataLayout::kNCHW) {
y[index] = static_cast<T>(val);
} else {
y[(bid * H + hid) * W * C + wid * C + ccid] = static_cast<T>(val);
}
}
}
template <typename T, typename Context>
void GroupNormKernel(const Context& dev_ctx,
const DenseTensor& x,
const paddle::optional<DenseTensor>& scale,
const paddle::optional<DenseTensor>& bias,
float epsilon,
int groups,
const std::string& data_layout_str,
DenseTensor* y,
DenseTensor* mean,
DenseTensor* var) {
using AccT = typename phi::dtype::MPTypeTrait<T>::Type;
const DataLayout data_layout = phi::StringToDataLayout(data_layout_str);
const auto scale_ptr = scale.get_ptr();
const auto bias_ptr = bias.get_ptr();
const auto x_dims = x.dims();
const int C = (data_layout == DataLayout::kNCHW ? x_dims[1]
: x_dims[x_dims.size() - 1]);
const int group_size = C / groups;
const int W = (data_layout == DataLayout::kNCHW ? x_dims[x_dims.size() - 1]
: x_dims[x_dims.size() - 2]);
dev_ctx.template Alloc<T>(y);
dev_ctx.template Alloc<AccT>(mean);
dev_ctx.template Alloc<AccT>(var);
// temp_var is used to calculate the mean^2
DenseTensor temp_var;
temp_var.Resize(var->dims());
dev_ctx.template Alloc<AccT>(&temp_var);
phi::funcs::SetConstant<GPUContext, T> set_zero;
phi::funcs::SetConstant<GPUContext, AccT> set_zero_AccT;
auto* x_data = x.data<T>();
auto* y_data = y->data<T>();
auto* mean_data = mean->data<AccT>();
auto* var_data = var->data<AccT>();
auto* temp_var_data = temp_var.data<AccT>();
const T* scale_data = nullptr;
if (scale_ptr) scale_data = scale_ptr->data<T>();
const T* bias_data = nullptr;
if (bias_ptr) bias_data = bias_ptr->data<T>();
int imsize = 1;
if (data_layout == DataLayout::kNCHW) {
for (int i = 2; i < x_dims.size(); ++i) {
imsize *= x_dims[i];
}
} else {
for (int i = 1; i < x_dims.size() - 1; ++i) {
imsize *= x_dims[i];
}
}
#ifdef __HIPCC__
int block_size = std::max(std::min(256, imsize), 64);
#else
int block_size = std::min(1024, imsize);
#endif
dim3 grid(group_size, groups, x_dims[0]);
dim3 threads(block_size, 1, 1);
if (data_layout == DataLayout::kNCHW) {
constexpr int vec_size = sizeof(float4) / sizeof(T);
int size = group_size * imsize;
const int max_num_threads = 1024;
int max_block_size = std::min(size / vec_size, max_num_threads);
int block_size_nchw = 1;
while (block_size_nchw < max_block_size) {
block_size_nchw *= 2;
}
block_size_nchw = std::max(block_size_nchw, kps::details::kWarpSize);
dim3 grids(x_dims[0] * groups);
dim3 blocks(block_size_nchw);
if (size < vec_size * block_size_nchw) {
ScalarGetMeanAndVarNCHW<T, AccT><<<grids, blocks, 0, dev_ctx.stream()>>>(
x_data, mean_data, temp_var_data, size);
} else {
VectorizedGetMeanAndVarNCHW<T, AccT, vec_size>
<<<grids, blocks, 0, dev_ctx.stream()>>>(
x_data, mean_data, temp_var_data, size);
}
} else {
set_zero_AccT(dev_ctx, mean, static_cast<AccT>(0));
set_zero_AccT(dev_ctx, &temp_var, static_cast<AccT>(0));
GroupNormForwardGetMeanAndVar<T, AccT>
<<<grid, threads, 0, dev_ctx.stream()>>>(x_data,
x_dims[0],
C,
W,
imsize,
groups,
group_size,
mean_data,
temp_var_data);
}
int flags =
(scale_data != nullptr) * kHasScale + (bias_data != nullptr) * kHasBias;
UNROLL_ALL_CASES(flags,
GroupNormForward,
x_data,
mean_data,
temp_var_data,
scale_data,
bias_data,
x_dims[0],
C,
W,
imsize,
groups,
group_size,
static_cast<AccT>(epsilon),
y_data,
var_data,
data_layout);
}
// Standalone group-norm forward entry point that runs on a raw GPU stream
// (no DeviceContext): pass 1 computes per-(batch, group) mean/variance,
// pass 2 normalizes and applies scale/bias.
//
// Preconditions visible from the indexing below:
//  - input_shape is NCHW or NHWC as indicated by data_layout.
//  - mean / temp_variance each hold input_ddim[0] * groups AccT values.
// NOTE(review): the final GroupNormForward launch is instantiated with
// flags == 3 (scale and bias both present), so `scale` and `bias` are
// presumably required to be non-null here — TODO confirm against callers.
template <typename T, typename AccT>
void GroupNormDirectCUDAFunctor<T, AccT>::operator()(
    gpuStream_t stream,
    const T* input,
    std::vector<int> input_shape,
    const T* bias,
    const T* scale,
    AccT* temp_variance,
    int groups,
    float eps,
    T* output,
    AccT* mean,
    AccT* variance,
    const DataLayout data_layout) {
  const auto input_ddim = phi::make_ddim(input_shape);
  // Channel count: dim 1 for NCHW, last dim for NHWC.
  const int C =
      (data_layout == DataLayout::kNCHW ? input_ddim[1]
                                        : input_ddim[input_ddim.size() - 1]);
  const int group_size = C / groups;
  // Innermost spatial extent; used by the NHWC kernels for indexing.
  const int W =
      (data_layout == DataLayout::kNCHW ? input_ddim[input_ddim.size() - 1]
                                        : input_ddim[input_ddim.size() - 2]);
  // Product of the spatial dims (everything that is not batch/channel).
  int image_size = 1;
  if (data_layout == DataLayout::kNCHW) {
    for (int i = 2; i < input_ddim.size(); ++i) {
      image_size *= input_ddim[i];
    }
  } else {
    for (int i = 1; i < input_ddim.size() - 1; ++i) {
      image_size *= input_ddim[i];
    }
  }
#ifdef __HIPCC__
  // HIP build clamps the block size to [64, 256].
  int block_size = std::max(std::min(256, image_size), 64);
#else
  int block_size = std::min(1024, image_size);
#endif
  dim3 grid(group_size, groups, input_ddim[0]);
  dim3 threads(block_size, 1, 1);
  if (data_layout == DataLayout::kNCHW) {
    // NCHW: a group's elements are contiguous, so mean/var are reduced with
    // one block per (batch, group), vectorized with float4-wide loads when
    // the group is large enough.
    constexpr int vec_size = sizeof(float4) / sizeof(T);
    int size = group_size * image_size;  // group element size
    const int max_num_threads = 1024;
    int max_block_size = std::min(size / vec_size, max_num_threads);
    // Round the block size up to a power of two, at least one warp wide.
    int block_size_nchw = 1;
    while (block_size_nchw < max_block_size) {
      block_size_nchw *= 2;
    }
    block_size_nchw = std::max(block_size_nchw, phi::kps::details::kWarpSize);
    dim3 grids(input_ddim[0] * groups);
    dim3 blocks(block_size_nchw);
    if (size < vec_size * block_size_nchw) {
      // Too few elements per group for vectorized loads; scalar fallback.
      phi::ScalarGetMeanAndVarNCHW<T, AccT>
          <<<grids, blocks, 0, stream>>>(input, mean, temp_variance, size);
    } else {
      phi::VectorizedGetMeanAndVarNCHW<T, AccT, vec_size>
          <<<grids, blocks, 0, stream>>>(input, mean, temp_variance, size);
    }
  } else {
    // NHWC: the reduction kernel accumulates into mean/temp_variance, so the
    // accumulators must be zeroed first.
#ifdef PADDLE_WITH_HIP
    hipMemset(mean, 0, sizeof(AccT) * input_ddim[0] * groups);
    hipMemset(temp_variance, 0, sizeof(AccT) * input_ddim[0] * groups);
#else
    cudaMemset(mean, 0, sizeof(AccT) * input_ddim[0] * groups);
    cudaMemset(temp_variance, 0, sizeof(AccT) * input_ddim[0] * groups);
#endif
    phi::GroupNormForwardGetMeanAndVar<T, AccT>
        <<<grid, threads, 0, stream>>>(input,
                                       input_ddim[0],
                                       C,
                                       W,
                                       image_size,
                                       groups,
                                       group_size,
                                       mean,
                                       temp_variance);
  }
  // Pass 2: normalize with the computed statistics and apply scale/bias.
  GroupNormForward<T, AccT, 3>
      <<<grid, threads, 0, stream>>>(input,
                                     mean,
                                     temp_variance,
                                     scale,
                                     bias,
                                     input_ddim[0],
                                     C,
                                     W,
                                     image_size,
                                     groups,
                                     group_size,
                                     static_cast<AccT>(eps),
                                     output,
                                     variance,
                                     data_layout);
}
// Explicit instantiations of the direct functor for external users.
template class GroupNormDirectCUDAFunctor<float, float>;
#if defined(PADDLE_WITH_CUDA) && !defined(PADDLE_WITH_HIP)
// The half instantiation is only built for native CUDA.
template class GroupNormDirectCUDAFunctor<half, float>;
#endif
}  // namespace phi
PD_REGISTER_KERNEL(group_norm,
                   GPU,
                   ALL_LAYOUT,
                   phi::GroupNormKernel,
                   float,
                   double,
                   phi::dtype::bfloat16,
                   phi::dtype::float16) {
  // For reduced-precision inputs, keep the saved mean/variance outputs
  // (outputs 1 and 2) in FP32 for numerical accuracy.
  if (kernel_key.dtype() == phi::DataType::BFLOAT16 ||
      kernel_key.dtype() == phi::DataType::FLOAT16) {
    kernel->OutputAt(1).SetDataType(phi::DataType::FLOAT32);
    kernel->OutputAt(2).SetDataType(phi::DataType::FLOAT32);
  }
}
|
ae0a2e9aa3c4fc17dbcab252e7ed375059e145d7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//gpu_bench.cu
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
// Error-check macro for HIP API calls; prints location and aborts on failure.
// NOTE(review): the message formats the variable `err`, not the macro
// argument `x` — callers must store the status in a variable named `err`.
#define CHECK_ERR(x) \
    if (x != hipSuccess) { \
        fprintf(stderr,"%s in %s at line %d\n", \
                hipGetErrorString(err),__FILE__,__LINE__); \
        exit(-1); \
    } \
// end CHECK_ERR — this comment line terminates the backslash continuation
unsigned long MAX_OPS = 20000000;
const long MEGABYTE = 1048576;  // bytes per MiB
// Placeholder kernel for a GPU IOPS test; currently does nothing.
__global__ void gpu_iops(unsigned long max_ops) {
    // int a = blockDim.x * blockIdx.x + threadIdx.x;
}
// Host<->device copy micro-benchmark (HIP build).
//   -r R|W   direction: R = device-to-host reads, W = host-to-device writes
//   -t B|K|M granularity: single bytes, 1 KiB chunks, or 1 MiB chunks
// Prints "<rw>\t<test>\t<rate>" where the rate is derived from the wall
// time of the copy loop (units noted per branch below).
int main(int argc, char *argv[]) {
    // getopt() returns int; a plain `char` makes the `!= -1` loop test fail
    // on platforms where char is unsigned.
    int c;
    char test = 'B';
    char rw = 'R';
    while ( (c = getopt(argc, argv, "r:t:") ) != -1)
    {
        switch (c)
        {
            case 'r':
                rw = optarg[0];
                break;
            case 't':
                test = optarg[0];
                break;
            default:
                printf("Usage: ./benchCPU -n [number of threads]\n");
                return -1;
        }
    }
    struct timeval tv;
    long long start, stop;
    double secs;
    // Initialized so CHECK_ERR never reads an indeterminate value when rw
    // matches neither 'R' nor 'W' (the copy branches are then skipped).
    hipError_t err = hipSuccess;
    // NULL-initialized so cleanup is well-defined when no test case runs.
    unsigned char *d_mem_pointer = NULL;
    unsigned char *mem_pointer = NULL;
    hipMemcpyKind dir = hipMemcpyHostToDevice;
    if(rw == 'R')
    {
        dir = hipMemcpyDeviceToHost;
    }
    else if(rw == 'W')
    {
        // Fixed: was `dir - hipMemcpyHostToDevice;`, a no-op expression.
        dir = hipMemcpyHostToDevice;
    }
    if(test == 'B')
    {
        // 1 MiB moved one byte at a time: dominated by per-call latency.
        err = hipMalloc((void **) &d_mem_pointer, sizeof(unsigned char)*MEGABYTE);
        CHECK_ERR(err);
        mem_pointer = (unsigned char *)malloc(sizeof(unsigned char)*1);
        gettimeofday(&tv, NULL);
        start = tv.tv_sec*1000000LL + tv.tv_usec;
        for(unsigned long i = 0; i<MEGABYTE; i++)
        {
            if(rw == 'W')
                err = hipMemcpy((void *)&d_mem_pointer[i], (void *)mem_pointer, 1, dir);
            else if(rw == 'R')
                err = hipMemcpy((void *)mem_pointer, (void *)&d_mem_pointer[i], 1, dir);
            CHECK_ERR(err);
        }
        gettimeofday(&tv, NULL);
        stop = tv.tv_sec*1000000LL + tv.tv_usec;
        secs = (stop-start)/1000000.0;
        printf("%c\t%c\t%lf\n", rw, test, 1.0/(secs));  /* MiB/s */
    }
    else if(test == 'K')
    {
        // 256 MiB moved in 1 KiB chunks.
        err = hipMalloc((void **) &d_mem_pointer, sizeof(unsigned char)*256*MEGABYTE);
        CHECK_ERR(err);
        mem_pointer = (unsigned char *)malloc(sizeof(unsigned char)*1024);
        gettimeofday(&tv, NULL);
        start = tv.tv_sec*1000000LL + tv.tv_usec;
        for(unsigned long i = 0; i<256*MEGABYTE/1024; i++)
        {
            if(rw == 'W')
                err = hipMemcpy((void *)&d_mem_pointer[i*1024], (void *)mem_pointer, 1024, dir);
            else if(rw == 'R')
                err = hipMemcpy((void *)mem_pointer, (void *)&d_mem_pointer[i*1024], 1024, dir);
            CHECK_ERR(err);
        }
        gettimeofday(&tv, NULL);
        stop = tv.tv_sec*1000000LL + tv.tv_usec;
        secs = (stop-start)/1000000.0;
        printf("%c\t%c\t%lf\n", rw, test, (256.0/1024.0)/(secs));  /* GiB/s */
    }
    else if(test == 'M')
    {
        // 5120 MiB moved in 1 MiB chunks, cycling through a 512 MiB buffer.
        err = hipMalloc((void **) &d_mem_pointer, sizeof(unsigned char)*512*MEGABYTE);
        CHECK_ERR(err);
        mem_pointer = (unsigned char *)malloc(sizeof(unsigned char)*MEGABYTE);
        gettimeofday(&tv, NULL);
        start = tv.tv_sec*1000000LL + tv.tv_usec;
        for(unsigned long i = 0; i<512*10; i++)
        {
            if(rw == 'W')
                err = hipMemcpy((void *)&d_mem_pointer[(i*MEGABYTE)%(512*MEGABYTE)], (void *)mem_pointer, MEGABYTE, dir);
            else if(rw == 'R')
                err = hipMemcpy((void *)mem_pointer, (void *)&d_mem_pointer[(i*MEGABYTE)%(512*MEGABYTE)], MEGABYTE, dir);
            CHECK_ERR(err);
        }
        gettimeofday(&tv, NULL);
        stop = tv.tv_sec*1000000LL + tv.tv_usec;
        secs = (stop-start)/1000000.0;
        printf("%c\t%c\t%lf\n", rw, test, (512*10)/(secs));  /* MiB/s */
    }
    // Release host and device buffers; previously the host buffer leaked and
    // hipFree could be called on an uninitialized pointer for unknown -t.
    free(mem_pointer);
    if (d_mem_pointer != NULL) {
        err = hipFree(d_mem_pointer);
        CHECK_ERR(err);
    }
    return 0;
}
| ae0a2e9aa3c4fc17dbcab252e7ed375059e145d7.cu | //gpu_bench.cu
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
// Error-check macro for CUDA API calls; prints location and aborts on failure.
// NOTE(review): the message formats the variable `err`, not the macro
// argument `x` — callers must store the status in a variable named `err`.
#define CHECK_ERR(x) \
    if (x != cudaSuccess) { \
        fprintf(stderr,"%s in %s at line %d\n", \
                cudaGetErrorString(err),__FILE__,__LINE__); \
        exit(-1); \
    } \
// end CHECK_ERR — this comment line terminates the backslash continuation
unsigned long MAX_OPS = 20000000;
const long MEGABYTE = 1048576;  // bytes per MiB
// Placeholder kernel for a GPU IOPS test; currently does nothing.
__global__ void gpu_iops(unsigned long max_ops) {
    // int a = blockDim.x * blockIdx.x + threadIdx.x;
}
// Host<->device copy micro-benchmark (CUDA build).
//   -r R|W   direction: R = device-to-host reads, W = host-to-device writes
//   -t B|K|M granularity: single bytes, 1 KiB chunks, or 1 MiB chunks
// Prints "<rw>\t<test>\t<rate>" where the rate is derived from the wall
// time of the copy loop (units noted per branch below).
int main(int argc, char *argv[]) {
    // getopt() returns int; a plain `char` makes the `!= -1` loop test fail
    // on platforms where char is unsigned.
    int c;
    char test = 'B';
    char rw = 'R';
    while ( (c = getopt(argc, argv, "r:t:") ) != -1)
    {
        switch (c)
        {
            case 'r':
                rw = optarg[0];
                break;
            case 't':
                test = optarg[0];
                break;
            default:
                printf("Usage: ./benchCPU -n [number of threads]\n");
                return -1;
        }
    }
    struct timeval tv;
    long long start, stop;
    double secs;
    // Initialized so CHECK_ERR never reads an indeterminate value when rw
    // matches neither 'R' nor 'W' (the copy branches are then skipped).
    cudaError_t err = cudaSuccess;
    // NULL-initialized so cleanup is well-defined when no test case runs.
    unsigned char *d_mem_pointer = NULL;
    unsigned char *mem_pointer = NULL;
    cudaMemcpyKind dir = cudaMemcpyHostToDevice;
    if(rw == 'R')
    {
        dir = cudaMemcpyDeviceToHost;
    }
    else if(rw == 'W')
    {
        // Fixed: was `dir - cudaMemcpyHostToDevice;`, a no-op expression.
        dir = cudaMemcpyHostToDevice;
    }
    if(test == 'B')
    {
        // 1 MiB moved one byte at a time: dominated by per-call latency.
        err = cudaMalloc((void **) &d_mem_pointer, sizeof(unsigned char)*MEGABYTE);
        CHECK_ERR(err);
        mem_pointer = (unsigned char *)malloc(sizeof(unsigned char)*1);
        gettimeofday(&tv, NULL);
        start = tv.tv_sec*1000000LL + tv.tv_usec;
        for(unsigned long i = 0; i<MEGABYTE; i++)
        {
            if(rw == 'W')
                err = cudaMemcpy((void *)&d_mem_pointer[i], (void *)mem_pointer, 1, dir);
            else if(rw == 'R')
                err = cudaMemcpy((void *)mem_pointer, (void *)&d_mem_pointer[i], 1, dir);
            CHECK_ERR(err);
        }
        gettimeofday(&tv, NULL);
        stop = tv.tv_sec*1000000LL + tv.tv_usec;
        secs = (stop-start)/1000000.0;
        printf("%c\t%c\t%lf\n", rw, test, 1.0/(secs));  /* MiB/s */
    }
    else if(test == 'K')
    {
        // 256 MiB moved in 1 KiB chunks.
        err = cudaMalloc((void **) &d_mem_pointer, sizeof(unsigned char)*256*MEGABYTE);
        CHECK_ERR(err);
        mem_pointer = (unsigned char *)malloc(sizeof(unsigned char)*1024);
        gettimeofday(&tv, NULL);
        start = tv.tv_sec*1000000LL + tv.tv_usec;
        for(unsigned long i = 0; i<256*MEGABYTE/1024; i++)
        {
            if(rw == 'W')
                err = cudaMemcpy((void *)&d_mem_pointer[i*1024], (void *)mem_pointer, 1024, dir);
            else if(rw == 'R')
                err = cudaMemcpy((void *)mem_pointer, (void *)&d_mem_pointer[i*1024], 1024, dir);
            CHECK_ERR(err);
        }
        gettimeofday(&tv, NULL);
        stop = tv.tv_sec*1000000LL + tv.tv_usec;
        secs = (stop-start)/1000000.0;
        printf("%c\t%c\t%lf\n", rw, test, (256.0/1024.0)/(secs));  /* GiB/s */
    }
    else if(test == 'M')
    {
        // 5120 MiB moved in 1 MiB chunks, cycling through a 512 MiB buffer.
        err = cudaMalloc((void **) &d_mem_pointer, sizeof(unsigned char)*512*MEGABYTE);
        CHECK_ERR(err);
        mem_pointer = (unsigned char *)malloc(sizeof(unsigned char)*MEGABYTE);
        gettimeofday(&tv, NULL);
        start = tv.tv_sec*1000000LL + tv.tv_usec;
        for(unsigned long i = 0; i<512*10; i++)
        {
            if(rw == 'W')
                err = cudaMemcpy((void *)&d_mem_pointer[(i*MEGABYTE)%(512*MEGABYTE)], (void *)mem_pointer, MEGABYTE, dir);
            else if(rw == 'R')
                err = cudaMemcpy((void *)mem_pointer, (void *)&d_mem_pointer[(i*MEGABYTE)%(512*MEGABYTE)], MEGABYTE, dir);
            CHECK_ERR(err);
        }
        gettimeofday(&tv, NULL);
        stop = tv.tv_sec*1000000LL + tv.tv_usec;
        secs = (stop-start)/1000000.0;
        printf("%c\t%c\t%lf\n", rw, test, (512*10)/(secs));  /* MiB/s */
    }
    // Release host and device buffers; previously the host buffer leaked and
    // cudaFree could be called on an uninitialized pointer for unknown -t.
    free(mem_pointer);
    if (d_mem_pointer != NULL) {
        err = cudaFree(d_mem_pointer);
        CHECK_ERR(err);
    }
    return 0;
}
18352e3f35fd87b49ecfa8e5bd033960a71f1cd5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "THH.h"
#include "THHTensorMath.h"
#include "THHGeneral.h"
#include "THHBlas.h"
#include "THHTensorCopy.h"
#include "THHTensorRandom.h"
#include "THHHalf.h"
#include "THHApply.cuh"
#include "THHReduce.cuh"
#include "THHDeviceUtils.cuh"
#include "THHNumerics.cuh"
#include "THHAtomics.cuh"
#include "THHThrustAllocator.cuh"
#include "THHTensorSort.cuh"
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
#include <algorithm> // for std::min
// We prefer this kernel to avoid reloading index points if the number
// of indices is a small number.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is large, then the
// indexCopyLargeIndex kernel is a better choice to increase
// parallelism.
// dst[indices[i]] = src[i] along dstCopyDim/srcCopyDim for each index i.
// Indices are TH_INDEX_BASE-based; the device assert bounds-checks the
// adjusted index against dstCopyDimSize (active only without NDEBUG).
// The inner loop is a grid-stride loop, so any launch shape is valid.
template <typename T, typename IndexType, int DstDim, int SrcDim, int IdxDim>
__global__ void indexCopySmallIndex(TensorInfo<T, IndexType> dst,
                                    TensorInfo<T, IndexType> src,
                                    TensorInfo<int64_t, IndexType> indices,
                                    int dstCopyDim,
                                    int srcCopyDim,
                                    IndexType innerSize,
                                    int64_t dstCopyDimSize) {
  // In order to avoid reloading the index that we are copying, load
  // it once to handle all of the points that are being selected, so
  // it can be reused as much as possible. This kernel is chosen when
  // this is a good choice (small number of chosen indices), since
  // re-accessing indices in addition to src elements can be slow.
  for (IndexType srcIndex = 0; srcIndex < indices.sizes[0]; ++srcIndex) {
    // Lua indices begin at 1
    IndexType dstIndex =
      indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(srcIndex, indices)] - TH_INDEX_BASE;
    assert(dstIndex < dstCopyDimSize);
    // We stride over the output ignoring the indexed dimension
    // (innerSize), whose offset calculation is handled differently
    for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
         linearIndex < innerSize;
         linearIndex += gridDim.x * blockDim.x) {
      IndexType dstOffset =
        IndexToOffset<T, IndexType, DstDim>::get(linearIndex, dst);
      dstOffset += dstIndex * dst.strides[dstCopyDim];
      IndexType srcOffset =
        IndexToOffset<T, IndexType, SrcDim>::get(linearIndex, src);
      srcOffset += srcIndex * src.strides[srcCopyDim];
      dst.data[dstOffset] = src.data[srcOffset];
    }
  }
}
// We prefer this kernel to balance parallelism across index points,
// if there are a large number of indices.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is small, then the
// indexCopySmallIndex kernel is a better choice to reduce memory
// accesses.
// Same copy as indexCopySmallIndex, but the single grid-stride loop covers
// all (index, slice-element) pairs — innerSize * numIndices work items —
// so parallelism scales with the number of indices.
template <typename T, typename IndexType, int DstDim, int SrcDim, int IdxDim>
__global__ void indexCopyLargeIndex(TensorInfo<T, IndexType> dst,
                                    TensorInfo<T, IndexType> src,
                                    TensorInfo<int64_t, IndexType> indices,
                                    int dstCopyDim,
                                    int srcCopyDim,
                                    IndexType innerSize,
                                    int64_t dstCopyDimSize) {
  // We stride over the output including the indexed dimension
  // (totalSize), and calculate the destination index point based on that
  for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
       linearIndex < innerSize * indices.sizes[0];
       linearIndex += gridDim.x * blockDim.x) {
    // Decompose the flat work-item id into (which index, which element).
    IndexType srcIndex = linearIndex / innerSize;
    IndexType elementInSlice = linearIndex % innerSize;
    // Lua indices begin at 1
    IndexType dstIndex =
      indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(srcIndex, indices)] - TH_INDEX_BASE;
    assert(dstIndex < dstCopyDimSize);
    IndexType dstOffset =
      IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst);
    dstOffset += dstIndex * dst.strides[dstCopyDim];
    IndexType srcOffset =
      IndexToOffset<T, IndexType, SrcDim>::get(elementInSlice, src);
    srcOffset += srcIndex * src.strides[srcCopyDim];
    dst.data[dstOffset] = src.data[srcOffset];
  }
}
// We prefer this kernel to avoid reloading index points if the number
// of indices is a small number.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is large, then the
// indexAddLargeIndex kernel is a better choice to increase
// parallelism.
// dst[indices[i]] += src[i] along dstAddDim/srcAddDim. Uses atomicAdd so
// repeated indices accumulate correctly. Indices are TH_INDEX_BASE-based;
// the device assert bounds-checks the adjusted index.
template <typename T, typename IndexType, int DstDim, int SrcDim, int IdxDim>
__global__ void indexAddSmallIndex(TensorInfo<T, IndexType> dst,
                                   TensorInfo<T, IndexType> src,
                                   TensorInfo<int64_t, IndexType> indices,
                                   int dstAddDim,
                                   int srcAddDim,
                                   IndexType innerSize,
                                   int64_t dstAddDimSize) {
  // In order to avoid reloading the index that we are copying, load
  // it once to handle all of the points that are being selected, so
  // it can be reused as much as possible. This kernel is chosen when
  // this is a good choice (small number of chosen indices), since
  // re-accessing indices in addition to src elements can be slow.
  for (IndexType srcIndex = 0; srcIndex < indices.sizes[0]; ++srcIndex) {
    // Lua indices begin at 1
    IndexType dstIndex =
      indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(srcIndex, indices)] - TH_INDEX_BASE;
    assert(dstIndex < dstAddDimSize);
    // We stride over the output ignoring the indexed dimension
    // (innerSize), whose offset calculation is handled differently
    for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
         linearIndex < innerSize;
         linearIndex += gridDim.x * blockDim.x) {
      IndexType dstOffset =
        IndexToOffset<T, IndexType, DstDim>::get(linearIndex, dst);
      dstOffset += dstIndex * dst.strides[dstAddDim];
      IndexType srcOffset =
        IndexToOffset<T, IndexType, SrcDim>::get(linearIndex, src);
      srcOffset += srcIndex * src.strides[srcAddDim];
      atomicAdd(&dst.data[dstOffset], src.data[srcOffset]);
    }
  }
}
// We prefer this kernel to balance parallelism across index points,
// if there are a large number of indices.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is small, then the
// indexAddSmallIndex kernel is a better choice to reduce memory
// accesses.
// Same accumulation as indexAddSmallIndex, parallelized across all
// (index, slice-element) pairs with one grid-stride loop; atomicAdd keeps
// duplicate destination indices correct.
template <typename T, typename IndexType, int DstDim, int SrcDim, int IdxDim>
__global__ void indexAddLargeIndex(TensorInfo<T, IndexType> dst,
                                   TensorInfo<T, IndexType> src,
                                   TensorInfo<int64_t, IndexType> indices,
                                   int dstAddDim,
                                   int srcAddDim,
                                   IndexType innerSize,
                                   int64_t dstAddDimSize) {
  // We stride over the output including the indexed dimension
  // (totalSize), and calculate the destination index point based on that
  for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
       linearIndex < innerSize * indices.sizes[0];
       linearIndex += gridDim.x * blockDim.x) {
    // Decompose the flat work-item id into (which index, which element).
    IndexType srcIndex = linearIndex / innerSize;
    IndexType elementInSlice = linearIndex % innerSize;
    // Lua indices begin at 1
    IndexType dstIndex =
      indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(srcIndex, indices)] - TH_INDEX_BASE;
    assert(dstIndex < dstAddDimSize);
    IndexType dstOffset =
      IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst);
    dstOffset += dstIndex * dst.strides[dstAddDim];
    IndexType srcOffset =
      IndexToOffset<T, IndexType, SrcDim>::get(elementInSlice, src);
    srcOffset += srcIndex * src.strides[srcAddDim];
    atomicAdd(&dst.data[dstOffset], src.data[srcOffset]);
  }
}
// We prefer this kernel to avoid reloading index points if the number
// of indices is a small number.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is large, then the
// indexFillLargeIndex kernel is a better choice to increase
// parallelism.
// dst[indices[i]] = val along dstFillDim for each index i. Indices are
// TH_INDEX_BASE-based; the device assert bounds-checks the *fetched* fill
// index (dstIndex_) against dstFillDimSize, matching indexFillLargeIndex.
// The inner loop is a grid-stride loop over each slice's innerSize elements.
template <typename T, typename IndexType, int DstDim, int IdxDim>
__global__ void indexFillSmallIndex(TensorInfo<T, IndexType> dst,
                                    TensorInfo<int64_t, IndexType> indices,
                                    int dstFillDim,
                                    IndexType innerSize,
                                    int64_t dstFillDimSize,
                                    T val) {
  // In order to avoid reloading the index that we are copying, load
  // it once to handle all of the points that are being selected, so
  // it can be reused as much as possible. This kernel is chosen when
  // this is a good choice (small number of chosen indices), since
  // re-accessing indices in addition to src elements can be slow.
  for (IndexType dstIndex = 0; dstIndex < indices.sizes[0]; ++dstIndex) {
    // Lua indices begin at 1
    IndexType dstIndex_ =
      indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(dstIndex, indices)] - TH_INDEX_BASE;
    // Fixed: previously asserted the loop counter `dstIndex` (always in
    // range) instead of the user-supplied index `dstIndex_`, so out-of-range
    // fill indices escaped the bounds check.
    assert(dstIndex_ < dstFillDimSize);
    // We stride over the output ignoring the indexed dimension
    // (innerSize), whose offset calculation is handled differently
    for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
         linearIndex < innerSize;
         linearIndex += gridDim.x * blockDim.x) {
      IndexType dstOffset =
        IndexToOffset<T, IndexType, DstDim>::get(linearIndex, dst);
      dstOffset += dstIndex_ * dst.strides[dstFillDim];
      dst.data[dstOffset] = val;
    }
  }
}
// We prefer this kernel to balance parallelism across index points,
// if there are a large number of indices.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is small, then the
// indexFillSmallIndex kernel is a better choice to reduce memory
// accesses.
// Same fill as indexFillSmallIndex, parallelized across all
// (index, slice-element) pairs with one grid-stride loop.
template <typename T, typename IndexType, int DstDim, int IdxDim>
__global__ void indexFillLargeIndex(TensorInfo<T, IndexType> dst,
                                    TensorInfo<int64_t, IndexType> indices,
                                    int dstFillDim,
                                    IndexType innerSize,
                                    int64_t dstFillDimSize,
                                    T val) {
  // We stride over the output including the indexed dimension
  // (totalSize), and calculate the destination index point based on that
  for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
       linearIndex < innerSize * indices.sizes[0];
       linearIndex += gridDim.x * blockDim.x) {
    // Decompose the flat work-item id into (which index, which element).
    IndexType dstIndex = linearIndex / innerSize;
    IndexType elementInSlice = linearIndex % innerSize;
    // Lua indices begin at 1
    IndexType dstIndex_ =
      indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(dstIndex, indices)] - TH_INDEX_BASE;
    assert(dstIndex_ < dstFillDimSize);
    IndexType dstOffset =
      IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst);
    dstOffset += dstIndex_ * dst.strides[dstFillDim];
    dst.data[dstOffset] = val;
  }
}
// We prefer this kernel to avoid reloading index points if the number
// of indices is a small number.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is large, then the
// indexSelectLargeIndex kernel is a better choice to increase
// parallelism.
// Gather: dst[i] = src[indices[i]] along dstSelectDim/srcSelectDim.
// Indices are TH_INDEX_BASE-based; the device assert bounds-checks the
// adjusted source index against srcSelectDimSize.
template <typename T, typename IndexType, int DstDim, int SrcDim, int IdxDim>
__global__ void indexSelectSmallIndex(TensorInfo<T, IndexType> dst,
                                      TensorInfo<T, IndexType> src,
                                      TensorInfo<int64_t, IndexType> indices,
                                      int dstSelectDim,
                                      int srcSelectDim,
                                      IndexType innerSize,
                                      int64_t srcSelectDimSize) {
  // In order to avoid reloading the index that we are copying, load
  // it once to handle all of the points that are being selected, so
  // it can be reused as much as possible. This kernel is chosen when
  // this is a good choice (small number of chosen indices), since
  // re-accessing indices in addition to src elements can be slow.
  for (IndexType dstIndex = 0; dstIndex < indices.sizes[0]; ++dstIndex) {
    // Lua indices begin at 1
    IndexType srcIndex =
      indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(dstIndex, indices)] - TH_INDEX_BASE;
    assert(srcIndex < srcSelectDimSize);
    // We stride over the output ignoring the indexed dimension
    // (innerSize), whose offset calculation is handled differently
    for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
         linearIndex < innerSize;
         linearIndex += gridDim.x * blockDim.x) {
      IndexType dstOffset =
        IndexToOffset<T, IndexType, DstDim>::get(linearIndex, dst);
      dstOffset += dstIndex * dst.strides[dstSelectDim];
      IndexType srcOffset =
        IndexToOffset<T, IndexType, SrcDim>::get(linearIndex, src);
      srcOffset += srcIndex * src.strides[srcSelectDim];
      dst.data[dstOffset] = src.data[srcOffset];
    }
  }
}
// We prefer this kernel to balance parallelism across index points,
// if there are a large number of indices.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is small, then the
// indexSelectSmallIndex kernel is a better choice to reduce memory
// accesses.
// Same gather as indexSelectSmallIndex, parallelized across all
// (index, slice-element) pairs; `totalSize` is passed explicitly and
// bounds the single grid-stride loop.
template <typename T, typename IndexType, int DstDim, int SrcDim, int IdxDim>
__global__ void indexSelectLargeIndex(TensorInfo<T, IndexType> dst,
                                      TensorInfo<T, IndexType> src,
                                      TensorInfo<int64_t, IndexType> indices,
                                      int dstSelectDim,
                                      int srcSelectDim,
                                      IndexType totalSize,
                                      IndexType innerSize,
                                      int64_t srcSelectDimSize) {
  // We stride over the output including the indexed dimension
  // (totalSize), and calculate the destination index point based on that
  for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
       linearIndex < totalSize;
       linearIndex += gridDim.x * blockDim.x) {
    // Decompose the flat work-item id into (which index, which element).
    IndexType dstIndex = linearIndex / innerSize;
    IndexType elementInSlice = linearIndex % innerSize;
    // Lua indices begin at 1
    IndexType srcIndex =
      indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(dstIndex, indices)] - TH_INDEX_BASE;
    assert(srcIndex < srcSelectDimSize);
    IndexType dstOffset =
      IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst);
    dstOffset += dstIndex * dst.strides[dstSelectDim];
    IndexType srcOffset =
      IndexToOffset<T, IndexType, SrcDim>::get(elementInSlice, src);
    srcOffset += srcIndex * src.strides[srcSelectDim];
    dst.data[dstOffset] = src.data[srcOffset];
  }
}
// Per-dimension metadata used to translate a flat advanced-indexing
// iteration number into an element offset of the indexed tensor.
template <typename IndexType, unsigned int Dims>
struct LinearIndexCalcData {
  // sizes for the Tensor dims (from the Tensor, for bounds checking)
  IndexType baseSizes[Dims];
  // sizes for Tensor dims (either from the Tensor, or the size of the adv indexer at that dim)
  IndexType sizes[Dims];
  // strides for the Tensor we are indexing into
  IndexType strides[Dims];
  // these are pointers to the buffers containing the index selected at each dimension
  // for all of the indices we want to generate. If a dimension is not under advanced indexing
  // then the pointer is NULL
  int64_t *advIndexTensors[Dims];
};
// Converts iteration number `index` into a strided element offset, walking
// dims innermost-first. Adjacent advanced-indexed dims share the same index
// (they iterate together); plain dims decompose `index` positionally. The
// device assert bounds-checks against the tensor's real size at each dim.
template <typename IndexType, unsigned int Dims>
__device__ __forceinline__ int64_t calculateOffset(
  IndexType index,
  LinearIndexCalcData<IndexType, Dims> data
)
{
  IndexType offset = 0;
#pragma unroll
  for (int dim = Dims - 1; dim >= 0; --dim) {
    IndexType sizeAtDim, strideAtDim, indexAtDim, nextIndex;
    strideAtDim = data.strides[dim];
    sizeAtDim = data.sizes[dim];
    if (data.advIndexTensors[dim] != NULL) {
      // Advanced-indexed dim: look up the user-chosen index for this step.
      indexAtDim = data.advIndexTensors[dim][index % sizeAtDim];
      // Check if next dimension is also advanced indexing, if so we must keep the index
      // the same and iterate together
      if (dim > 0 && data.advIndexTensors[dim - 1] != NULL) {
        nextIndex = index;
      } else {
        nextIndex = index / sizeAtDim;
      }
    } else {
      nextIndex = index / sizeAtDim;
      indexAtDim = index - nextIndex * sizeAtDim;
    }
    assert(indexAtDim < data.baseSizes[dim]);
    offset += indexAtDim * strideAtDim;
    index = nextIndex;
  }
  return offset;
}
// Grid-stride kernel: fills `output` with the absolute linear offsets of
// the `elements` selected positions, each shifted by `baseOffset`.
template <typename IndexType, unsigned int Dims>
__global__ void calculateLinearIndices(
  int64_t *output,   // output Tensor for indices
  int elements,      // number of elements in output <-> indices to calculate
  ptrdiff_t baseOffset, // base offset into the Tensor
  LinearIndexCalcData<IndexType, Dims> data
)
{
  for (int64_t i = blockIdx.x * blockDim.x + threadIdx.x;
       i < elements;
       i += blockDim.x * gridDim.x) {
    output[i] = baseOffset + calculateOffset<IndexType, Dims>(i, data);
  }
}
// Wraps a possibly-negative (Python-style) linear index into [0, size)
// and converts it to a storage offset for `info`.
// NOTE(review): the trailing `- TH_INDEX_BASE` shifts the resulting offset
// by the index base — presumably TH_INDEX_BASE is 0 in non-Lua builds or
// callers compensate; verify against callers.
template <int Dims, typename T, typename IndexType>
__device__ __forceinline__ IndexType indexToOffset(
    const TensorInfo<T, IndexType>& info,
    int64_t index,
    IndexType size)
{
  IndexType linearIndex = static_cast<IndexType>(index);
  assert(linearIndex < size && linearIndex >= -size);
  if (linearIndex < 0) {
    linearIndex += size;
  }
  return IndexToOffset<T, IndexType, Dims>::get(linearIndex, info) - TH_INDEX_BASE;
}
// Pointwise op that normalizes a possibly-negative index into [0, size):
// a negative index i maps to i + size. Triggers a device assert when the
// input index is outside [-size, size).
struct WrapIndexOp {
  WrapIndexOp(int64_t size) : size(size) {}
  __device__ __forceinline__ void operator()(int64_t* out, int64_t* in) {
    int64_t idx = *in;
    assert(idx >= -size && idx < size);
    if (idx < 0) {
      idx += size;
    }
    *out = idx;
  }
  int64_t size;
};
// Pointwise gather op for take(): *out = a.data[wrap(*index)], with
// Python-style negative-index wrapping via indexToOffset. The trailing
// int64_t* constructor params are unused here (shared ctor signature with
// TensorPutAccumulateOp).
template <typename T, typename IndexType, int Dims>
struct TensorTakeOp {
  TensorTakeOp(TensorInfo<T, IndexType> info, IndexType numel, int64_t*, int64_t*)
    : info(info), numel(numel) {}
  __device__ __forceinline__ void operator()(T* out, int64_t* index) {
    auto offset = indexToOffset<Dims>(info, *index, numel);
    *out = info.data[offset];
  }
  const TensorInfo<T, IndexType> info;
  IndexType numel;
};
// Pointwise scatter op for put(): a.data[wrap(*index)] = *value. Plain
// (non-atomic) stores, so duplicate indices race and the surviving value
// is unspecified.
template <typename T, typename IndexType, int Dims>
struct TensorPutOp {
  TensorPutOp(TensorInfo<T, IndexType> info, IndexType numel, int64_t*, int64_t*)
    : info(info), numel(numel) {}
  __device__ __forceinline__ void operator()(T* value, int64_t* index) {
    auto offset = indexToOffset<Dims>(info, *index, numel);
    info.data[offset] = *value;
  }
  const TensorInfo<T, IndexType> info;
  IndexType numel;
};
// Accumulating put(): requires the index buffer [start, end) to hold
// duplicates adjacently (i.e. sorted). Only the first element of each run
// of equal indices performs the update, summing the whole run serially so
// no additions are lost without needing atomics.
template <typename T, typename IndexType, int Dims>
struct TensorPutAccumulateOp {
  TensorPutAccumulateOp(TensorInfo<T, IndexType> info, IndexType numel, int64_t* start, int64_t* end)
    : info(info), numel(numel), start(start), end(end) {}
  __device__ __forceinline__ void operator()(T* value, int64_t* index) {
    if (index == start || *index != *(index - 1)) {
      int64_t linear_index = *index;
      auto offset = indexToOffset<Dims>(info, linear_index, numel);
      do {
        info.data[offset] = THCNumerics<T>::add(info.data[offset], *value);
        index++;
        value++;
      } while (index != end && *index == linear_index);
    }
  }
  const TensorInfo<T, IndexType> info;
  IndexType numel;
  int64_t* start;
  int64_t* end;
};
// Host dispatcher: builds the TensorInfo for `a`, collapses dims, and
// applies Op elementwise over (b, index) via THC_pointwiseApply2. The -2
// template specialization is the contiguous fast path, -1 the general one.
template<typename IndexType, typename real, template<class, class, int> class Op, typename TensorType>
void dispatchTakePutImpl(THCState *state, TensorType *a, TensorType *b, THCudaLongTensor *index) {
  // These are only valid if index is contiguous
  auto start = THCudaLongTensor_data(state, index);
  auto end = start + THCudaLongTensor_numel(state, index);
  auto aInfo = getTensorInfo<TensorType, IndexType>(state, a);
  aInfo.collapseDims();
  auto numel = TensorUtils<TensorType>::getNumElements(state, a);
  if (aInfo.isContiguous()) {
    auto op = Op<real, IndexType, -2>(aInfo, numel, start, end);
    THC_pointwiseApply2(state, b, index, op);
  } else {
    auto op = Op<real, IndexType, -1>(aInfo, numel, start, end);
    THC_pointwiseApply2(state, b, index, op);
  }
}
// Chooses 32-bit index arithmetic when all offsets of `a` fit in int32
// (cheaper on the GPU), otherwise falls back to 64-bit.
template<typename real, template<class, class, int> class Op, typename TensorType>
void dispatchTakePut(THCState *state, TensorType *a, TensorType *b, THCudaLongTensor *index) {
  if (TensorUtils<TensorType>::canUse32BitIndexMath(state, a, INT_MAX)) {
    dispatchTakePutImpl<int32_t, real, Op>(state, a, b, index);
  } else {
    dispatchTakePutImpl<int64_t, real, Op>(state, a, b, index);
  }
}
#include "generic/THCTensorIndex.cu"
#include "THHGenerateAllTypes.h"
| 18352e3f35fd87b49ecfa8e5bd033960a71f1cd5.cu | #include "THC.h"
#include "THCTensorMath.h"
#include "THCGeneral.h"
#include "THCBlas.h"
#include "THCTensorCopy.h"
#include "THCTensorRandom.h"
#include "THCHalf.h"
#include "THCApply.cuh"
#include "THCReduce.cuh"
#include "THCDeviceUtils.cuh"
#include "THCNumerics.cuh"
#include "THCAtomics.cuh"
#include "THCThrustAllocator.cuh"
#include "THCTensorSort.cuh"
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
#include <algorithm> // for std::min
// We prefer this kernel to avoid reloading index points if the number
// of indices is a small number.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is large, then the
// indexCopyLargeIndex kernel is a better choice to increase
// parallelism.
// dst[indices[i]] = src[i] along dstCopyDim/srcCopyDim for each index i.
// Indices are TH_INDEX_BASE-based; the device assert bounds-checks the
// adjusted index against dstCopyDimSize (active only without NDEBUG).
// The inner loop is a grid-stride loop, so any launch shape is valid.
template <typename T, typename IndexType, int DstDim, int SrcDim, int IdxDim>
__global__ void indexCopySmallIndex(TensorInfo<T, IndexType> dst,
                                    TensorInfo<T, IndexType> src,
                                    TensorInfo<int64_t, IndexType> indices,
                                    int dstCopyDim,
                                    int srcCopyDim,
                                    IndexType innerSize,
                                    int64_t dstCopyDimSize) {
  // In order to avoid reloading the index that we are copying, load
  // it once to handle all of the points that are being selected, so
  // it can be reused as much as possible. This kernel is chosen when
  // this is a good choice (small number of chosen indices), since
  // re-accessing indices in addition to src elements can be slow.
  for (IndexType srcIndex = 0; srcIndex < indices.sizes[0]; ++srcIndex) {
    // Lua indices begin at 1
    IndexType dstIndex =
      indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(srcIndex, indices)] - TH_INDEX_BASE;
    assert(dstIndex < dstCopyDimSize);
    // We stride over the output ignoring the indexed dimension
    // (innerSize), whose offset calculation is handled differently
    for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
         linearIndex < innerSize;
         linearIndex += gridDim.x * blockDim.x) {
      IndexType dstOffset =
        IndexToOffset<T, IndexType, DstDim>::get(linearIndex, dst);
      dstOffset += dstIndex * dst.strides[dstCopyDim];
      IndexType srcOffset =
        IndexToOffset<T, IndexType, SrcDim>::get(linearIndex, src);
      srcOffset += srcIndex * src.strides[srcCopyDim];
      dst.data[dstOffset] = src.data[srcOffset];
    }
  }
}
// We prefer this kernel to balance parallelism across index points,
// if there are a large number of indices.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is small, then the
// indexCopySmallIndex kernel is a better choice to reduce memory
// accesses.
// Same copy as indexCopySmallIndex, but the single grid-stride loop covers
// all (index, slice-element) pairs — innerSize * numIndices work items —
// so parallelism scales with the number of indices.
template <typename T, typename IndexType, int DstDim, int SrcDim, int IdxDim>
__global__ void indexCopyLargeIndex(TensorInfo<T, IndexType> dst,
                                    TensorInfo<T, IndexType> src,
                                    TensorInfo<int64_t, IndexType> indices,
                                    int dstCopyDim,
                                    int srcCopyDim,
                                    IndexType innerSize,
                                    int64_t dstCopyDimSize) {
  // We stride over the output including the indexed dimension
  // (totalSize), and calculate the destination index point based on that
  for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
       linearIndex < innerSize * indices.sizes[0];
       linearIndex += gridDim.x * blockDim.x) {
    // Decompose the flat work-item id into (which index, which element).
    IndexType srcIndex = linearIndex / innerSize;
    IndexType elementInSlice = linearIndex % innerSize;
    // Lua indices begin at 1
    IndexType dstIndex =
      indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(srcIndex, indices)] - TH_INDEX_BASE;
    assert(dstIndex < dstCopyDimSize);
    IndexType dstOffset =
      IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst);
    dstOffset += dstIndex * dst.strides[dstCopyDim];
    IndexType srcOffset =
      IndexToOffset<T, IndexType, SrcDim>::get(elementInSlice, src);
    srcOffset += srcIndex * src.strides[srcCopyDim];
    dst.data[dstOffset] = src.data[srcOffset];
  }
}
// We prefer this kernel to avoid reloading index points if the number
// of indices is a small number.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is large, then the
// indexAddLargeIndex kernel is a better choice to increase
// parallelism.
// dst[indices[i]] += src[i] along dstAddDim/srcAddDim. Uses atomicAdd so
// repeated indices accumulate correctly. Indices are TH_INDEX_BASE-based;
// the device assert bounds-checks the adjusted index.
template <typename T, typename IndexType, int DstDim, int SrcDim, int IdxDim>
__global__ void indexAddSmallIndex(TensorInfo<T, IndexType> dst,
                                   TensorInfo<T, IndexType> src,
                                   TensorInfo<int64_t, IndexType> indices,
                                   int dstAddDim,
                                   int srcAddDim,
                                   IndexType innerSize,
                                   int64_t dstAddDimSize) {
  // In order to avoid reloading the index that we are copying, load
  // it once to handle all of the points that are being selected, so
  // it can be reused as much as possible. This kernel is chosen when
  // this is a good choice (small number of chosen indices), since
  // re-accessing indices in addition to src elements can be slow.
  for (IndexType srcIndex = 0; srcIndex < indices.sizes[0]; ++srcIndex) {
    // Lua indices begin at 1
    IndexType dstIndex =
      indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(srcIndex, indices)] - TH_INDEX_BASE;
    assert(dstIndex < dstAddDimSize);
    // We stride over the output ignoring the indexed dimension
    // (innerSize), whose offset calculation is handled differently
    for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
         linearIndex < innerSize;
         linearIndex += gridDim.x * blockDim.x) {
      IndexType dstOffset =
        IndexToOffset<T, IndexType, DstDim>::get(linearIndex, dst);
      dstOffset += dstIndex * dst.strides[dstAddDim];
      IndexType srcOffset =
        IndexToOffset<T, IndexType, SrcDim>::get(linearIndex, src);
      srcOffset += srcIndex * src.strides[srcAddDim];
      atomicAdd(&dst.data[dstOffset], src.data[srcOffset]);
    }
  }
}
// We prefer this kernel to balance parallelism across index points,
// if there are a large number of indices.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is small, then the
// indexAddSmallIndex kernel is a better choice to reduce memory
// accesses.
template <typename T, typename IndexType, int DstDim, int SrcDim, int IdxDim>
__global__ void indexAddLargeIndex(TensorInfo<T, IndexType> dst,
                                   TensorInfo<T, IndexType> src,
                                   TensorInfo<int64_t, IndexType> indices,
                                   int dstAddDim,
                                   int srcAddDim,
                                   IndexType innerSize,
                                   int64_t dstAddDimSize) {
  // One flat grid-stride loop over every (index point, slice element)
  // pair; preferred when the index list is long, since the work spreads
  // evenly over all launched threads.
  const IndexType totalWork = innerSize * indices.sizes[0];
  const IndexType stride = gridDim.x * blockDim.x;
  for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
       linearIndex < totalWork;
       linearIndex += stride) {
    const IndexType srcIndex = linearIndex / innerSize;
    const IndexType elementInSlice = linearIndex - srcIndex * innerSize;
    // Lua indices begin at 1, hence the TH_INDEX_BASE correction.
    const IndexType dstIndex =
        indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(srcIndex, indices)] - TH_INDEX_BASE;
    assert(dstIndex < dstAddDimSize);
    const IndexType dstOffset =
        IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst)
        + dstIndex * dst.strides[dstAddDim];
    const IndexType srcOffset =
        IndexToOffset<T, IndexType, SrcDim>::get(elementInSlice, src)
        + srcIndex * src.strides[srcAddDim];
    atomicAdd(&dst.data[dstOffset], src.data[srcOffset]);
  }
}
// We prefer this kernel to avoid reloading index points if the number
// of indices is a small number.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is large, then the
// indexFillLargeIndex kernel is a better choice to increase
// parallelism.
template <typename T, typename IndexType, int DstDim, int IdxDim>
__global__ void indexFillSmallIndex(TensorInfo<T, IndexType> dst,
                                    TensorInfo<T, IndexType> indices_unused_hint,
                                    int dstFillDim,
                                    IndexType innerSize,
                                    int64_t dstFillDimSize,
                                    T val);
// We prefer this kernel to balance parallelism across index points,
// if there are a large number of indices.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is small, then the
// indexFillSmallIndex kernel is a better choice to reduce memory
// accesses.
template <typename T, typename IndexType, int DstDim, int IdxDim>
__global__ void indexFillLargeIndex(TensorInfo<T, IndexType> dst,
                                    TensorInfo<int64_t, IndexType> indices,
                                    int dstFillDim,
                                    IndexType innerSize,
                                    int64_t dstFillDimSize,
                                    T val) {
  // Flat grid-stride loop over every (index point, slice element) pair;
  // preferred when many indices are selected, to maximize parallelism.
  const IndexType totalWork = innerSize * indices.sizes[0];
  const IndexType stride = gridDim.x * blockDim.x;
  for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
       linearIndex < totalWork;
       linearIndex += stride) {
    const IndexType dstIndex = linearIndex / innerSize;
    const IndexType elementInSlice = linearIndex - dstIndex * innerSize;
    // Lua indices begin at 1, hence the TH_INDEX_BASE correction.
    const IndexType dstIndex_ =
        indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(dstIndex, indices)] - TH_INDEX_BASE;
    assert(dstIndex_ < dstFillDimSize);
    const IndexType dstOffset =
        IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst)
        + dstIndex_ * dst.strides[dstFillDim];
    dst.data[dstOffset] = val;
  }
}
// We prefer this kernel to avoid reloading index points if the number
// of indices is a small number.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is large, then the
// indexSelectLargeIndex kernel is a better choice to increase
// parallelism.
template <typename T, typename IndexType, int DstDim, int SrcDim, int IdxDim>
__global__ void indexSelectSmallIndex(TensorInfo<T, IndexType> dst,
                                      TensorInfo<T, IndexType> src,
                                      TensorInfo<int64_t, IndexType> indices,
                                      int dstSelectDim,
                                      int srcSelectDim,
                                      IndexType innerSize,
                                      int64_t srcSelectDimSize) {
  // Each index point is fetched once and all of its slice elements are
  // copied before moving on; preferred when few indices are selected,
  // since re-reading the index tensor is avoided.
  const IndexType threadStart = blockIdx.x * blockDim.x + threadIdx.x;
  const IndexType threadStep = gridDim.x * blockDim.x;
  for (IndexType dstIndex = 0; dstIndex < indices.sizes[0]; ++dstIndex) {
    // Lua indices begin at 1, hence the TH_INDEX_BASE correction.
    const IndexType srcIndex =
        indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(dstIndex, indices)] - TH_INDEX_BASE;
    assert(srcIndex < srcSelectDimSize);
    // Grid-stride copy of the slice that belongs to this index point.
    for (IndexType linearIndex = threadStart;
         linearIndex < innerSize;
         linearIndex += threadStep) {
      const IndexType dstOffset =
          IndexToOffset<T, IndexType, DstDim>::get(linearIndex, dst)
          + dstIndex * dst.strides[dstSelectDim];
      const IndexType srcOffset =
          IndexToOffset<T, IndexType, SrcDim>::get(linearIndex, src)
          + srcIndex * src.strides[srcSelectDim];
      dst.data[dstOffset] = src.data[srcOffset];
    }
  }
}
// We prefer this kernel to balance parallelism across index points,
// if there are a large number of indices.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is small, then the
// indexSelectSmallIndex kernel is a better choice to reduce memory
// accesses.
template <typename T, typename IndexType, int DstDim, int SrcDim, int IdxDim>
__global__ void indexSelectLargeIndex(TensorInfo<T, IndexType> dst,
                                      TensorInfo<T, IndexType> src,
                                      TensorInfo<int64_t, IndexType> indices,
                                      int dstSelectDim,
                                      int srcSelectDim,
                                      IndexType totalSize,
                                      IndexType innerSize,
                                      int64_t srcSelectDimSize) {
  // Flat grid-stride loop over every output element (totalSize); each
  // element decodes into (index point, position within slice).  Best when
  // the index list is long enough to keep every thread busy.
  const IndexType stride = gridDim.x * blockDim.x;
  for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
       linearIndex < totalSize;
       linearIndex += stride) {
    const IndexType dstIndex = linearIndex / innerSize;
    const IndexType elementInSlice = linearIndex - dstIndex * innerSize;
    // Lua indices begin at 1, hence the TH_INDEX_BASE correction.
    const IndexType srcIndex =
        indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(dstIndex, indices)] - TH_INDEX_BASE;
    assert(srcIndex < srcSelectDimSize);
    const IndexType dstOffset =
        IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst)
        + dstIndex * dst.strides[dstSelectDim];
    const IndexType srcOffset =
        IndexToOffset<T, IndexType, SrcDim>::get(elementInSlice, src)
        + srcIndex * src.strides[srcSelectDim];
    dst.data[dstOffset] = src.data[srcOffset];
  }
}
template <typename IndexType, unsigned int Dims>
struct LinearIndexCalcData {
  // POD bundle of per-dimension metadata consumed by calculateOffset /
  // calculateLinearIndices when materializing advanced-indexing offsets.
  // sizes for the Tensor dims (from the Tensor, for bounds checking)
  IndexType baseSizes[Dims];
  // sizes for Tensor dims (either from the Tensor, or the size of the adv indexer at that dim)
  IndexType sizes[Dims];
  // strides for the Tensor we are indexing into
  IndexType strides[Dims];
  // these are pointers to the buffers containing the index selected at each dimension
  // for all of the indices we want to generate. If a dimension is not under advanced indexing
  // then the pointer is NULL
  int64_t *advIndexTensors[Dims];
};
template <typename IndexType, unsigned int Dims>
__device__ __forceinline__ int64_t calculateOffset(
    IndexType index,
    LinearIndexCalcData<IndexType, Dims> data
)
{
  // Converts a flat iteration number into a linear storage offset for the
  // indexed tensor, walking dimensions from innermost (Dims-1) outward.
  IndexType offset = 0;
#pragma unroll
  for (int dim = Dims - 1; dim >= 0; --dim) {
    IndexType sizeAtDim, strideAtDim, indexAtDim, nextIndex;
    strideAtDim = data.strides[dim];
    sizeAtDim = data.sizes[dim];
    if (data.advIndexTensors[dim] != NULL) {
      // Dimension under advanced indexing: read the selected index from
      // the indexer buffer instead of decomposing `index` arithmetically.
      indexAtDim = data.advIndexTensors[dim][index % sizeAtDim];
      // Check if next dimension is also advanced indexing, if so we must keep the index
      // the same and iterate together
      if (dim > 0 && data.advIndexTensors[dim - 1] != NULL) {
        nextIndex = index;
      } else {
        nextIndex = index / sizeAtDim;
      }
    } else {
      // Plain dimension: standard mixed-radix decomposition of `index`.
      nextIndex = index / sizeAtDim;
      indexAtDim = index - nextIndex * sizeAtDim;
    }
    // The chosen index must be valid for the underlying tensor size.
    assert(indexAtDim < data.baseSizes[dim]);
    offset += indexAtDim * strideAtDim;
    index = nextIndex;
  }
  return offset;
}
template <typename IndexType, unsigned int Dims>
__global__ void calculateLinearIndices(
    int64_t *output, // output Tensor for indices
    int elements, // number of elements in output <-> indices to calculate
    ptrdiff_t baseOffset, // base offset into the Tensor
    LinearIndexCalcData<IndexType, Dims> data
)
{
  // Grid-stride loop: each thread materializes the linear storage offset
  // for a subset of the `elements` advanced-indexing positions.
  const int64_t first = blockIdx.x * blockDim.x + threadIdx.x;
  const int64_t step = blockDim.x * gridDim.x;
  for (int64_t i = first; i < elements; i += step) {
    output[i] = baseOffset + calculateOffset<IndexType, Dims>(i, data);
  }
}
template <int Dims, typename T, typename IndexType>
__device__ __forceinline__ IndexType indexToOffset(
    const TensorInfo<T, IndexType>& info,
    int64_t index,
    IndexType size)
{
  // Wraps a possibly-negative linear index into [0, size) and translates
  // it into a storage offset for `info` (minus the Lua index base).
  IndexType linearIndex = static_cast<IndexType>(index);
  assert(linearIndex < size && linearIndex >= -size);
  linearIndex = (linearIndex < 0) ? (linearIndex + size) : linearIndex;
  return IndexToOffset<T, IndexType, Dims>::get(linearIndex, info) - TH_INDEX_BASE;
}
struct WrapIndexOp {
  WrapIndexOp(int64_t size) : size(size) {}
  // Normalizes a possibly-negative index into [0, size); traps via assert
  // when the index is outside [-size, size).
  __device__ __forceinline__ void operator()(int64_t* out, int64_t* in) {
    const int64_t idx = *in;
    assert(idx < size && idx >= -size);
    *out = (idx >= 0) ? idx : idx + size;
  }
  int64_t size;
};
template <typename T, typename IndexType, int Dims>
struct TensorTakeOp {
  // Gather: *out = info.data[wrap(*index)].  The trailing int64_t*
  // constructor parameters are unused; they keep the signature uniform
  // with the other take/put ops.
  TensorTakeOp(TensorInfo<T, IndexType> info, IndexType numel, int64_t*, int64_t*)
      : info(info), numel(numel) {}
  __device__ __forceinline__ void operator()(T* out, int64_t* index) {
    *out = info.data[indexToOffset<Dims>(info, *index, numel)];
  }
  const TensorInfo<T, IndexType> info;
  IndexType numel;
};
template <typename T, typename IndexType, int Dims>
struct TensorPutOp {
  // Scatter: info.data[wrap(*index)] = *value.  The trailing int64_t*
  // constructor parameters are unused; they keep the signature uniform
  // with the other take/put ops.
  TensorPutOp(TensorInfo<T, IndexType> info, IndexType numel, int64_t*, int64_t*)
      : info(info), numel(numel) {}
  __device__ __forceinline__ void operator()(T* value, int64_t* index) {
    info.data[indexToOffset<Dims>(info, *index, numel)] = *value;
  }
  const TensorInfo<T, IndexType> info;
  IndexType numel;
};
template <typename T, typename IndexType, int Dims>
struct TensorPutAccumulateOp {
  // Accumulating scatter (put_ with accumulate): the first element of each
  // run of equal indices in [start, end) serially adds the whole run into
  // info.data, so no two threads write the same destination.
  // NOTE(review): the run detection via *(index - 1) assumes duplicate
  // indices are adjacent (e.g. the index buffer is sorted) — confirm at
  // the call site.
  TensorPutAccumulateOp(TensorInfo<T, IndexType> info, IndexType numel, int64_t* start, int64_t* end)
    : info(info), numel(numel), start(start), end(end) {}
  __device__ __forceinline__ void operator()(T* value, int64_t* index) {
    // Only the element that begins a run of equal indices does any work.
    if (index == start || *index != *(index - 1)) {
      int64_t linear_index = *index;
      auto offset = indexToOffset<Dims>(info, linear_index, numel);
      do {
        info.data[offset] = THCNumerics<T>::add(info.data[offset], *value);
        index++;
        value++;
      } while (index != end && *index == linear_index);
    }
  }
  const TensorInfo<T, IndexType> info;
  IndexType numel;
  int64_t* start;
  int64_t* end;
};
template<typename IndexType, typename real, template<class, class, int> class Op, typename TensorType>
void dispatchTakePutImpl(THCState *state, TensorType *a, TensorType *b, THCudaLongTensor *index) {
  // Builds an Op over tensor `a` and applies it pointwise to (b, index),
  // choosing the IndexToOffset specialization from a's contiguity.
  // These are only valid if index is contiguous
  auto start = THCudaLongTensor_data(state, index);
  auto end = start + THCudaLongTensor_numel(state, index);
  auto aInfo = getTensorInfo<TensorType, IndexType>(state, a);
  aInfo.collapseDims();
  auto numel = TensorUtils<TensorType>::getNumElements(state, a);
  if (aInfo.isContiguous()) {
    // Dims = -2 is instantiated only for the contiguous case.
    auto op = Op<real, IndexType, -2>(aInfo, numel, start, end);
    THC_pointwiseApply2(state, b, index, op);
  } else {
    // Dims = -1 handles the general (non-contiguous) case.
    auto op = Op<real, IndexType, -1>(aInfo, numel, start, end);
    THC_pointwiseApply2(state, b, index, op);
  }
}
template<typename real, template<class, class, int> class Op, typename TensorType>
void dispatchTakePut(THCState *state, TensorType *a, TensorType *b, THCudaLongTensor *index) {
  // Prefer 32-bit index arithmetic whenever every element of `a` fits in
  // an int32 address space; fall back to 64-bit otherwise.
  const bool use32BitMath =
      TensorUtils<TensorType>::canUse32BitIndexMath(state, a, INT_MAX);
  if (use32BitMath) {
    dispatchTakePutImpl<int32_t, real, Op>(state, a, b, index);
  } else {
    dispatchTakePutImpl<int64_t, real, Op>(state, a, b, index);
  }
}
#include "generic/THCTensorIndex.cu"
#include "THCGenerateAllTypes.h"
|
0a84aee11df281f2190fdafa4c059e5b70d0cce8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "kernel_hip.cuh"
__global__ void kernelMatrixMul(int* A, int* B, int* C, int n)
{
    // One thread per output element of C = C + A*B (row-major n x n):
    // row i = threadIdx.x, column j = blockIdx.x.
    // NOTE(review): the launch must use <n blocks, n threads>; n larger
    // than the per-block thread limit (1024) would fail — confirm callers.
    int i = threadIdx.x;
    int j = blockIdx.x;
    // Accumulate the dot product in a register and touch global memory
    // once, instead of read-modify-writing C[i*n+j] on every iteration.
    int sum = 0;
    for(int k=0; k<n; k++)
    {
        sum += A[i*n+k]*B[k*n+j];
    }
    C[i*n+j] += sum;
}
void StartCuda(int* A, int* B, int* C, int n)
{
    // Launches the multiply with one block per output column and one
    // thread per output row (n x n), then blocks until it completes.
    // A, B, C must be device pointers to n*n ints.
    // NOTE(review): no error check after the launch; a launch-config
    // failure (e.g. n > 1024 threads/block) would go unnoticed.
    hipLaunchKernelGGL(( kernelMatrixMul), dim3(n), dim3(n), 0, 0, A,B,C,n);
    hipDeviceSynchronize();
}
| 0a84aee11df281f2190fdafa4c059e5b70d0cce8.cu | #include <stdio.h>
#include "kernel.cuh"
__global__ void kernelMatrixMul(int* A, int* B, int* C, int n)
{
    // One thread per output element of C = C + A*B (row-major n x n):
    // row i = threadIdx.x, column j = blockIdx.x.
    // NOTE(review): the launch must use <<<n, n>>>; n larger than the
    // per-block thread limit (1024) would fail — confirm callers.
    int i = threadIdx.x;
    int j = blockIdx.x;
    // Accumulate the dot product in a register and touch global memory
    // once, instead of read-modify-writing C[i*n+j] on every iteration.
    int sum = 0;
    for(int k=0; k<n; k++)
    {
        sum += A[i*n+k]*B[k*n+j];
    }
    C[i*n+j] += sum;
}
void StartCuda(int* A, int* B, int* C, int n)
{
    // Launches the multiply with one block per output column and one
    // thread per output row (n x n), then blocks until it completes.
    // A, B, C must be device pointers to n*n ints.
    // NOTE(review): no cudaGetLastError() after the launch; a launch-config
    // failure (e.g. n > 1024 threads/block) would go unnoticed.
    kernelMatrixMul<<<n, n>>>(A,B,C,n);
    cudaDeviceSynchronize();
}
|
a69e76d8301361063973911be3e71b160738aaab.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
__global__ void Convolution(double* A, double* B, int I, int J)
{
    // 3x3 stencil over a row-major I x J image: one thread per flattened
    // pixel i = row * J + col; B receives the convolved value.
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    // Fixed 3x3 convolution coefficients.
    double c11, c12, c13, c21, c22, c23, c31, c32, c33;
    c11 = +0.2; c21 = +0.5; c31 = -0.8;
    c12 = -0.3; c22 = +0.6; c32 = -0.9;
    c13 = +0.4; c23 = +0.7; c33 = +0.1;
    // Interior pixels only: skip the first/last row (i>J, i<I*J-J) and the
    // first/last column (i%J!=0, (i+1)%J!=0).  Border entries of B are
    // never written by this kernel.
    if (i>J && i<I*J-J && (i%J!=0) && ((i+1)%J!=0)) {
        B[i] = c11 * A[i-J-1] + c12 * A[i-1] + c13 * A[i+J-1]
            + c21 * A[i-J] + c22 * A[i] + c23 * A[i+J]
            + c31 * A[i-J+1] + c32 * A[i+1] + c33 * A[i+J+1];
    }
}
void init(double* A, int I, int J)
{
    // Fill the row-major I x J matrix with uniform pseudo-random values
    // in [0, 1], drawn in row-major order from rand().
    for (int row = 0; row < I; ++row) {
        double* rowPtr = A + row * J;
        for (int col = 0; col < J; ++col) {
            rowPtr[col] = (double)rand()/RAND_MAX;
        }
    }
}
int main(int argc, char *argv[])
{
    // Host driver: reads matrix dims I, J from argv, fills A with random
    // values, runs the Convolution kernel once (timed with HIP events)
    // and writes the result matrix B to "convgpu.out".
    FILE *output1;
    double *A;
    double *B;
    hipEvent_t start, stop;
    float elapsedTime;
    // Robustness fix: validate CLI arguments before touching argv[1]/[2].
    if (argc < 3) {
        fprintf(stderr, "usage: %s <rows> <cols>\n", argv[0]);
        exit(EXIT_FAILURE);
    }
    output1 = fopen("convgpu.out", "w");
    if (output1 == NULL) {
        fprintf(stderr, "Failed to open convgpu.out for writing\n");
        exit(EXIT_FAILURE);
    }
    int I = atoi(argv[1]), J = atoi(argv[2]);
    // size_t avoids overflowing a 32-bit int for large I*J.
    size_t size = (size_t)I * J * sizeof(double);
    A = (double*)malloc(size);
    B = (double*)malloc(size);
    hipError_t err = hipSuccess;
    double *d_A = NULL;
    err = hipMalloc((void **)&d_A, size);
    if (err != hipSuccess)
    {
        fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    double *d_B = NULL;
    err = hipMalloc((void **)&d_B, size);
    if (err != hipSuccess)
    {
        fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    // Bug fix: the kernel only writes interior pixels, so zero d_B first;
    // otherwise the border entries printed below are uninitialized memory.
    err = hipMemset(d_B, 0, size);
    if (err != hipSuccess)
    {
        fprintf(stderr, "Failed to zero device vector B (error code %s)!\n", hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    //initialize the arrays
    init(A, I, J);
    //host to Device
    err = hipMemcpy(d_A, A, size, hipMemcpyHostToDevice);
    if (err != hipSuccess)
    {
        fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    int threadsPerBlock=128;
    // Ceiling-divide so every one of the I*J pixels gets a thread.
    int blocksPerGrid;
    if (I*J%threadsPerBlock != 0){
        blocksPerGrid = I*J/threadsPerBlock+1;
    }else {
        blocksPerGrid=I*J/threadsPerBlock;
    }
    printf("blocksPerGrid: %d\n", blocksPerGrid);
    printf("threadsPerBlock: %d\n", threadsPerBlock);
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipEventRecord(start,0);
    hipLaunchKernelGGL(( Convolution), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A, d_B, I, J);
    hipEventRecord(stop,0);
    hipEventSynchronize(stop);
    hipEventElapsedTime(&elapsedTime, start,stop);
    err = hipMemcpy(B, d_B, size, hipMemcpyDeviceToHost);
    if (err != hipSuccess)
    {
        fprintf(stderr, "Failed to copy vector B from device to host (error code %s)!\n", hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = hipGetLastError();
    if (err != hipSuccess)
    {
        fprintf(stderr, "error code %s\n", hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    for (int i = 0; i < I; i++) {
        for (int j = 0; j < J; j++) {
            fprintf(output1, "%f ", B[i*J+j]);
        }
        fprintf(output1, "\n");
    }
    printf("Elapsed time : %f s\n" ,elapsedTime/1000);
    // Leak fix: the timing events were never destroyed.
    hipEventDestroy(start);
    hipEventDestroy(stop);
    free(A);
    free(B);
    hipFree(d_A);
    hipFree(d_B);
    fclose(output1);
    return 0;
}
| a69e76d8301361063973911be3e71b160738aaab.cu | #include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/time.h>
#include <cuda_runtime.h>
__global__ void Convolution(double* A, double* B, int I, int J)
{
    // 3x3 stencil over a row-major I x J image: one thread per flattened
    // pixel i = row * J + col; B receives the convolved value.
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    // Fixed 3x3 convolution coefficients.
    double c11, c12, c13, c21, c22, c23, c31, c32, c33;
    c11 = +0.2; c21 = +0.5; c31 = -0.8;
    c12 = -0.3; c22 = +0.6; c32 = -0.9;
    c13 = +0.4; c23 = +0.7; c33 = +0.1;
    // Interior pixels only: skip the first/last row (i>J, i<I*J-J) and the
    // first/last column (i%J!=0, (i+1)%J!=0).  Border entries of B are
    // never written by this kernel.
    if (i>J && i<I*J-J && (i%J!=0) && ((i+1)%J!=0)) {
        B[i] = c11 * A[i-J-1] + c12 * A[i-1] + c13 * A[i+J-1]
            + c21 * A[i-J] + c22 * A[i] + c23 * A[i+J]
            + c31 * A[i-J+1] + c32 * A[i+1] + c33 * A[i+J+1];
    }
}
void init(double* A, int I, int J)
{
    // Fill the row-major I x J matrix with uniform pseudo-random values
    // in [0, 1], drawn in row-major order from rand().
    for (int row = 0; row < I; ++row) {
        double* rowPtr = A + row * J;
        for (int col = 0; col < J; ++col) {
            rowPtr[col] = (double)rand()/RAND_MAX;
        }
    }
}
int main(int argc, char *argv[])
{
    // Host driver: reads matrix dims I, J from argv, fills A with random
    // values, runs the Convolution kernel once (timed with CUDA events)
    // and writes the result matrix B to "convgpu.out".
    FILE *output1;
    double *A;
    double *B;
    cudaEvent_t start, stop;
    float elapsedTime;
    // Robustness fix: validate CLI arguments before touching argv[1]/[2].
    if (argc < 3) {
        fprintf(stderr, "usage: %s <rows> <cols>\n", argv[0]);
        exit(EXIT_FAILURE);
    }
    output1 = fopen("convgpu.out", "w");
    if (output1 == NULL) {
        fprintf(stderr, "Failed to open convgpu.out for writing\n");
        exit(EXIT_FAILURE);
    }
    int I = atoi(argv[1]), J = atoi(argv[2]);
    // size_t avoids overflowing a 32-bit int for large I*J.
    size_t size = (size_t)I * J * sizeof(double);
    A = (double*)malloc(size);
    B = (double*)malloc(size);
    cudaError_t err = cudaSuccess;
    double *d_A = NULL;
    err = cudaMalloc((void **)&d_A, size);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    double *d_B = NULL;
    err = cudaMalloc((void **)&d_B, size);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    // Bug fix: the kernel only writes interior pixels, so zero d_B first;
    // otherwise the border entries printed below are uninitialized memory.
    err = cudaMemset(d_B, 0, size);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to zero device vector B (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    //initialize the arrays
    init(A, I, J);
    //host to Device
    err = cudaMemcpy(d_A, A, size, cudaMemcpyHostToDevice);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    int threadsPerBlock=128;
    // Ceiling-divide so every one of the I*J pixels gets a thread.
    int blocksPerGrid;
    if (I*J%threadsPerBlock != 0){
        blocksPerGrid = I*J/threadsPerBlock+1;
    }else {
        blocksPerGrid=I*J/threadsPerBlock;
    }
    printf("blocksPerGrid: %d\n", blocksPerGrid);
    printf("threadsPerBlock: %d\n", threadsPerBlock);
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start,0);
    Convolution<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, I, J);
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start,stop);
    err = cudaMemcpy(B, d_B, size, cudaMemcpyDeviceToHost);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy vector B from device to host (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaGetLastError();
    if (err != cudaSuccess)
    {
        fprintf(stderr, "error code %s\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    for (int i = 0; i < I; i++) {
        for (int j = 0; j < J; j++) {
            fprintf(output1, "%f ", B[i*J+j]);
        }
        fprintf(output1, "\n");
    }
    printf("Elapsed time : %f s\n" ,elapsedTime/1000);
    // Leak fix: the timing events were never destroyed.
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    free(A);
    free(B);
    cudaFree(d_A);
    cudaFree(d_B);
    fclose(output1);
    return 0;
}
|
a9d88d5d015559c69eca1f1d6a642c904a83b3fb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/operators/half_float_ops.h"
#include "caffe2/core/context_gpu.h"
#ifdef CAFFE_HAS_CUDA_FP16
namespace caffe2 {
namespace {
__global__ void FloatToHalfKernel(const int N, const float* X, half* Y) {
  // Elementwise fp32 -> fp16 conversion of N values.
  CUDA_1D_KERNEL_LOOP(idx, N) {
    Y[idx] = __float2half(X[idx]);
  }
}
__global__ void HalfToFloatKernel(const int N, const half* X, float* Y) {
  // Elementwise fp16 -> fp32 conversion of N values.
  CUDA_1D_KERNEL_LOOP(idx, N) {
    Y[idx] = __half2float(X[idx]);
  }
}
}
template <>
bool FloatToHalfOp<CUDAContext>::RunOnDevice() {
  // Elementwise cast of the float input tensor to fp16, launched on the
  // operator's stream; the output is resized to match the input.
  auto& X = Input(0);
  auto* Y = Output(0);
  Y->ResizeLike(X);
  hipLaunchKernelGGL(( FloatToHalfKernel),
      dim3(CAFFE_GET_BLOCKS(X.size())),
      dim3(CAFFE_CUDA_NUM_THREADS),
      0,
      context_.cuda_stream(),
      X.size(),
      X.data<float>(),
      reinterpret_cast<half*>(Y->mutable_data<float16>()));
  return true;
}
template <>
bool HalfToFloatOp<CUDAContext>::RunOnDevice() {
  // Elementwise cast of the fp16 input tensor back to float, launched on
  // the operator's stream; the output is resized to match the input.
  auto& X = Input(0);
  auto* Y = Output(0);
  Y->ResizeLike(X);
  hipLaunchKernelGGL(( HalfToFloatKernel),
      dim3(CAFFE_GET_BLOCKS(X.size())),
      dim3(CAFFE_CUDA_NUM_THREADS),
      0,
      context_.cuda_stream(),
      X.size(),
      reinterpret_cast<const half*>(X.data<float16>()),
      Y->mutable_data<float>());
  return true;
}
REGISTER_CUDA_OPERATOR(FloatToHalf, FloatToHalfOp<CUDAContext>);
REGISTER_CUDA_OPERATOR(HalfToFloat, HalfToFloatOp<CUDAContext>);
} // namespace caffe2
#endif // CAFFE_HAS_CUDA_FP16
| a9d88d5d015559c69eca1f1d6a642c904a83b3fb.cu | #include "caffe2/operators/half_float_ops.h"
#include "caffe2/core/context_gpu.h"
#ifdef CAFFE_HAS_CUDA_FP16
namespace caffe2 {
namespace {
__global__ void FloatToHalfKernel(const int N, const float* X, half* Y) {
  // Elementwise fp32 -> fp16 conversion of N values.
  CUDA_1D_KERNEL_LOOP(idx, N) {
    Y[idx] = __float2half(X[idx]);
  }
}
__global__ void HalfToFloatKernel(const int N, const half* X, float* Y) {
  // Elementwise fp16 -> fp32 conversion of N values.
  CUDA_1D_KERNEL_LOOP(idx, N) {
    Y[idx] = __half2float(X[idx]);
  }
}
}
template <>
bool FloatToHalfOp<CUDAContext>::RunOnDevice() {
  // Elementwise cast of the float input tensor to fp16, launched on the
  // operator's stream; the output is resized to match the input.
  auto& X = Input(0);
  auto* Y = Output(0);
  Y->ResizeLike(X);
  FloatToHalfKernel<<<
      CAFFE_GET_BLOCKS(X.size()),
      CAFFE_CUDA_NUM_THREADS,
      0,
      context_.cuda_stream()>>>(
      X.size(),
      X.data<float>(),
      reinterpret_cast<half*>(Y->mutable_data<float16>()));
  return true;
}
template <>
bool HalfToFloatOp<CUDAContext>::RunOnDevice() {
  // Elementwise cast of the fp16 input tensor back to float, launched on
  // the operator's stream; the output is resized to match the input.
  auto& X = Input(0);
  auto* Y = Output(0);
  Y->ResizeLike(X);
  HalfToFloatKernel<<<
      CAFFE_GET_BLOCKS(X.size()),
      CAFFE_CUDA_NUM_THREADS,
      0,
      context_.cuda_stream()>>>(
      X.size(),
      reinterpret_cast<const half*>(X.data<float16>()),
      Y->mutable_data<float>());
  return true;
}
REGISTER_CUDA_OPERATOR(FloatToHalf, FloatToHalfOp<CUDAContext>);
REGISTER_CUDA_OPERATOR(HalfToFloat, HalfToFloatOp<CUDAContext>);
} // namespace caffe2
#endif // CAFFE_HAS_CUDA_FP16
|
4a4ae5b27aeff4ba5df0c3d2fc3fb8c33dc4d6b6.hip | // !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/Dispatch.h>
#include <ATen/ExpandUtils.h>
#include <ATen/SparseTensorUtils.h>
#include <ATen/WrapDimUtilsMulti.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/HIPUtils.h>
#include <ATen/hip/ThrustAllocator.h>
#include <ATen/native/sparse/SparseTensorMath.h>
#include <ATen/native/sparse/ParamUtils.h>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/native/sparse/hip/SparseHIPApplyUtils.cuh>
#include <ATen/native/sparse/hip/SparseHIPBlas.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/CUDAFunctions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/_masked_softmax_native.h>
#include <ATen/ops/_log_softmax_cuda_dispatch.h>
#include <ATen/ops/_log_softmax_backward_data_cuda_dispatch.h>
#include <ATen/ops/_softmax_cuda_dispatch.h>
#include <ATen/ops/_softmax_backward_data_cuda_dispatch.h>
#include <ATen/ops/equal_native.h>
#include <ATen/ops/full.h>
#include <ATen/ops/softmax.h>
#endif
#include <thrust/binary_search.h>
#include <thrust/device_ptr.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <thrust/system/hip/execution_policy.h>
#include <thrust/iterator/constant_iterator.h>
#include <hip/hip_runtime_api.h>
#include <hipsparse.h>
#include <bitset>
#include <c10/hip/HIPMathCompat.h>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/hip/detail/OffsetCalculator.cuh>
#include <ATen/native/hip/Loops.cuh>
#include <c10/macros/Macros.h>
#include <thrust/copy.h>
#include <thrust/device_ptr.h>
#include <thrust/for_each.h>
#include <thrust/functional.h>
#include <thrust/gather.h>
#include <thrust/generate.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/reduce.h>
#include <thrust/scan.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <thrust/transform.h>
#include <thrust/unique.h>
#include <c10/hip/HIPMathCompat.h>
namespace at {
namespace native {
namespace {
// Number of threads in a block given an input size up to MAX_BLOCK_SIZE
static int getNumThreads(int nElem) {
  // Candidate block sizes in increasing order; pick the smallest one that
  // covers nElem, falling back to the largest.
#if defined(USE_ROCM)
  const int candidates[5] = {16, 32, 64, 128, 256};
#else
  const int candidates[5] = {32, 64, 128, 256, 512};
#endif
  for (int i = 0; i < 5; ++i) {
    if (nElem <= candidates[i]) {
      return candidates[i];
    }
  }
  return candidates[4];
}
template <typename scalar_t, bool LogSoftMax>
__global__ void cuda_sparse_coo_softmax_kernel(
    int64_t* sorted_pool_indices,
    int64_t size,
    int64_t* pool_sizes,
    int64_t* pool_offsets,
    int64_t nvalues,
    scalar_t* mx_rows,
    PackedTensorAccessor<scalar_t, 2> input_values_acc,
    PackedTensorAccessor<scalar_t, 2> output_values_acc) {
  /*
    See ATen/native/sparse/Softmax.cpp:cpu_sparse_coo_softmax for the CPU
    implementation of the sparse softmax algorithm that this implementation is
    based on.
  */
  int tid = threadIdx.x;
  int blkid = blockIdx.x;
  int blksz = blockDim.x;
  int gridsz = gridDim.x;
  // Grid-stride loop over pools: one thread owns a whole pool (the set of
  // nnz entries forming one softmax reduction group).
  int index = tid + blkid * blksz;
  int step = blksz * gridsz;
  while (index < size) {
    int64_t offset = pool_offsets[index];
    int64_t* pool_indices = sorted_pool_indices + offset;
    int64_t pool_indices_size = pool_sizes[index];
    // Pre-computed per-column maxima for this pool, used for the
    // numerically stable exp(x - max) formulation.
    scalar_t* mx_row = mx_rows + index * nvalues;
    for (int64_t j = 0; j < nvalues; j++) {
      // Pass 1: accumulate sum of exp(x - max) over the pool; for plain
      // softmax the exponentials are stashed into the output as we go.
      scalar_t exp_sums = 0;
      for (int64_t p = 0; p < pool_indices_size; p++) {
        auto i = pool_indices[p];
        auto values_row = input_values_acc[i];
        auto out_values_row = output_values_acc[i];
        auto v = c10::hip::compat::exp(values_row[j] - mx_row[j]);
        if (!LogSoftMax) {
          out_values_row[j] = v;
        }
        exp_sums += v;
      }
      // Pass 2: normalize (softmax) or subtract the log-sum (log-softmax).
      for (int64_t p = 0; p < pool_indices_size; p++) {
        auto i = pool_indices[p];
        auto values_row = input_values_acc[i];
        auto out_values_row = output_values_acc[i];
        if (LogSoftMax) {
          out_values_row[j] = values_row[j] - mx_row[j] - c10::hip::compat::log(exp_sums);
        } else {
          out_values_row[j] *= 1.0 / exp_sums;
        }
      }
    }
    index += step;
  }
}
template <typename scalar_t, bool LogSoftMax>
__global__ void cuda_sparse_coo_softmax_backward_kernel(
    int64_t* sorted_pool_indices,
    int64_t size,
    int64_t* pool_sizes,
    int64_t* pool_offsets,
    int64_t nvalues,
    int64_t grad_nnz,
    int64_t* grad_offsets,
    int64_t* out_offsets,
    int64_t* lower_bound_values,
    PackedTensorAccessor<scalar_t, 2> values_accessor,
    PackedTensorAccessor<scalar_t, 2> out_values_accessor,
    PackedTensorAccessor<scalar_t, 2> grad_values_accessor) {
  /*
    See ATen/native/sparse/Softmax.cpp:cpu_sparse_coo_softmax_backward for
    the CPU implementation of the sparse softmax backward algorithm that this
    implementation is based on.
  */
  int tid = threadIdx.x;
  int blkid = blockIdx.x;
  int blksz = blockDim.x;
  int gridsz = gridDim.x;
  // Grid-stride loop over pools (one pool = one softmax reduction group).
  int index = tid + blkid * blksz;
  int step = blksz * gridsz;
  while (index < size) {
    int64_t offset = pool_offsets[index];
    int64_t* pool_indices = sorted_pool_indices + offset;
    int64_t pool_indices_size = pool_sizes[index];
    for (int64_t k = 0; k < nvalues; k++) {
      scalar_t tmp_row{0};
      /* Compute tmp = - sum_j output_j * grad_j */
      for (int64_t p = 0; p < pool_indices_size; p++) {
        auto i = pool_indices[p];
        auto out_values_row = out_values_accessor[i];
        // lower_bound_values[i] is the candidate grad entry for output
        // entry i; a mismatch of offsets means grad is zero there.
        auto j = lower_bound_values[i];
        /* Update `tmp_row` accumulator only when limits and pools are valid */
        if (j < grad_nnz && (out_offsets[i] == grad_offsets[j])) {
          auto grad_values_row = grad_values_accessor[j];
          if (LogSoftMax) {
            tmp_row -= grad_values_row[k];
          } else {
            tmp_row -= out_values_row[k] * grad_values_row[k];
          }
        }
      }
      /* Compute grad_input = output * (grad + tmp)*/
      for (int64_t p = 0; p < pool_indices_size; p++) {
        auto i = pool_indices[p];
        auto out_values_row = out_values_accessor[i];
        auto values_row = values_accessor[i];
        auto j = lower_bound_values[i];
        if (j < grad_nnz && (out_offsets[i] == grad_offsets[j])) {
          auto grad_values_row = grad_values_accessor[j];
          if (LogSoftMax) {
            values_row[k] = grad_values_row[k] +
                c10::hip::compat::exp(out_values_row[k]) * tmp_row;
          } else {
            values_row[k] =
                out_values_row[k] * (grad_values_row[k] + tmp_row);
          }
        } else {
          // No matching grad entry: only the pooled `tmp_row` term remains.
          if (LogSoftMax) {
            values_row[k] =
                c10::hip::compat::exp(out_values_row[k]) * tmp_row;
          } else {
            values_row[k] = out_values_row[k] * tmp_row;
          }
        }
      }
    }
    index += step;
  }
}
using thrust_ptr = thrust::device_ptr<int64_t>;
Tensor get_offsets(
    const Tensor& indices,
    const IntArrayRef& sizes,
    const int64_t dim) {
  /*
    See ATen/native/sparse/Softmax.cpp:get_offsets for the CPU
    implementation of get_offsets function that this implementation is based on.
  */
  // Produces one scalar "pool id" per nnz entry, computed from all sparse
  // coordinates except `dim`; entries sharing a pool id belong to the same
  // softmax reduction group.
  hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
  at::cuda::ThrustAllocator allocator;
  auto policy = thrust::hip::par(allocator).on(stream);
  auto ndim = indices.size(0);
  auto nnz = indices.size(1);
  // Row-major strides over `sizes`, with the softmax dimension's extent
  // collapsed to 1 so it never contributes to the pool id.
  std::vector<int64_t> host_strides(ndim, 1);
  if (ndim > 1) {
    for (int64_t i = ndim - 2; i >= 0; i--) {
      host_strides[i] =
          host_strides[i + 1] * (i + 1 == dim ? 1 : sizes[i + 1]);
    }
  }
  auto strides = at::empty({ndim}, indices.options());
  auto strides_ptr = strides.data_ptr<int64_t>();
  // NOTE(review): async H2D copy from a stack-backed vector; correctness
  // relies on the subsequent thrust call executing on the same stream
  // before host_strides goes out of scope — confirm.
  AT_CUDA_CHECK(hipMemcpyAsync(
      strides_ptr, host_strides.data(), host_strides.size() * sizeof(int64_t),
      hipMemcpyHostToDevice,
      stream));
  auto indices_accessor = indices.packed_accessor<int64_t, 2>();
  Tensor offsets = at::empty({nnz}, indices.options());
  // Dot each entry's (non-dim) coordinates with the collapsed strides.
  thrust::transform(
      policy,
      thrust::make_counting_iterator(int64_t(0)),
      thrust::make_counting_iterator(int64_t(nnz)),
      thrust::device_ptr<int64_t>(offsets.data_ptr<int64_t>()),
      [indices_accessor, strides_ptr, dim, ndim] __device__(int64_t x) {
        int64_t pool_index = 0;
        for (int64_t j = 0; j < ndim; j++) {
          if (j != dim) {
            auto indices_row = indices_accessor[j];
            auto stride = strides_ptr[j];
            pool_index += stride * indices_row[x];
          }
        }
        return pool_index;
      });
  return offsets;
}
template <class scalar_t, bool requireMxRows = true>
std::tuple<Tensor, Tensor, Tensor, Tensor> compute_pool_max(
    const Tensor& indices,
    const Tensor& values,
    const IntArrayRef& sizes,
    int64_t nvalues,
    const int64_t dim) {
  /*
    Return pools of indices that align with the given dimension and the
    corresponding max values for each pool.
    See ATen/native/sparse/Softmax.cpp:get_offsets and
    ATen/native/sparse/Softmax.cpp:cpu_sparse_coo_softmax for the CPU
    implementation that this implementation is based on.
  */
  hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
  at::cuda::ThrustAllocator allocator;
  auto policy = thrust::hip::par(allocator).on(stream);
  auto nnz = indices.size(1);
  auto offsets = get_offsets(indices, sizes, dim);
  int64_t* offsets_ptr = offsets.data_ptr<int64_t>();
  // Argsort the nnz entries by pool id so that members of the same pool
  // become adjacent in sorted_indices.
  auto sorted_indices = at::empty({nnz}, indices.options());
  thrust_ptr sorted_indices_thrust_ptr(sorted_indices.data_ptr<int64_t>());
  thrust::sequence(
      policy, sorted_indices_thrust_ptr, sorted_indices_thrust_ptr + nnz, 0);
  thrust::sort(
      policy,
      sorted_indices_thrust_ptr,
      sorted_indices_thrust_ptr + nnz,
      [offsets_ptr] __device__(int64_t x, int64_t y) {
        return offsets_ptr[x] < offsets_ptr[y];
      });
  // Run-length encode the sorted pool ids: one count per distinct pool.
  auto pool_sizes = at::empty({nnz}, indices.options());
  auto new_end = thrust::reduce_by_key(
      policy,
      sorted_indices_thrust_ptr,
      sorted_indices_thrust_ptr + nnz,
      thrust::make_constant_iterator(int64_t(1)),
      thrust::make_discard_iterator(),
      thrust_ptr(pool_sizes.data_ptr<int64_t>()),
      [offsets_ptr] __device__(int64_t x, int64_t y) {
        return offsets_ptr[x] == offsets_ptr[y];
      });
  auto new_sz = thrust::distance(
      thrust_ptr(pool_sizes.data_ptr<int64_t>()), new_end.second);
  pool_sizes.resize_({new_sz});
  // Exclusive prefix-sum of pool sizes -> starting offset of each pool
  // within sorted_indices.
  auto pool_offsets = pool_sizes.clone();
  thrust_ptr pool_offsets_thrust_ptr(
      pool_offsets.data_ptr<int64_t>());
  thrust::exclusive_scan(
      policy,
      pool_offsets_thrust_ptr,
      pool_offsets_thrust_ptr + new_sz,
      pool_offsets_thrust_ptr);
  Tensor mx_buffer;
  if (requireMxRows) {
    // Per-pool, per-column running maximum of the values, initialized to
    // -inf; consumed by the numerically stable softmax kernel.
    auto values_accessor =
        values.packed_accessor<scalar_t, 2>(); // {nnz, nvalues}
    mx_buffer = at::full({new_sz * nvalues}, Scalar(-std::numeric_limits<scalar_t>::infinity()), values.options());
    auto mx_buffer_ptr = mx_buffer.data_ptr<scalar_t>();
    auto pool_sizes_ptr = pool_sizes.data_ptr<int64_t>();
    auto sorted_indices_ptr = sorted_indices.data_ptr<int64_t>();
    auto pool_offsets_ptr = pool_offsets.data_ptr<int64_t>();
    thrust::for_each(
        policy,
        thrust::make_counting_iterator(int64_t(0)),
        thrust::make_counting_iterator(int64_t(new_sz)),
        [values_accessor,
         sorted_indices_ptr,
         pool_sizes_ptr,
         pool_offsets_ptr,
         mx_buffer_ptr,
         nvalues] __device__(int64_t index) {
          // One thread per pool: fold every member row into the max row.
          int64_t curr_pool_size = pool_sizes_ptr[index];
          auto mx_row = mx_buffer_ptr + index * nvalues;
          int64_t offset = pool_offsets_ptr[index];
          for (int64_t p = 0; p < curr_pool_size; p++) {
            int64_t i = *(sorted_indices_ptr + offset + p);
            auto values_row = values_accessor[i].data();
            for (int64_t j = 0; j < nvalues; j++) {
              mx_row[j] = c10::hip::compat::max(mx_row[j], values_row[j]);
            }
          }
        });
  }
  return std::make_tuple(
      sorted_indices, pool_offsets, pool_sizes, mx_buffer);
}
template <typename scalar_t, bool LogSoftMax>
void cuda_sparse_coo_softmax(
    Tensor& output,
    const Tensor& input,
    const int64_t dim) {
  /*
    Forward sparse COO softmax (log-softmax when LogSoftMax is true) along
    dimension `dim`, writing indices and values into `output`.

    See ATen/native/sparse/Softmax.cpp:cpu_sparse_coo_softmax for the CPU
    implementation of the sparse softmax algorithm that this implementation is
    based on.
  */
  auto sparse_dim = input.sparse_dim();
  auto indices = input._indices().contiguous();
  auto values = input._values().contiguous();
  auto out_values = output._values();
  auto out_indices = output._indices();
  out_values.resize_as_(values);
  out_indices.resize_as_(indices);
  out_indices.copy_(indices);
  // Softmax over a dense dimension reduces to the dense implementation on the
  // values tensor (dim 0 of values is the nnz axis, hence the +1 shift).
  if (dim >= sparse_dim) {
    if (LogSoftMax) {
      auto new_values =
          at::cuda::_log_softmax(values, dim - sparse_dim + 1, false);
      out_values.set_(new_values);
    } else {
      auto new_values = at::cuda::_softmax(values, dim - sparse_dim + 1, false);
      out_values.set_(new_values);
    }
    return;
  }
  hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
  auto nnz = values.size(0);
  auto sizes = input.sizes();
  // Number of scalars carried by each nnz entry (product of the dense dims).
  auto nvalues = values.numel() / nnz;
  /* Prepare accessors */
  auto values_2 = values.view({nnz, nvalues});
  auto values_accessor = values_2.packed_accessor<scalar_t, 2>();
  auto out_values_2 = out_values.view({nnz, nvalues});
  auto out_values_accessor = out_values_2.packed_accessor<scalar_t, 2>();
  Tensor sorted_indices;
  Tensor pool_offsets;
  Tensor pool_sizes;
  Tensor mx_buffer;
  // Group entries into pools sharing all sparse coordinates except `dim`;
  // mx_buffer holds per-pool maxima used for numerically stable exp().
  std::tie(sorted_indices, pool_offsets, pool_sizes, mx_buffer) =
      compute_pool_max<scalar_t, true>(indices, values_2, sizes, nvalues, dim);
  auto pool_size = pool_offsets.size(0);
  // One thread per pool; the kernel uses a grid-stride loop so any grid size
  // is correct.
  int block_size = getNumThreads(pool_size);
  const int grid_size = (pool_size + block_size - 1) / block_size;
  hipLaunchKernelGGL(( cuda_sparse_coo_softmax_kernel<scalar_t, LogSoftMax>)
      , dim3(grid_size), dim3(block_size), 0, stream,
      sorted_indices.data_ptr<int64_t>(),
      pool_size,
      pool_sizes.data_ptr<int64_t>(),
      pool_offsets.data_ptr<int64_t>(),
      nvalues,
      mx_buffer.data_ptr<scalar_t>(),
      values_accessor,
      out_values_accessor);
  C10_HIP_KERNEL_LAUNCH_CHECK();
}
template <typename scalar_t, bool LogSoftMax>
void cuda_sparse_coo_softmax_backward(
    Tensor& grad_input,
    const Tensor& grad,
    const Tensor& output,
    const int64_t dim,
    ScalarType input_dtype) {
  /*
    Backward pass of sparse COO softmax/log-softmax. `grad` and `output` may
    have different sparsity patterns; entries are matched via their linear
    offsets (pool keys computed with dim == -1, i.e. over all sparse dims).

    See ATen/native/sparse/Softmax.cpp:cpu_sparse_coo_softmax_backward for
    the CPU implementation of the sparse softmax backward algorithm that this
    implementation is based on.
  */
  auto sparse_dim = output.sparse_dim();
  auto sizes = output.sizes().vec();
  auto grad_indices = grad._indices().contiguous();
  auto grad_values = grad._values().contiguous();
  auto out_indices = output._indices().contiguous();
  auto out_values = output._values().contiguous();
  auto values = grad_input._values();
  auto indices = grad_input._indices();
  auto out_nnz = out_values.size(0);
  auto grad_nnz = grad_values.size(0);
  // grad_input inherits output's sparsity pattern; values start at zero.
  values.resize_as_(out_values);
  values.zero_();
  indices.resize_as_(out_indices);
  indices.copy_(out_indices);
  // dim == -1: keys are full linear offsets, used to align out/grad entries.
  auto out_offsets = get_offsets(out_indices, sizes, -1);
  auto grad_offsets = get_offsets(grad_indices, sizes, -1);
  hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
  at::cuda::ThrustAllocator allocator;
  auto policy = thrust::hip::par(allocator).on(stream);
  /* when dim >= sparse_dim the dense backward is used */
  if (dim >= sparse_dim) {
    if (at::native::cuda_equal(out_offsets, grad_offsets) == true) {
      // Identical sparsity patterns: one dense backward over all values.
      if (LogSoftMax) {
        auto r = at::cuda::_log_softmax_backward_data(
            grad_values, out_values, dim - sparse_dim + 1, input_dtype);
        values.set_(r);
      } else {
        auto r = at::cuda::_softmax_backward_data(grad_values, out_values, dim - sparse_dim + 1, input_dtype);
        values.set_(r);
      }
    } else {
      // Patterns differ: match entries one by one on the host via binary
      // search over the (sorted) grad offsets.
      auto host_out_offsets =
          out_offsets.to(at::Device(kCPU), indices.dtype(), false, true);
      auto host_grad_offsets =
          grad_offsets.to(at::Device(kCPU), indices.dtype(), false, true);
      auto out_offsets_accessor = host_out_offsets.data_ptr<int64_t>();
      auto grad_offsets_accessor = host_grad_offsets.data_ptr<int64_t>();
      for (int64_t i = 0; i < out_nnz; i++) {
        auto low = thrust::lower_bound(
            grad_offsets_accessor,
            grad_offsets_accessor + grad_offsets.size(0),
            out_offsets_accessor[i]);
        auto j = low - grad_offsets_accessor;
        /*
          Compute output using dense backward only when limits and pools are valid
          If this check is false then a sparse tensor with full of zeros is returned
        */
        if (j < grad_nnz && out_offsets_accessor[i] == grad_offsets_accessor[j]) {
          if (LogSoftMax) {
            auto r = at::cuda::_log_softmax_backward_data(
                grad_values[j], out_values[i], dim - sparse_dim, input_dtype);
            values[i].copy_(r);
          } else {
            auto r = at::cuda::_softmax_backward_data(
                grad_values[j], out_values[i], dim - sparse_dim, input_dtype);
            values[i].copy_(r);
          }
        }
      }
    }
    return;
  }
  // Sparse-dimension backward: flatten dense dims and run the pooled kernel.
  auto nnz = values.size(0);
  auto nvalues = values.numel() / nnz;
  auto values_2 = values.view({nnz, nvalues});
  auto values_accessor = values_2.packed_accessor<scalar_t, 2>();
  auto out_values_2 = out_values.view({out_nnz, nvalues});
  auto out_values_accessor = out_values_2.packed_accessor<scalar_t, 2>();
  auto grad_values_2 = grad_values.view({grad_nnz, nvalues});
  auto grad_values_accessor = grad_values_2.packed_accessor<scalar_t, 2>();
  // For every output entry, position of its (candidate) match in grad.
  Tensor lower_bound_values =
      at::empty({out_offsets.size(0)}, indices.options());
  thrust::lower_bound(
      policy,
      thrust_ptr(grad_offsets.data_ptr<int64_t>()),
      thrust_ptr(grad_offsets.data_ptr<int64_t>() + grad_offsets.size(0)),
      thrust_ptr(out_offsets.data_ptr<int64_t>()),
      thrust_ptr(out_offsets.data_ptr<int64_t>()) + out_offsets.size(0),
      thrust_ptr(lower_bound_values.data_ptr<int64_t>()));
  Tensor sorted_indices;
  Tensor pool_offsets;
  Tensor pool_sizes;
  /* Compute independent pools of indices */
  std::tie(
      sorted_indices, pool_offsets, pool_sizes, std::ignore) =
      compute_pool_max<scalar_t, false>(
          out_indices, values_2, sizes, nvalues, dim);
  auto pool_size = pool_offsets.size(0);
  int block_size = getNumThreads(pool_size);
  const int grid_size = (pool_size + block_size - 1) / block_size;
  hipLaunchKernelGGL(( cuda_sparse_coo_softmax_backward_kernel<scalar_t, LogSoftMax>)
      , dim3(grid_size), dim3(block_size), 0, stream,
      sorted_indices.data_ptr<int64_t>(),
      pool_size,
      pool_sizes.data_ptr<int64_t>(),
      pool_offsets.data_ptr<int64_t>(),
      nvalues,
      grad_nnz,
      grad_offsets.data_ptr<int64_t>(),
      out_offsets.data_ptr<int64_t>(),
      lower_bound_values.data_ptr<int64_t>(),
      values_accessor,
      out_values_accessor,
      grad_values_accessor);
  C10_HIP_KERNEL_LAUNCH_CHECK();
}
} // end anonymous namespace
// Entry point for sparse COO softmax on HIP/CUDA: canonicalize the input,
// then dispatch on its floating-point dtype.
Tensor softmax_sparse_cuda(
    const Tensor& input_,
    const int64_t dim,
    const bool half_to_float) {
  auto prepared = softmax_sparse_input_preprocessing(
      input_, dim, half_to_float, "softmax");
  Tensor input = std::get<0>(prepared);
  Tensor output = std::get<1>(prepared);
  // Empty input: the preprocessed output is already the correct result.
  if (input.numel() != 0) {
    AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "softmax", [&] {
      cuda_sparse_coo_softmax<scalar_t, false>(output, input, dim);
    });
  }
  return output;
}
// Entry point for sparse COO log-softmax on HIP/CUDA: canonicalize the
// input, then dispatch on its floating-point dtype.
Tensor log_softmax_sparse_cuda(
    const Tensor& input_,
    const int64_t dim,
    const bool half_to_float) {
  auto prepared = softmax_sparse_input_preprocessing(
      input_, dim, half_to_float, "log_softmax");
  Tensor input = std::get<0>(prepared);
  Tensor output = std::get<1>(prepared);
  // Empty input: the preprocessed output is already the correct result.
  if (input.numel() != 0) {
    AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "log_softmax", [&] {
      cuda_sparse_coo_softmax<scalar_t, true>(output, input, dim);
    });
  }
  return output;
}
// Backward entry point for sparse COO softmax on HIP/CUDA.
Tensor softmax_backward_sparse_cuda(
    const Tensor& grad_,
    const Tensor& output_,
    int64_t dim_,
    const Tensor& input_) {
  auto prepared = softmax_backward_sparse_input_preprocessing(
      grad_, output_, dim_, input_, "softmax_backward");
  Tensor grad_input = std::get<0>(prepared);
  Tensor grad = std::get<1>(prepared);
  Tensor output = std::get<2>(prepared);
  // An empty forward output means there is nothing to differentiate.
  if (output.numel() != 0) {
    AT_DISPATCH_FLOATING_TYPES(grad.scalar_type(), "softmax_backward", [&] {
      cuda_sparse_coo_softmax_backward<scalar_t, false>(
          grad_input, grad, output, dim_, input_.scalar_type());
    });
  }
  return grad_input;
}
// Backward entry point for sparse COO log-softmax on HIP/CUDA.
Tensor log_softmax_backward_sparse_cuda(
    const Tensor& grad_,
    const Tensor& output_,
    int64_t dim_,
    const Tensor& input_) {
  auto prepared = softmax_backward_sparse_input_preprocessing(
      grad_, output_, dim_, input_, "log_softmax_backward");
  Tensor grad_input = std::get<0>(prepared);
  Tensor grad = std::get<1>(prepared);
  Tensor output = std::get<2>(prepared);
  // An empty forward output means there is nothing to differentiate.
  if (output.numel() != 0) {
    AT_DISPATCH_FLOATING_TYPES(grad.scalar_type(), "log_softmax_backward", [&] {
      cuda_sparse_coo_softmax_backward<scalar_t, true>(
          grad_input, grad, output, dim_, input_.scalar_type());
    });
  }
  return grad_input;
}
} // namespace native
} // namespace at
| 4a4ae5b27aeff4ba5df0c3d2fc3fb8c33dc4d6b6.cu | #define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/Dispatch.h>
#include <ATen/ExpandUtils.h>
#include <ATen/SparseTensorUtils.h>
#include <ATen/WrapDimUtilsMulti.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/CUDAUtils.h>
#include <ATen/cuda/ThrustAllocator.h>
#include <ATen/native/sparse/SparseTensorMath.h>
#include <ATen/native/sparse/ParamUtils.h>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/native/sparse/cuda/SparseCUDAApplyUtils.cuh>
#include <ATen/native/sparse/cuda/SparseCUDABlas.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/CUDAFunctions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/_masked_softmax_native.h>
#include <ATen/ops/_log_softmax_cuda_dispatch.h>
#include <ATen/ops/_log_softmax_backward_data_cuda_dispatch.h>
#include <ATen/ops/_softmax_cuda_dispatch.h>
#include <ATen/ops/_softmax_backward_data_cuda_dispatch.h>
#include <ATen/ops/equal_native.h>
#include <ATen/ops/full.h>
#include <ATen/ops/softmax.h>
#endif
#include <thrust/binary_search.h>
#include <thrust/device_ptr.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <thrust/system/cuda/execution_policy.h>
#include <thrust/iterator/constant_iterator.h>
#include <cuda_runtime_api.h>
#include <cusparse.h>
#include <bitset>
#include <c10/cuda/CUDAMathCompat.h>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/cuda/detail/OffsetCalculator.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <c10/macros/Macros.h>
#include <thrust/copy.h>
#include <thrust/device_ptr.h>
#include <thrust/for_each.h>
#include <thrust/functional.h>
#include <thrust/gather.h>
#include <thrust/generate.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/reduce.h>
#include <thrust/scan.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <thrust/transform.h>
#include <thrust/unique.h>
#include <c10/cuda/CUDAMathCompat.h>
namespace at {
namespace native {
namespace {
// Number of threads in a block given an input size up to MAX_BLOCK_SIZE
// Picks a thread-block size for `nElem` work items: the smallest candidate
// size that is >= nElem, capped at the platform maximum (512 on CUDA,
// 256 on ROCm).
static int getNumThreads(int nElem) {
#if defined(USE_ROCM)
  const int candidates[5] = {16, 32, 64, 128, 256};
#else
  const int candidates[5] = {32, 64, 128, 256, 512};
#endif
  for (int c : candidates) {
    if (nElem <= c) {
      return c;
    }
  }
  return candidates[4];
}
template <typename scalar_t, bool LogSoftMax>
__global__ void cuda_sparse_coo_softmax_kernel(
    int64_t* sorted_pool_indices,
    int64_t size,
    int64_t* pool_sizes,
    int64_t* pool_offsets,
    int64_t nvalues,
    scalar_t* mx_rows,
    PackedTensorAccessor<scalar_t, 2> input_values_acc,
    PackedTensorAccessor<scalar_t, 2> output_values_acc) {
  /*
    One thread processes one pool of entries; a grid-stride loop (index +=
    blockDim*gridDim) covers all `size` pools regardless of launch config.
    `mx_rows` holds the precomputed per-pool maxima used to keep exp() stable.

    See ATen/native/sparse/Softmax.cpp:cpu_sparse_coo_softmax for the CPU
    implementation of the sparse softmax algorithm that this implementation is
    based on.
  */
  int tid = threadIdx.x;
  int blkid = blockIdx.x;
  int blksz = blockDim.x;
  int gridsz = gridDim.x;
  int index = tid + blkid * blksz;
  int step = blksz * gridsz;
  while (index < size) {
    int64_t offset = pool_offsets[index];
    int64_t* pool_indices = sorted_pool_indices + offset;
    int64_t pool_indices_size = pool_sizes[index];
    scalar_t* mx_row = mx_rows + index * nvalues;
    for (int64_t j = 0; j < nvalues; j++) {
      scalar_t exp_sums = 0;
      // Pass 1: accumulate sum of exp(x - max); for plain softmax also stash
      // the unnormalized exponentials in the output.
      for (int64_t p = 0; p < pool_indices_size; p++) {
        auto i = pool_indices[p];
        auto values_row = input_values_acc[i];
        auto out_values_row = output_values_acc[i];
        auto v = c10::cuda::compat::exp(values_row[j] - mx_row[j]);
        if (!LogSoftMax) {
          out_values_row[j] = v;
        }
        exp_sums += v;
      }
      // Pass 2: normalize (softmax) or write x - max - log(sum) (log-softmax).
      for (int64_t p = 0; p < pool_indices_size; p++) {
        auto i = pool_indices[p];
        auto values_row = input_values_acc[i];
        auto out_values_row = output_values_acc[i];
        if (LogSoftMax) {
          out_values_row[j] = values_row[j] - mx_row[j] - c10::cuda::compat::log(exp_sums);
        } else {
          // NOTE(review): 1.0 is a double literal, so for float scalar_t this
          // divide/multiply is promoted to double — confirm intended.
          out_values_row[j] *= 1.0 / exp_sums;
        }
      }
    }
    index += step;
  }
}
template <typename scalar_t, bool LogSoftMax>
__global__ void cuda_sparse_coo_softmax_backward_kernel(
    int64_t* sorted_pool_indices,
    int64_t size,
    int64_t* pool_sizes,
    int64_t* pool_offsets,
    int64_t nvalues,
    int64_t grad_nnz,
    int64_t* grad_offsets,
    int64_t* out_offsets,
    int64_t* lower_bound_values,
    PackedTensorAccessor<scalar_t, 2> values_accessor,
    PackedTensorAccessor<scalar_t, 2> out_values_accessor,
    PackedTensorAccessor<scalar_t, 2> grad_values_accessor) {
  /*
    One thread processes one pool via a grid-stride loop. Because grad and
    output may have different sparsity patterns, each output entry i is matched
    to a grad entry j = lower_bound_values[i], and the match is only valid
    when the offsets agree (checked below).

    See ATen/native/sparse/Softmax.cpp:cpu_sparse_coo_softmax_backward for
    the CPU implementation of the sparse softmax backward algorithm that this
    implementation is based on.
  */
  int tid = threadIdx.x;
  int blkid = blockIdx.x;
  int blksz = blockDim.x;
  int gridsz = gridDim.x;
  int index = tid + blkid * blksz;
  int step = blksz * gridsz;
  while (index < size) {
    int64_t offset = pool_offsets[index];
    int64_t* pool_indices = sorted_pool_indices + offset;
    int64_t pool_indices_size = pool_sizes[index];
    for (int64_t k = 0; k < nvalues; k++) {
      scalar_t tmp_row{0};
      /* Compute tmp = - sum_j output_j * grad_j */
      for (int64_t p = 0; p < pool_indices_size; p++) {
        auto i = pool_indices[p];
        auto out_values_row = out_values_accessor[i];
        auto j = lower_bound_values[i];
        /* Update `tmp_row` accumulator only when limits and pools are valid */
        if (j < grad_nnz && (out_offsets[i] == grad_offsets[j])) {
          auto grad_values_row = grad_values_accessor[j];
          if (LogSoftMax) {
            tmp_row -= grad_values_row[k];
          } else {
            tmp_row -= out_values_row[k] * grad_values_row[k];
          }
        }
      }
      /* Compute grad_input = output * (grad + tmp)*/
      for (int64_t p = 0; p < pool_indices_size; p++) {
        auto i = pool_indices[p];
        auto out_values_row = out_values_accessor[i];
        auto values_row = values_accessor[i];
        auto j = lower_bound_values[i];
        if (j < grad_nnz && (out_offsets[i] == grad_offsets[j])) {
          auto grad_values_row = grad_values_accessor[j];
          if (LogSoftMax) {
            values_row[k] = grad_values_row[k] +
                c10::cuda::compat::exp(out_values_row[k]) * tmp_row;
          } else {
            values_row[k] =
                out_values_row[k] * (grad_values_row[k] + tmp_row);
          }
        } else {
          // No matching grad entry: the grad term is implicitly zero.
          if (LogSoftMax) {
            values_row[k] =
                c10::cuda::compat::exp(out_values_row[k]) * tmp_row;
          } else {
            values_row[k] = out_values_row[k] * tmp_row;
          }
        }
      }
    }
    index += step;
  }
}
using thrust_ptr = thrust::device_ptr<int64_t>;
Tensor get_offsets(
    const Tensor& indices,
    const IntArrayRef& sizes,
    const int64_t dim) {
  /*
    Compute one scalar "pool key" per nnz entry: the linear offset of the
    entry's sparse coordinates with the coordinate along `dim` excluded
    (that dim's contribution to the strides is forced to 1 and the dim is
    skipped in the sum). With dim == -1 nothing is skipped, so the key is the
    full linear offset. Entries with equal keys belong to the same pool.

    See ATen/native/sparse/Softmax.cpp:get_offsets for the CPU
    implementation of get_offsets function that this implementation is based on.
  */
  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
  at::cuda::ThrustAllocator allocator;
  auto policy = thrust::cuda::par(allocator).on(stream);
  auto ndim = indices.size(0);
  auto nnz = indices.size(1);
  // Row-major strides over `sizes`, with size along `dim` treated as 1.
  std::vector<int64_t> host_strides(ndim, 1);
  if (ndim > 1) {
    for (int64_t i = ndim - 2; i >= 0; i--) {
      host_strides[i] =
          host_strides[i + 1] * (i + 1 == dim ? 1 : sizes[i + 1]);
    }
  }
  auto strides = at::empty({ndim}, indices.options());
  auto strides_ptr = strides.data_ptr<int64_t>();
  // Async copy is ordered with the thrust work below: both use `stream`.
  AT_CUDA_CHECK(cudaMemcpyAsync(
      strides_ptr, host_strides.data(), host_strides.size() * sizeof(int64_t),
      cudaMemcpyHostToDevice,
      stream));
  auto indices_accessor = indices.packed_accessor<int64_t, 2>();
  Tensor offsets = at::empty({nnz}, indices.options());
  // For each entry x: key = sum over j != dim of strides[j] * indices[j][x].
  thrust::transform(
      policy,
      thrust::make_counting_iterator(int64_t(0)),
      thrust::make_counting_iterator(int64_t(nnz)),
      thrust::device_ptr<int64_t>(offsets.data_ptr<int64_t>()),
      [indices_accessor, strides_ptr, dim, ndim] __device__(int64_t x) {
        int64_t pool_index = 0;
        for (int64_t j = 0; j < ndim; j++) {
          if (j != dim) {
            auto indices_row = indices_accessor[j];
            auto stride = strides_ptr[j];
            pool_index += stride * indices_row[x];
          }
        }
        return pool_index;
      });
  return offsets;
}
template <class scalar_t, bool requireMxRows = true>
std::tuple<Tensor, Tensor, Tensor, Tensor> compute_pool_max(
    const Tensor& indices,
    const Tensor& values,
    const IntArrayRef& sizes,
    int64_t nvalues,
    const int64_t dim) {
  /*
    Return pools of indices that align with the given dimension and the
    corresponding max values for each pool.

    Returns a tuple of:
      sorted_indices - nnz entry ids, reordered so that entries of the same
                       pool are contiguous
      pool_offsets   - start position of each pool within sorted_indices
      pool_sizes     - number of entries per pool
      mx_buffer      - flattened {num_pools, nvalues} buffer of per-pool
                       columnwise maxima of `values`; only filled when
                       requireMxRows is true (left undefined otherwise)

    See ATen/native/sparse/Softmax.cpp:get_offsets and
    ATen/native/sparse/Softmax.cpp:cpu_sparse_coo_softmax for the CPU
    implementation that this implementation is based on.
  */
  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
  at::cuda::ThrustAllocator allocator;
  auto policy = thrust::cuda::par(allocator).on(stream);
  auto nnz = indices.size(1);
  // Scalar pool key per entry; entries sharing a key belong to the same pool.
  auto offsets = get_offsets(indices, sizes, dim);
  int64_t* offsets_ptr = offsets.data_ptr<int64_t>();
  auto sorted_indices = at::empty({nnz}, indices.options());
  thrust_ptr sorted_indices_thrust_ptr(sorted_indices.data_ptr<int64_t>());
  thrust::sequence(
      policy, sorted_indices_thrust_ptr, sorted_indices_thrust_ptr + nnz, 0);
  // Sort entry ids by their pool key so equal keys form contiguous runs.
  thrust::sort(
      policy,
      sorted_indices_thrust_ptr,
      sorted_indices_thrust_ptr + nnz,
      [offsets_ptr] __device__(int64_t x, int64_t y) {
        return offsets_ptr[x] < offsets_ptr[y];
      });
  auto pool_sizes = at::empty({nnz}, indices.options());
  // Count the length of each run of equal keys; counts land in pool_sizes,
  // the keys themselves are discarded.
  auto new_end = thrust::reduce_by_key(
      policy,
      sorted_indices_thrust_ptr,
      sorted_indices_thrust_ptr + nnz,
      thrust::make_constant_iterator(int64_t(1)),
      thrust::make_discard_iterator(),
      thrust_ptr(pool_sizes.data_ptr<int64_t>()),
      [offsets_ptr] __device__(int64_t x, int64_t y) {
        return offsets_ptr[x] == offsets_ptr[y];
      });
  // new_sz = number of distinct pools actually found.
  auto new_sz = thrust::distance(
      thrust_ptr(pool_sizes.data_ptr<int64_t>()), new_end.second);
  pool_sizes.resize_({new_sz});
  auto pool_offsets = pool_sizes.clone();
  thrust_ptr pool_offsets_thrust_ptr(
      pool_offsets.data_ptr<int64_t>());
  // Exclusive prefix sum of sizes gives each pool's start offset.
  thrust::exclusive_scan(
      policy,
      pool_offsets_thrust_ptr,
      pool_offsets_thrust_ptr + new_sz,
      pool_offsets_thrust_ptr);
  Tensor mx_buffer;
  if (requireMxRows) {
    auto values_accessor =
        values.packed_accessor<scalar_t, 2>(); // {nnz, nvalues}
    // Start from -inf so any real value replaces the initial fill.
    mx_buffer = at::full({new_sz * nvalues}, Scalar(-std::numeric_limits<scalar_t>::infinity()), values.options());
    auto mx_buffer_ptr = mx_buffer.data_ptr<scalar_t>();
    auto pool_sizes_ptr = pool_sizes.data_ptr<int64_t>();
    auto sorted_indices_ptr = sorted_indices.data_ptr<int64_t>();
    auto pool_offsets_ptr = pool_offsets.data_ptr<int64_t>();
    // One device-lambda invocation per pool: scan its entries and keep the
    // columnwise running maximum.
    thrust::for_each(
        policy,
        thrust::make_counting_iterator(int64_t(0)),
        thrust::make_counting_iterator(int64_t(new_sz)),
        [values_accessor,
         sorted_indices_ptr,
         pool_sizes_ptr,
         pool_offsets_ptr,
         mx_buffer_ptr,
         nvalues] __device__(int64_t index) {
          int64_t curr_pool_size = pool_sizes_ptr[index];
          auto mx_row = mx_buffer_ptr + index * nvalues;
          int64_t offset = pool_offsets_ptr[index];
          for (int64_t p = 0; p < curr_pool_size; p++) {
            int64_t i = *(sorted_indices_ptr + offset + p);
            auto values_row = values_accessor[i].data();
            for (int64_t j = 0; j < nvalues; j++) {
              mx_row[j] = c10::cuda::compat::max(mx_row[j], values_row[j]);
            }
          }
        });
  }
  return std::make_tuple(
      sorted_indices, pool_offsets, pool_sizes, mx_buffer);
}
template <typename scalar_t, bool LogSoftMax>
void cuda_sparse_coo_softmax(
    Tensor& output,
    const Tensor& input,
    const int64_t dim) {
  /*
    Forward sparse COO softmax (log-softmax when LogSoftMax is true) along
    dimension `dim`, writing indices and values into `output`.

    See ATen/native/sparse/Softmax.cpp:cpu_sparse_coo_softmax for the CPU
    implementation of the sparse softmax algorithm that this implementation is
    based on.
  */
  auto sparse_dim = input.sparse_dim();
  auto indices = input._indices().contiguous();
  auto values = input._values().contiguous();
  auto out_values = output._values();
  auto out_indices = output._indices();
  out_values.resize_as_(values);
  out_indices.resize_as_(indices);
  out_indices.copy_(indices);
  // Softmax over a dense dimension reduces to the dense implementation on the
  // values tensor (dim 0 of values is the nnz axis, hence the +1 shift).
  if (dim >= sparse_dim) {
    if (LogSoftMax) {
      auto new_values =
          at::cuda::_log_softmax(values, dim - sparse_dim + 1, false);
      out_values.set_(new_values);
    } else {
      auto new_values = at::cuda::_softmax(values, dim - sparse_dim + 1, false);
      out_values.set_(new_values);
    }
    return;
  }
  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
  auto nnz = values.size(0);
  auto sizes = input.sizes();
  // Number of scalars carried by each nnz entry (product of the dense dims).
  auto nvalues = values.numel() / nnz;
  /* Prepare accessors */
  auto values_2 = values.view({nnz, nvalues});
  auto values_accessor = values_2.packed_accessor<scalar_t, 2>();
  auto out_values_2 = out_values.view({nnz, nvalues});
  auto out_values_accessor = out_values_2.packed_accessor<scalar_t, 2>();
  Tensor sorted_indices;
  Tensor pool_offsets;
  Tensor pool_sizes;
  Tensor mx_buffer;
  // Group entries into pools sharing all sparse coordinates except `dim`;
  // mx_buffer holds per-pool maxima used for numerically stable exp().
  std::tie(sorted_indices, pool_offsets, pool_sizes, mx_buffer) =
      compute_pool_max<scalar_t, true>(indices, values_2, sizes, nvalues, dim);
  auto pool_size = pool_offsets.size(0);
  // One thread per pool; the kernel uses a grid-stride loop so any grid size
  // is correct.
  int block_size = getNumThreads(pool_size);
  const int grid_size = (pool_size + block_size - 1) / block_size;
  cuda_sparse_coo_softmax_kernel<scalar_t, LogSoftMax>
      <<<grid_size, block_size, 0, stream>>>(
          sorted_indices.data_ptr<int64_t>(),
          pool_size,
          pool_sizes.data_ptr<int64_t>(),
          pool_offsets.data_ptr<int64_t>(),
          nvalues,
          mx_buffer.data_ptr<scalar_t>(),
          values_accessor,
          out_values_accessor);
  C10_CUDA_KERNEL_LAUNCH_CHECK();
}
template <typename scalar_t, bool LogSoftMax>
void cuda_sparse_coo_softmax_backward(
    Tensor& grad_input,
    const Tensor& grad,
    const Tensor& output,
    const int64_t dim,
    ScalarType input_dtype) {
  /*
    Backward pass of sparse COO softmax/log-softmax. `grad` and `output` may
    have different sparsity patterns; entries are matched via their linear
    offsets (pool keys computed with dim == -1, i.e. over all sparse dims).

    See ATen/native/sparse/Softmax.cpp:cpu_sparse_coo_softmax_backward for
    the CPU implementation of the sparse softmax backward algorithm that this
    implementation is based on.
  */
  auto sparse_dim = output.sparse_dim();
  auto sizes = output.sizes().vec();
  auto grad_indices = grad._indices().contiguous();
  auto grad_values = grad._values().contiguous();
  auto out_indices = output._indices().contiguous();
  auto out_values = output._values().contiguous();
  auto values = grad_input._values();
  auto indices = grad_input._indices();
  auto out_nnz = out_values.size(0);
  auto grad_nnz = grad_values.size(0);
  // grad_input inherits output's sparsity pattern; values start at zero.
  values.resize_as_(out_values);
  values.zero_();
  indices.resize_as_(out_indices);
  indices.copy_(out_indices);
  // dim == -1: keys are full linear offsets, used to align out/grad entries.
  auto out_offsets = get_offsets(out_indices, sizes, -1);
  auto grad_offsets = get_offsets(grad_indices, sizes, -1);
  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
  at::cuda::ThrustAllocator allocator;
  auto policy = thrust::cuda::par(allocator).on(stream);
  /* when dim >= sparse_dim the dense backward is used */
  if (dim >= sparse_dim) {
    if (at::native::cuda_equal(out_offsets, grad_offsets) == true) {
      // Identical sparsity patterns: one dense backward over all values.
      if (LogSoftMax) {
        auto r = at::cuda::_log_softmax_backward_data(
            grad_values, out_values, dim - sparse_dim + 1, input_dtype);
        values.set_(r);
      } else {
        auto r = at::cuda::_softmax_backward_data(grad_values, out_values, dim - sparse_dim + 1, input_dtype);
        values.set_(r);
      }
    } else {
      // Patterns differ: match entries one by one on the host via binary
      // search over the (sorted) grad offsets.
      auto host_out_offsets =
          out_offsets.to(at::Device(kCPU), indices.dtype(), false, true);
      auto host_grad_offsets =
          grad_offsets.to(at::Device(kCPU), indices.dtype(), false, true);
      auto out_offsets_accessor = host_out_offsets.data_ptr<int64_t>();
      auto grad_offsets_accessor = host_grad_offsets.data_ptr<int64_t>();
      for (int64_t i = 0; i < out_nnz; i++) {
        auto low = thrust::lower_bound(
            grad_offsets_accessor,
            grad_offsets_accessor + grad_offsets.size(0),
            out_offsets_accessor[i]);
        auto j = low - grad_offsets_accessor;
        /*
          Compute output using dense backward only when limits and pools are valid
          If this check is false then a sparse tensor with full of zeros is returned
        */
        if (j < grad_nnz && out_offsets_accessor[i] == grad_offsets_accessor[j]) {
          if (LogSoftMax) {
            auto r = at::cuda::_log_softmax_backward_data(
                grad_values[j], out_values[i], dim - sparse_dim, input_dtype);
            values[i].copy_(r);
          } else {
            auto r = at::cuda::_softmax_backward_data(
                grad_values[j], out_values[i], dim - sparse_dim, input_dtype);
            values[i].copy_(r);
          }
        }
      }
    }
    return;
  }
  // Sparse-dimension backward: flatten dense dims and run the pooled kernel.
  auto nnz = values.size(0);
  auto nvalues = values.numel() / nnz;
  auto values_2 = values.view({nnz, nvalues});
  auto values_accessor = values_2.packed_accessor<scalar_t, 2>();
  auto out_values_2 = out_values.view({out_nnz, nvalues});
  auto out_values_accessor = out_values_2.packed_accessor<scalar_t, 2>();
  auto grad_values_2 = grad_values.view({grad_nnz, nvalues});
  auto grad_values_accessor = grad_values_2.packed_accessor<scalar_t, 2>();
  // For every output entry, position of its (candidate) match in grad.
  Tensor lower_bound_values =
      at::empty({out_offsets.size(0)}, indices.options());
  thrust::lower_bound(
      policy,
      thrust_ptr(grad_offsets.data_ptr<int64_t>()),
      thrust_ptr(grad_offsets.data_ptr<int64_t>() + grad_offsets.size(0)),
      thrust_ptr(out_offsets.data_ptr<int64_t>()),
      thrust_ptr(out_offsets.data_ptr<int64_t>()) + out_offsets.size(0),
      thrust_ptr(lower_bound_values.data_ptr<int64_t>()));
  Tensor sorted_indices;
  Tensor pool_offsets;
  Tensor pool_sizes;
  /* Compute independent pools of indices */
  std::tie(
      sorted_indices, pool_offsets, pool_sizes, std::ignore) =
      compute_pool_max<scalar_t, false>(
          out_indices, values_2, sizes, nvalues, dim);
  auto pool_size = pool_offsets.size(0);
  int block_size = getNumThreads(pool_size);
  const int grid_size = (pool_size + block_size - 1) / block_size;
  cuda_sparse_coo_softmax_backward_kernel<scalar_t, LogSoftMax>
      <<<grid_size, block_size, 0, stream>>>(
          sorted_indices.data_ptr<int64_t>(),
          pool_size,
          pool_sizes.data_ptr<int64_t>(),
          pool_offsets.data_ptr<int64_t>(),
          nvalues,
          grad_nnz,
          grad_offsets.data_ptr<int64_t>(),
          out_offsets.data_ptr<int64_t>(),
          lower_bound_values.data_ptr<int64_t>(),
          values_accessor,
          out_values_accessor,
          grad_values_accessor);
  C10_CUDA_KERNEL_LAUNCH_CHECK();
}
} // end anonymous namespace
// CUDA entry point for sparse COO softmax: canonicalize the input, then
// dispatch on its floating-point dtype.
Tensor softmax_sparse_cuda(
    const Tensor& input_,
    const int64_t dim,
    const bool half_to_float) {
  auto prepared = softmax_sparse_input_preprocessing(
      input_, dim, half_to_float, "softmax");
  Tensor input = std::get<0>(prepared);
  Tensor output = std::get<1>(prepared);
  // Empty input: the preprocessed output is already the correct result.
  if (input.numel() != 0) {
    AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "softmax", [&] {
      cuda_sparse_coo_softmax<scalar_t, false>(output, input, dim);
    });
  }
  return output;
}
// CUDA entry point for sparse COO log-softmax: canonicalize the input, then
// dispatch on its floating-point dtype.
Tensor log_softmax_sparse_cuda(
    const Tensor& input_,
    const int64_t dim,
    const bool half_to_float) {
  auto prepared = softmax_sparse_input_preprocessing(
      input_, dim, half_to_float, "log_softmax");
  Tensor input = std::get<0>(prepared);
  Tensor output = std::get<1>(prepared);
  // Empty input: the preprocessed output is already the correct result.
  if (input.numel() != 0) {
    AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "log_softmax", [&] {
      cuda_sparse_coo_softmax<scalar_t, true>(output, input, dim);
    });
  }
  return output;
}
// CUDA backward entry point for sparse COO softmax.
Tensor softmax_backward_sparse_cuda(
    const Tensor& grad_,
    const Tensor& output_,
    int64_t dim_,
    const Tensor& input_) {
  auto prepared = softmax_backward_sparse_input_preprocessing(
      grad_, output_, dim_, input_, "softmax_backward");
  Tensor grad_input = std::get<0>(prepared);
  Tensor grad = std::get<1>(prepared);
  Tensor output = std::get<2>(prepared);
  // An empty forward output means there is nothing to differentiate.
  if (output.numel() != 0) {
    AT_DISPATCH_FLOATING_TYPES(grad.scalar_type(), "softmax_backward", [&] {
      cuda_sparse_coo_softmax_backward<scalar_t, false>(
          grad_input, grad, output, dim_, input_.scalar_type());
    });
  }
  return grad_input;
}
// CUDA backward entry point for sparse COO log-softmax.
Tensor log_softmax_backward_sparse_cuda(
    const Tensor& grad_,
    const Tensor& output_,
    int64_t dim_,
    const Tensor& input_) {
  auto prepared = softmax_backward_sparse_input_preprocessing(
      grad_, output_, dim_, input_, "log_softmax_backward");
  Tensor grad_input = std::get<0>(prepared);
  Tensor grad = std::get<1>(prepared);
  Tensor output = std::get<2>(prepared);
  // An empty forward output means there is nothing to differentiate.
  if (output.numel() != 0) {
    AT_DISPATCH_FLOATING_TYPES(grad.scalar_type(), "log_softmax_backward", [&] {
      cuda_sparse_coo_softmax_backward<scalar_t, true>(
          grad_input, grad, output, dim_, input_.scalar_type());
    });
  }
  return grad_input;
}
} // namespace native
} // namespace at
|
b98a885d02a8a301d3e62468963ab03d9879fe80.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <vector>
#include <algorithm>
#include <iostream>
#define TILE_WIDTH 16
// Task 1 - simple matrix multiplication
__global__ void matrix_multiply_simple(float *ma, float *mb, float *mc, size_t width)
{
//TODO: calculate the row & column index of the element
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
float product = 0;
//TODO: do dot product between row of ma and column of mb
for (int i = 0; i < width; ++i) {
product += ma[row * width + i] * mb[i * width + col];
}
//TODO: write result in mc
mc[row * width + col] = product;
}
// Task 2 - optimized matrix multiplication
__global__ void matrix_multiply(float *ma, float *mb, float *mc, size_t width)
{
int tx = threadIdx.x, ty = threadIdx.y;
int bx = blockIdx.x, by = blockIdx.y;
//TODO: allocate 2D tiles in __shared__ memory
__shared__ float ma_tile[TILE_WIDTH][TILE_WIDTH];
__shared__ float mb_tile[TILE_WIDTH][TILE_WIDTH];
//TODO: calculate the row & column index of the element
int row = by * blockDim.y + ty;
int col = bx * blockDim.x + tx;
float result = 0;
// loop over the tiles of the input
for(int t = 0; t < width/TILE_WIDTH; ++t) {
//TODO: load tiles into __shared__ memory allocated before
ma_tile[ty][tx] = ma[row * width + t * TILE_WIDTH + tx];
mb_tile[ty][tx] = mb[(t * TILE_WIDTH + ty) * width + col];
//TODO:
// wait until all data is loaded before allowing
// any thread in this block to continue
__syncthreads();
//TODO: do dot product between row of tile from ma and column of tile from mb
for (int i = 0; i < TILE_WIDTH; ++i) {
result += ma_tile[ty][i] * mb_tile[i][tx];
}
//TODO:
// wait until all data is loaded before allowing
// any thread in this block to continue
__syncthreads();
}
//TODO: write result in mc
mc[row * width + col] = result;
}
int main(void)
{
// create a large workload so we can easily measure the
// performance difference of both implementations
// note that n measures the width of the matrix, not the number of total elements
const size_t n = 1<<10;
const dim3 block_size(TILE_WIDTH,TILE_WIDTH);
const dim3 num_blocks(n / block_size.x, n / block_size.y);
// generate random input on the host
std::vector<float> host_a(n*n), host_b(n*n), host_c(n*n);
for(int i = 0; i < n*n; ++i) {
host_a[i] = static_cast<float>(rand()) / RAND_MAX;
host_b[i] = static_cast<float>(rand()) / RAND_MAX;
}
// allocate storage for the device
float *device_a = 0, *device_b = 0, *device_c = 0;
hipMalloc((void**)&device_a, sizeof(float) * n * n);
hipMalloc((void**)&device_b, sizeof(float) * n * n);
hipMalloc((void**)&device_c, sizeof(float) * n * n);
// copy input to the device
hipMemcpy(device_a, &host_a[0], sizeof(float) * n * n, hipMemcpyHostToDevice);
hipMemcpy(device_b, &host_b[0], sizeof(float) * n * n, hipMemcpyHostToDevice);
//Task 3 - measure the time spent in the kernel for simple and optimized implementation
//TODO: create CUDA events for measuring kernel time
hipEvent_t launch_begin, launch_end;
hipEventCreate(&launch_begin);
hipEventCreate(&launch_end);
// time many kernel launches and take the average time
const size_t num_launches = 100;
float average_simple_time = 0;
std::cout << "Timing simple implementation...";
for(int i = 0; i < num_launches; ++i) {
//TODO: record CUDA event before and after the kernel launch
hipEventRecord(launch_begin, 0);
hipLaunchKernelGGL(( matrix_multiply_simple), dim3(num_blocks),dim3(block_size), 0, 0, device_a, device_b, device_c, n);
hipEventRecord(launch_end, 0);
hipLaunchKernelGGL(( matrix_multiply_simple), dim3(num_blocks),dim3(block_size), 0, 0, device_a, device_b, device_c, n);
//TODO: Wait for launch_end event to complete
hipEventSynchronize(launch_end);
//TODO: measure the time spent in the kernel
float time = 0;
hipEventElapsedTime(&time, launch_begin, launch_end);
average_simple_time += time;
}
average_simple_time /= num_launches;
std::cout << " done." << std::endl;
//now time the optimized kernel
// time many kernel launches and take the average time
float average_optimized_time = 0;
std::cout << "Timing optimized implementation...";
for(int i = 0; i < num_launches; ++i) {
//TODO: record CUDA event before and after the kernel launch
hipEventRecord(launch_begin, 0);
hipLaunchKernelGGL(( matrix_multiply), dim3(num_blocks),dim3(block_size), 0, 0, device_a, device_b, device_c, n);
hipEventRecord(launch_end, 0);
hipLaunchKernelGGL(( matrix_multiply), dim3(num_blocks),dim3(block_size), 0, 0, device_a, device_b, device_c, n);
//TODO: Wait for launch_end event to complete
hipEventSynchronize(launch_end);
//TODO: measure the time spent in the kernel
float time = 0;
hipEventElapsedTime(&time, launch_begin, launch_end);
average_optimized_time += time;
}
average_optimized_time /= num_launches;
std::cout << " done." << std::endl;
// report the effective throughput of each kernel in GFLOPS
// the effective throughput is measured as the number of floating point operations performed per second:
// (one mul + one add) * N^3
float simple_throughput = static_cast<float>(2 * n * n * n) / (average_simple_time / 1000.0f) / 1000000000.0f;
float optimized_throughput = static_cast<float>(2 * n * n * n) / (average_optimized_time / 1000.0f) / 1000000000.0f;
std::cout << "Matrix size: " << n << "x" << n << std::endl;
std::cout << "Tile size: " << TILE_WIDTH << "x" << TILE_WIDTH << std::endl;
std::cout << "Throughput of simple kernel: " << simple_throughput << " GFLOPS" << std::endl;
std::cout << "Throughput of optimized kernel: " << optimized_throughput << " GFLOPS" << std::endl;
std::cout << "Performance improvement: " << optimized_throughput / simple_throughput << "x" << std::endl;
std::cout << std::endl;
//TODO: destroy the CUDA events
hipEventDestroy(launch_begin);
hipEventDestroy(launch_end);
// deallocate device memory
hipFree(device_a);
hipFree(device_b);
hipFree(device_c);
return 0;
}
| b98a885d02a8a301d3e62468963ab03d9879fe80.cu | #include <stdlib.h>
#include <vector>
#include <algorithm>
#include <iostream>
#define TILE_WIDTH 16
// Task 1 - simple matrix multiplication
__global__ void matrix_multiply_simple(float *ma, float *mb, float *mc, size_t width)
{
//TODO: calculate the row & column index of the element
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
float product = 0;
//TODO: do dot product between row of ma and column of mb
for (int i = 0; i < width; ++i) {
product += ma[row * width + i] * mb[i * width + col];
}
//TODO: write result in mc
mc[row * width + col] = product;
}
// Task 2 - optimized matrix multiplication
__global__ void matrix_multiply(float *ma, float *mb, float *mc, size_t width)
{
int tx = threadIdx.x, ty = threadIdx.y;
int bx = blockIdx.x, by = blockIdx.y;
//TODO: allocate 2D tiles in __shared__ memory
__shared__ float ma_tile[TILE_WIDTH][TILE_WIDTH];
__shared__ float mb_tile[TILE_WIDTH][TILE_WIDTH];
//TODO: calculate the row & column index of the element
int row = by * blockDim.y + ty;
int col = bx * blockDim.x + tx;
float result = 0;
// loop over the tiles of the input
for(int t = 0; t < width/TILE_WIDTH; ++t) {
//TODO: load tiles into __shared__ memory allocated before
ma_tile[ty][tx] = ma[row * width + t * TILE_WIDTH + tx];
mb_tile[ty][tx] = mb[(t * TILE_WIDTH + ty) * width + col];
//TODO:
// wait until all data is loaded before allowing
// any thread in this block to continue
__syncthreads();
//TODO: do dot product between row of tile from ma and column of tile from mb
for (int i = 0; i < TILE_WIDTH; ++i) {
result += ma_tile[ty][i] * mb_tile[i][tx];
}
//TODO:
// wait until all data is loaded before allowing
// any thread in this block to continue
__syncthreads();
}
//TODO: write result in mc
mc[row * width + col] = result;
}
int main(void)
{
// create a large workload so we can easily measure the
// performance difference of both implementations
// note that n measures the width of the matrix, not the number of total elements
const size_t n = 1<<10;
const dim3 block_size(TILE_WIDTH,TILE_WIDTH);
const dim3 num_blocks(n / block_size.x, n / block_size.y);
// generate random input on the host
std::vector<float> host_a(n*n), host_b(n*n), host_c(n*n);
for(int i = 0; i < n*n; ++i) {
host_a[i] = static_cast<float>(rand()) / RAND_MAX;
host_b[i] = static_cast<float>(rand()) / RAND_MAX;
}
// allocate storage for the device
float *device_a = 0, *device_b = 0, *device_c = 0;
cudaMalloc((void**)&device_a, sizeof(float) * n * n);
cudaMalloc((void**)&device_b, sizeof(float) * n * n);
cudaMalloc((void**)&device_c, sizeof(float) * n * n);
// copy input to the device
cudaMemcpy(device_a, &host_a[0], sizeof(float) * n * n, cudaMemcpyHostToDevice);
cudaMemcpy(device_b, &host_b[0], sizeof(float) * n * n, cudaMemcpyHostToDevice);
//Task 3 - measure the time spent in the kernel for simple and optimized implementation
//TODO: create CUDA events for measuring kernel time
cudaEvent_t launch_begin, launch_end;
cudaEventCreate(&launch_begin);
cudaEventCreate(&launch_end);
// time many kernel launches and take the average time
const size_t num_launches = 100;
float average_simple_time = 0;
std::cout << "Timing simple implementation...";
for(int i = 0; i < num_launches; ++i) {
//TODO: record CUDA event before and after the kernel launch
cudaEventRecord(launch_begin, 0);
matrix_multiply_simple<<<num_blocks,block_size>>>(device_a, device_b, device_c, n);
cudaEventRecord(launch_end, 0);
matrix_multiply_simple<<<num_blocks,block_size>>>(device_a, device_b, device_c, n);
//TODO: Wait for launch_end event to complete
cudaEventSynchronize(launch_end);
//TODO: measure the time spent in the kernel
float time = 0;
cudaEventElapsedTime(&time, launch_begin, launch_end);
average_simple_time += time;
}
average_simple_time /= num_launches;
std::cout << " done." << std::endl;
//now time the optimized kernel
// time many kernel launches and take the average time
float average_optimized_time = 0;
std::cout << "Timing optimized implementation...";
for(int i = 0; i < num_launches; ++i) {
//TODO: record CUDA event before and after the kernel launch
cudaEventRecord(launch_begin, 0);
matrix_multiply<<<num_blocks,block_size>>>(device_a, device_b, device_c, n);
cudaEventRecord(launch_end, 0);
matrix_multiply<<<num_blocks,block_size>>>(device_a, device_b, device_c, n);
//TODO: Wait for launch_end event to complete
cudaEventSynchronize(launch_end);
//TODO: measure the time spent in the kernel
float time = 0;
cudaEventElapsedTime(&time, launch_begin, launch_end);
average_optimized_time += time;
}
average_optimized_time /= num_launches;
std::cout << " done." << std::endl;
// report the effective throughput of each kernel in GFLOPS
// the effective throughput is measured as the number of floating point operations performed per second:
// (one mul + one add) * N^3
float simple_throughput = static_cast<float>(2 * n * n * n) / (average_simple_time / 1000.0f) / 1000000000.0f;
float optimized_throughput = static_cast<float>(2 * n * n * n) / (average_optimized_time / 1000.0f) / 1000000000.0f;
std::cout << "Matrix size: " << n << "x" << n << std::endl;
std::cout << "Tile size: " << TILE_WIDTH << "x" << TILE_WIDTH << std::endl;
std::cout << "Throughput of simple kernel: " << simple_throughput << " GFLOPS" << std::endl;
std::cout << "Throughput of optimized kernel: " << optimized_throughput << " GFLOPS" << std::endl;
std::cout << "Performance improvement: " << optimized_throughput / simple_throughput << "x" << std::endl;
std::cout << std::endl;
//TODO: destroy the CUDA events
cudaEventDestroy(launch_begin);
cudaEventDestroy(launch_end);
// deallocate device memory
cudaFree(device_a);
cudaFree(device_b);
cudaFree(device_c);
return 0;
}
|
0e03bfc3abf302788767e158e9123acf8bacc20d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math.h>
#include "../global/global.h"
#include "../global/global_cuda.h"
#include "../io/io.h" // provides chprintf
#include "../utils/error_handling.h" // provides chexit
__global__ void Dump_Values_Kernel(Real* device_array, int array_size, int marker)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid >= array_size) {
return;
}
kernel_printf("Dump Values: marker %d tid %d value %g \n", marker, tid, device_array[tid]);
}
/*
Prints out all values of a device_array
*/
void Dump_Values(Real* device_array, int array_size, int marker)
{
int ngrid = (array_size + TPB - 1) / TPB;
dim3 dim1dGrid(ngrid, 1, 1);
dim3 dim1dBlock(TPB, 1, 1);
hipLaunchKernelGGL(Dump_Values_Kernel, dim1dGrid, dim1dBlock, 0, 0, device_array, array_size, marker);
}
__global__ void Check_For_Nan_Kernel(Real* device_array, int array_size, int check_num, bool* out_bool)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid >= array_size) {
return;
}
if (device_array[tid] == device_array[tid]) {
return;
}
out_bool[0] = true;
kernel_printf("Check_For_Nan_Kernel found Nan Checknum: %d Thread: %d\n", check_num, tid);
}
/*
Checks a device_array for NaN and prints/exits if found
*/
void Check_For_Nan(Real* device_array, int array_size, int check_num)
{
bool host_out_bool[1] = {false};
bool* out_bool;
hipMalloc((void**)&out_bool, sizeof(bool));
hipMemcpy(out_bool, host_out_bool, sizeof(bool), hipMemcpyHostToDevice);
int ngrid = (array_size + TPB - 1) / TPB;
dim3 dim1dGrid(ngrid, 1, 1);
dim3 dim1dBlock(TPB, 1, 1);
hipLaunchKernelGGL(Check_For_Nan_Kernel, dim1dGrid, dim1dBlock, 0, 0, device_array, array_size, check_num, out_bool);
hipMemcpy(host_out_bool, out_bool, sizeof(bool), hipMemcpyDeviceToHost);
hipFree(out_bool);
if (host_out_bool[0]) {
chexit(-1);
}
}
| 0e03bfc3abf302788767e158e9123acf8bacc20d.cu | #include <math.h>
#include "../global/global.h"
#include "../global/global_cuda.h"
#include "../io/io.h" // provides chprintf
#include "../utils/error_handling.h" // provides chexit
__global__ void Dump_Values_Kernel(Real* device_array, int array_size, int marker)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid >= array_size) {
return;
}
kernel_printf("Dump Values: marker %d tid %d value %g \n", marker, tid, device_array[tid]);
}
/*
Prints out all values of a device_array
*/
void Dump_Values(Real* device_array, int array_size, int marker)
{
int ngrid = (array_size + TPB - 1) / TPB;
dim3 dim1dGrid(ngrid, 1, 1);
dim3 dim1dBlock(TPB, 1, 1);
hipLaunchKernelGGL(Dump_Values_Kernel, dim1dGrid, dim1dBlock, 0, 0, device_array, array_size, marker);
}
__global__ void Check_For_Nan_Kernel(Real* device_array, int array_size, int check_num, bool* out_bool)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid >= array_size) {
return;
}
if (device_array[tid] == device_array[tid]) {
return;
}
out_bool[0] = true;
kernel_printf("Check_For_Nan_Kernel found Nan Checknum: %d Thread: %d\n", check_num, tid);
}
/*
Checks a device_array for NaN and prints/exits if found
*/
void Check_For_Nan(Real* device_array, int array_size, int check_num)
{
bool host_out_bool[1] = {false};
bool* out_bool;
cudaMalloc((void**)&out_bool, sizeof(bool));
cudaMemcpy(out_bool, host_out_bool, sizeof(bool), cudaMemcpyHostToDevice);
int ngrid = (array_size + TPB - 1) / TPB;
dim3 dim1dGrid(ngrid, 1, 1);
dim3 dim1dBlock(TPB, 1, 1);
hipLaunchKernelGGL(Check_For_Nan_Kernel, dim1dGrid, dim1dBlock, 0, 0, device_array, array_size, check_num, out_bool);
cudaMemcpy(host_out_bool, out_bool, sizeof(bool), cudaMemcpyDeviceToHost);
cudaFree(out_bool);
if (host_out_bool[0]) {
chexit(-1);
}
}
|
9a50ef50f49f056370d653b00889a0545db6b811.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// This is not really C++-code but pretty plain C code, but we compile it
// as C++ so we can integrate with CUDA seamlessly.
// If you plan on submitting your solution for the Parallel Sorting Contest,
// please keep the split into main file and kernel file, so we can easily
// insert other data.
#include <stdio.h>
__device__
static void exchange(int *i, int *j)
{
int k;
k = *i;
*i = *j;
*j = k;
}
__global__
void bitonic_kernel(int *data, int k, int j)
{
int i = threadIdx.x + blockIdx.x*blockDim.x;
int ixj=i^j; // Calculate indexing!
if ((ixj)>i)
{
if ((i&k)==0 && data[i]>data[ixj]) exchange(&data[i],&data[ixj]);
if ((i&k)!=0 && data[i]<data[ixj]) exchange(&data[i],&data[ixj]);
}
}
// No, this is not GPU code yet but just a copy of the CPU code, but this
// is where I want to see your GPU code!
void bitonic_gpu(int *data, int N)
{
printf("hello");
int *dev_data;
int size = N * sizeof(int);
hipMalloc((void**)&dev_data, size);
hipMemcpy(dev_data, data, size, hipMemcpyHostToDevice);
dim3 dimBlock (min(N ,1024), 1);
dim3 dimGrid (N / 1024 + 1, 1);
int j,k;
for (k=2;k<=N;k=2*k) // Outer loop, double size for each step
{
for (j=k>>1;j>0;j=j>>1) // Inner loop, half size for each step
{
hipLaunchKernelGGL(( bitonic_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_data, k, j);
hipDeviceSynchronize();
}
}
hipMemcpy(data, dev_data, size, hipMemcpyDeviceToHost);
hipFree(dev_data);
}
| 9a50ef50f49f056370d653b00889a0545db6b811.cu |
// This is not really C++-code but pretty plain C code, but we compile it
// as C++ so we can integrate with CUDA seamlessly.
// If you plan on submitting your solution for the Parallel Sorting Contest,
// please keep the split into main file and kernel file, so we can easily
// insert other data.
#include <stdio.h>
__device__
static void exchange(int *i, int *j)
{
int k;
k = *i;
*i = *j;
*j = k;
}
__global__
void bitonic_kernel(int *data, int k, int j)
{
int i = threadIdx.x + blockIdx.x*blockDim.x;
int ixj=i^j; // Calculate indexing!
if ((ixj)>i)
{
if ((i&k)==0 && data[i]>data[ixj]) exchange(&data[i],&data[ixj]);
if ((i&k)!=0 && data[i]<data[ixj]) exchange(&data[i],&data[ixj]);
}
}
// No, this is not GPU code yet but just a copy of the CPU code, but this
// is where I want to see your GPU code!
void bitonic_gpu(int *data, int N)
{
printf("hello");
int *dev_data;
int size = N * sizeof(int);
cudaMalloc((void**)&dev_data, size);
cudaMemcpy(dev_data, data, size, cudaMemcpyHostToDevice);
dim3 dimBlock (min(N ,1024), 1);
dim3 dimGrid (N / 1024 + 1, 1);
int j,k;
for (k=2;k<=N;k=2*k) // Outer loop, double size for each step
{
for (j=k>>1;j>0;j=j>>1) // Inner loop, half size for each step
{
bitonic_kernel<<<dimGrid, dimBlock>>>(dev_data, k, j);
cudaThreadSynchronize();
}
}
cudaMemcpy(data, dev_data, size, cudaMemcpyDeviceToHost);
cudaFree(dev_data);
}
|
095d411dec3d3a1a0ced574181f7fddd31f2f1e1.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2018-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include <linalg/cublas_wrappers.h>
#include <test_utils.h>
#include <cuda_utils.cuh>
#include <cuml/decomposition/params.hpp>
#include <pca/pca.cuh>
#include <random/rng.cuh>
#include <vector>
namespace ML {
using namespace MLCommon;
template <typename T>
struct PcaInputs {
T tolerance;
int len;
int n_row;
int n_col;
int len2;
int n_row2;
int n_col2;
unsigned long long int seed;
int algo;
};
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const PcaInputs<T>& dims) {
return os;
}
template <typename T>
class PcaTest : public ::testing::TestWithParam<PcaInputs<T>> {
protected:
void basicTest() {
params = ::testing::TestWithParam<PcaInputs<T>>::GetParam();
Random::Rng r(params.seed, MLCommon::Random::GenTaps);
int len = params.len;
allocate(data, len);
allocate(data_back, len);
allocate(trans_data, len);
allocate(trans_data_ref, len);
std::vector<T> data_h = {1.0, 2.0, 5.0, 4.0, 2.0, 1.0};
data_h.resize(len);
updateDevice(data, data_h.data(), len, stream);
std::vector<T> trans_data_ref_h = {-2.3231, -0.3517, 2.6748,
-0.3979, 0.6571, -0.2592};
trans_data_ref_h.resize(len);
updateDevice(trans_data_ref, trans_data_ref_h.data(), len, stream);
int len_comp = params.n_col * params.n_col;
allocate(components, len_comp);
allocate(explained_vars, params.n_col);
allocate(explained_var_ratio, params.n_col);
allocate(singular_vals, params.n_col);
allocate(mean, params.n_col);
allocate(noise_vars, 1);
std::vector<T> components_ref_h = {0.8163, 0.5776, -0.5776, 0.8163};
components_ref_h.resize(len_comp);
std::vector<T> explained_vars_ref_h = {6.338, 0.3287};
explained_vars_ref_h.resize(params.n_col);
allocate(components_ref, len_comp);
allocate(explained_vars_ref, params.n_col);
updateDevice(components_ref, components_ref_h.data(), len_comp, stream);
updateDevice(explained_vars_ref, explained_vars_ref_h.data(), params.n_col,
stream);
paramsPCA prms;
prms.n_cols = params.n_col;
prms.n_rows = params.n_row;
prms.n_components = params.n_col;
prms.whiten = false;
if (params.algo == 0)
prms.algorithm = solver::COV_EIG_DQ;
else
prms.algorithm = solver::COV_EIG_JACOBI;
pcaFit(handle.getImpl(), data, components, explained_vars,
explained_var_ratio, singular_vals, mean, noise_vars, prms, stream);
pcaTransform(handle.getImpl(), data, components, trans_data, singular_vals,
mean, prms, stream);
pcaInverseTransform(handle.getImpl(), trans_data, components, singular_vals,
mean, data_back, prms, stream);
}
void advancedTest() {
params = ::testing::TestWithParam<PcaInputs<T>>::GetParam();
Random::Rng r(params.seed, MLCommon::Random::GenTaps);
int len = params.len2;
paramsPCA prms;
prms.n_cols = params.n_col2;
prms.n_rows = params.n_row2;
prms.n_components = params.n_col2;
prms.whiten = false;
if (params.algo == 0)
prms.algorithm = solver::COV_EIG_DQ;
else if (params.algo == 1)
prms.algorithm = solver::COV_EIG_JACOBI;
allocate(data2, len);
r.uniform(data2, len, T(-1.0), T(1.0), stream);
allocate(data2_trans, prms.n_rows * prms.n_components);
int len_comp = params.n_col2 * prms.n_components;
allocate(components2, len_comp);
allocate(explained_vars2, prms.n_components);
allocate(explained_var_ratio2, prms.n_components);
allocate(singular_vals2, prms.n_components);
allocate(mean2, prms.n_cols);
allocate(noise_vars2, 1);
pcaFitTransform(handle.getImpl(), data2, data2_trans, components2,
explained_vars2, explained_var_ratio2, singular_vals2,
mean2, noise_vars2, prms, stream);
allocate(data2_back, len);
pcaInverseTransform(handle.getImpl(), data2_trans, components2,
singular_vals2, mean2, data2_back, prms, stream);
}
void SetUp() override {
CUDA_CHECK(hipStreamCreate(&stream));
handle.setStream(stream);
basicTest();
advancedTest();
}
void TearDown() override {
CUDA_CHECK(hipFree(data));
CUDA_CHECK(hipFree(components));
CUDA_CHECK(hipFree(trans_data));
CUDA_CHECK(hipFree(data_back));
CUDA_CHECK(hipFree(trans_data_ref));
CUDA_CHECK(hipFree(explained_vars));
CUDA_CHECK(hipFree(explained_var_ratio));
CUDA_CHECK(hipFree(singular_vals));
CUDA_CHECK(hipFree(mean));
CUDA_CHECK(hipFree(noise_vars));
CUDA_CHECK(hipFree(components_ref));
CUDA_CHECK(hipFree(explained_vars_ref));
CUDA_CHECK(hipFree(data2));
CUDA_CHECK(hipFree(data2_trans));
CUDA_CHECK(hipFree(data2_back));
CUDA_CHECK(hipFree(components2));
CUDA_CHECK(hipFree(explained_vars2));
CUDA_CHECK(hipFree(explained_var_ratio2));
CUDA_CHECK(hipFree(singular_vals2));
CUDA_CHECK(hipFree(mean2));
CUDA_CHECK(hipFree(noise_vars2));
CUDA_CHECK(hipStreamDestroy(stream));
}
protected:
PcaInputs<T> params;
T *data, *trans_data, *data_back, *components, *explained_vars,
*explained_var_ratio, *singular_vals, *mean, *noise_vars, *trans_data_ref,
*components_ref, *explained_vars_ref;
T *data2, *data2_trans, *data2_back, *components2, *explained_vars2,
*explained_var_ratio2, *singular_vals2, *mean2, *noise_vars2;
cumlHandle handle;
hipStream_t stream;
};
const std::vector<PcaInputs<float>> inputsf2 = {
{0.01f, 3 * 2, 3, 2, 1024 * 128, 1024, 128, 1234ULL, 0},
{0.01f, 3 * 2, 3, 2, 256 * 32, 256, 32, 1234ULL, 1}};
const std::vector<PcaInputs<double>> inputsd2 = {
{0.01, 3 * 2, 3, 2, 1024 * 128, 1024, 128, 1234ULL, 0},
{0.01, 3 * 2, 3, 2, 256 * 32, 256, 32, 1234ULL, 1}};
typedef PcaTest<float> PcaTestValF;
TEST_P(PcaTestValF, Result) {
ASSERT_TRUE(devArrMatch(explained_vars, explained_vars_ref, params.n_col,
CompareApproxAbs<float>(params.tolerance)));
}
typedef PcaTest<double> PcaTestValD;
TEST_P(PcaTestValD, Result) {
ASSERT_TRUE(devArrMatch(explained_vars, explained_vars_ref, params.n_col,
CompareApproxAbs<double>(params.tolerance)));
}
typedef PcaTest<float> PcaTestLeftVecF;
TEST_P(PcaTestLeftVecF, Result) {
ASSERT_TRUE(devArrMatch(components, components_ref,
(params.n_col * params.n_col),
CompareApproxAbs<float>(params.tolerance)));
}
typedef PcaTest<double> PcaTestLeftVecD;
TEST_P(PcaTestLeftVecD, Result) {
ASSERT_TRUE(devArrMatch(components, components_ref,
(params.n_col * params.n_col),
CompareApproxAbs<double>(params.tolerance)));
}
typedef PcaTest<float> PcaTestTransDataF;
TEST_P(PcaTestTransDataF, Result) {
ASSERT_TRUE(devArrMatch(trans_data, trans_data_ref,
(params.n_row * params.n_col),
CompareApproxAbs<float>(params.tolerance)));
}
typedef PcaTest<double> PcaTestTransDataD;
TEST_P(PcaTestTransDataD, Result) {
ASSERT_TRUE(devArrMatch(trans_data, trans_data_ref,
(params.n_row * params.n_col),
CompareApproxAbs<double>(params.tolerance)));
}
typedef PcaTest<float> PcaTestDataVecSmallF;
TEST_P(PcaTestDataVecSmallF, Result) {
ASSERT_TRUE(devArrMatch(data, data_back, (params.n_col * params.n_col),
CompareApproxAbs<float>(params.tolerance)));
}
typedef PcaTest<double> PcaTestDataVecSmallD;
TEST_P(PcaTestDataVecSmallD, Result) {
ASSERT_TRUE(devArrMatch(data, data_back, (params.n_col * params.n_col),
CompareApproxAbs<double>(params.tolerance)));
}
// FIXME: These tests are disabled due to driver 418+ making them fail:
// https://github.com/rapidsai/cuml/issues/379
typedef PcaTest<float> PcaTestDataVecF;
TEST_P(PcaTestDataVecF, Result) {
ASSERT_TRUE(devArrMatch(data2, data2_back, (params.n_col2 * params.n_col2),
CompareApproxAbs<float>(params.tolerance)));
}
typedef PcaTest<double> PcaTestDataVecD;
TEST_P(PcaTestDataVecD, Result) {
ASSERT_TRUE(devArrMatch(data2, data2_back, (params.n_col2 * params.n_col2),
CompareApproxAbs<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(PcaTests, PcaTestValF, ::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_CASE_P(PcaTests, PcaTestValD, ::testing::ValuesIn(inputsd2));
INSTANTIATE_TEST_CASE_P(PcaTests, PcaTestLeftVecF,
::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_CASE_P(PcaTests, PcaTestLeftVecD,
::testing::ValuesIn(inputsd2));
INSTANTIATE_TEST_CASE_P(PcaTests, PcaTestDataVecSmallF,
::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_CASE_P(PcaTests, PcaTestDataVecSmallD,
::testing::ValuesIn(inputsd2));
INSTANTIATE_TEST_CASE_P(PcaTests, PcaTestTransDataF,
::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_CASE_P(PcaTests, PcaTestTransDataD,
::testing::ValuesIn(inputsd2));
INSTANTIATE_TEST_CASE_P(PcaTests, PcaTestDataVecF,
::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_CASE_P(PcaTests, PcaTestDataVecD,
::testing::ValuesIn(inputsd2));
} // end namespace ML
| 095d411dec3d3a1a0ced574181f7fddd31f2f1e1.cu | /*
* Copyright (c) 2018-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include <linalg/cublas_wrappers.h>
#include <test_utils.h>
#include <cuda_utils.cuh>
#include <cuml/decomposition/params.hpp>
#include <pca/pca.cuh>
#include <random/rng.cuh>
#include <vector>
namespace ML {
using namespace MLCommon;
template <typename T>
struct PcaInputs {
T tolerance;
int len;
int n_row;
int n_col;
int len2;
int n_row2;
int n_col2;
unsigned long long int seed;
int algo;
};
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const PcaInputs<T>& dims) {
return os;
}
template <typename T>
class PcaTest : public ::testing::TestWithParam<PcaInputs<T>> {
protected:
void basicTest() {
params = ::testing::TestWithParam<PcaInputs<T>>::GetParam();
Random::Rng r(params.seed, MLCommon::Random::GenTaps);
int len = params.len;
allocate(data, len);
allocate(data_back, len);
allocate(trans_data, len);
allocate(trans_data_ref, len);
std::vector<T> data_h = {1.0, 2.0, 5.0, 4.0, 2.0, 1.0};
data_h.resize(len);
updateDevice(data, data_h.data(), len, stream);
std::vector<T> trans_data_ref_h = {-2.3231, -0.3517, 2.6748,
-0.3979, 0.6571, -0.2592};
trans_data_ref_h.resize(len);
updateDevice(trans_data_ref, trans_data_ref_h.data(), len, stream);
int len_comp = params.n_col * params.n_col;
allocate(components, len_comp);
allocate(explained_vars, params.n_col);
allocate(explained_var_ratio, params.n_col);
allocate(singular_vals, params.n_col);
allocate(mean, params.n_col);
allocate(noise_vars, 1);
std::vector<T> components_ref_h = {0.8163, 0.5776, -0.5776, 0.8163};
components_ref_h.resize(len_comp);
std::vector<T> explained_vars_ref_h = {6.338, 0.3287};
explained_vars_ref_h.resize(params.n_col);
allocate(components_ref, len_comp);
allocate(explained_vars_ref, params.n_col);
updateDevice(components_ref, components_ref_h.data(), len_comp, stream);
updateDevice(explained_vars_ref, explained_vars_ref_h.data(), params.n_col,
stream);
paramsPCA prms;
prms.n_cols = params.n_col;
prms.n_rows = params.n_row;
prms.n_components = params.n_col;
prms.whiten = false;
if (params.algo == 0)
prms.algorithm = solver::COV_EIG_DQ;
else
prms.algorithm = solver::COV_EIG_JACOBI;
pcaFit(handle.getImpl(), data, components, explained_vars,
explained_var_ratio, singular_vals, mean, noise_vars, prms, stream);
pcaTransform(handle.getImpl(), data, components, trans_data, singular_vals,
mean, prms, stream);
pcaInverseTransform(handle.getImpl(), trans_data, components, singular_vals,
mean, data_back, prms, stream);
}
void advancedTest() {
params = ::testing::TestWithParam<PcaInputs<T>>::GetParam();
Random::Rng r(params.seed, MLCommon::Random::GenTaps);
int len = params.len2;
paramsPCA prms;
prms.n_cols = params.n_col2;
prms.n_rows = params.n_row2;
prms.n_components = params.n_col2;
prms.whiten = false;
if (params.algo == 0)
prms.algorithm = solver::COV_EIG_DQ;
else if (params.algo == 1)
prms.algorithm = solver::COV_EIG_JACOBI;
allocate(data2, len);
r.uniform(data2, len, T(-1.0), T(1.0), stream);
allocate(data2_trans, prms.n_rows * prms.n_components);
int len_comp = params.n_col2 * prms.n_components;
allocate(components2, len_comp);
allocate(explained_vars2, prms.n_components);
allocate(explained_var_ratio2, prms.n_components);
allocate(singular_vals2, prms.n_components);
allocate(mean2, prms.n_cols);
allocate(noise_vars2, 1);
pcaFitTransform(handle.getImpl(), data2, data2_trans, components2,
explained_vars2, explained_var_ratio2, singular_vals2,
mean2, noise_vars2, prms, stream);
allocate(data2_back, len);
pcaInverseTransform(handle.getImpl(), data2_trans, components2,
singular_vals2, mean2, data2_back, prms, stream);
}
void SetUp() override {
CUDA_CHECK(cudaStreamCreate(&stream));
handle.setStream(stream);
basicTest();
advancedTest();
}
void TearDown() override {
CUDA_CHECK(cudaFree(data));
CUDA_CHECK(cudaFree(components));
CUDA_CHECK(cudaFree(trans_data));
CUDA_CHECK(cudaFree(data_back));
CUDA_CHECK(cudaFree(trans_data_ref));
CUDA_CHECK(cudaFree(explained_vars));
CUDA_CHECK(cudaFree(explained_var_ratio));
CUDA_CHECK(cudaFree(singular_vals));
CUDA_CHECK(cudaFree(mean));
CUDA_CHECK(cudaFree(noise_vars));
CUDA_CHECK(cudaFree(components_ref));
CUDA_CHECK(cudaFree(explained_vars_ref));
CUDA_CHECK(cudaFree(data2));
CUDA_CHECK(cudaFree(data2_trans));
CUDA_CHECK(cudaFree(data2_back));
CUDA_CHECK(cudaFree(components2));
CUDA_CHECK(cudaFree(explained_vars2));
CUDA_CHECK(cudaFree(explained_var_ratio2));
CUDA_CHECK(cudaFree(singular_vals2));
CUDA_CHECK(cudaFree(mean2));
CUDA_CHECK(cudaFree(noise_vars2));
CUDA_CHECK(cudaStreamDestroy(stream));
}
protected:
PcaInputs<T> params;
T *data, *trans_data, *data_back, *components, *explained_vars,
*explained_var_ratio, *singular_vals, *mean, *noise_vars, *trans_data_ref,
*components_ref, *explained_vars_ref;
T *data2, *data2_trans, *data2_back, *components2, *explained_vars2,
*explained_var_ratio2, *singular_vals2, *mean2, *noise_vars2;
cumlHandle handle;
cudaStream_t stream;
};
const std::vector<PcaInputs<float>> inputsf2 = {
{0.01f, 3 * 2, 3, 2, 1024 * 128, 1024, 128, 1234ULL, 0},
{0.01f, 3 * 2, 3, 2, 256 * 32, 256, 32, 1234ULL, 1}};
const std::vector<PcaInputs<double>> inputsd2 = {
{0.01, 3 * 2, 3, 2, 1024 * 128, 1024, 128, 1234ULL, 0},
{0.01, 3 * 2, 3, 2, 256 * 32, 256, 32, 1234ULL, 1}};
typedef PcaTest<float> PcaTestValF;
TEST_P(PcaTestValF, Result) {
ASSERT_TRUE(devArrMatch(explained_vars, explained_vars_ref, params.n_col,
CompareApproxAbs<float>(params.tolerance)));
}
typedef PcaTest<double> PcaTestValD;
TEST_P(PcaTestValD, Result) {
ASSERT_TRUE(devArrMatch(explained_vars, explained_vars_ref, params.n_col,
CompareApproxAbs<double>(params.tolerance)));
}
typedef PcaTest<float> PcaTestLeftVecF;
TEST_P(PcaTestLeftVecF, Result) {
ASSERT_TRUE(devArrMatch(components, components_ref,
(params.n_col * params.n_col),
CompareApproxAbs<float>(params.tolerance)));
}
typedef PcaTest<double> PcaTestLeftVecD;
TEST_P(PcaTestLeftVecD, Result) {
ASSERT_TRUE(devArrMatch(components, components_ref,
(params.n_col * params.n_col),
CompareApproxAbs<double>(params.tolerance)));
}
typedef PcaTest<float> PcaTestTransDataF;
TEST_P(PcaTestTransDataF, Result) {
ASSERT_TRUE(devArrMatch(trans_data, trans_data_ref,
(params.n_row * params.n_col),
CompareApproxAbs<float>(params.tolerance)));
}
typedef PcaTest<double> PcaTestTransDataD;
TEST_P(PcaTestTransDataD, Result) {
ASSERT_TRUE(devArrMatch(trans_data, trans_data_ref,
(params.n_row * params.n_col),
CompareApproxAbs<double>(params.tolerance)));
}
typedef PcaTest<float> PcaTestDataVecSmallF;
TEST_P(PcaTestDataVecSmallF, Result) {
ASSERT_TRUE(devArrMatch(data, data_back, (params.n_col * params.n_col),
CompareApproxAbs<float>(params.tolerance)));
}
typedef PcaTest<double> PcaTestDataVecSmallD;
TEST_P(PcaTestDataVecSmallD, Result) {
ASSERT_TRUE(devArrMatch(data, data_back, (params.n_col * params.n_col),
CompareApproxAbs<double>(params.tolerance)));
}
// FIXME: These tests are disabled due to driver 418+ making them fail:
// https://github.com/rapidsai/cuml/issues/379
// NOTE(review): despite the FIXME above, these suites carry no DISABLED_
// prefix and are still registered via INSTANTIATE_TEST_CASE_P below, so they
// appear to run — confirm whether they should be renamed DISABLED_*.
// Round-trip check on the large (n_col2 x n_col2) dataset.
typedef PcaTest<float> PcaTestDataVecF;
TEST_P(PcaTestDataVecF, Result) {
  ASSERT_TRUE(devArrMatch(data2, data2_back, (params.n_col2 * params.n_col2),
                          CompareApproxAbs<float>(params.tolerance)));
}
typedef PcaTest<double> PcaTestDataVecD;
TEST_P(PcaTestDataVecD, Result) {
  ASSERT_TRUE(devArrMatch(data2, data2_back, (params.n_col2 * params.n_col2),
                          CompareApproxAbs<double>(params.tolerance)));
}
// Register every parameterized suite over the float/double input sets.
INSTANTIATE_TEST_CASE_P(PcaTests, PcaTestValF, ::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_CASE_P(PcaTests, PcaTestValD, ::testing::ValuesIn(inputsd2));
INSTANTIATE_TEST_CASE_P(PcaTests, PcaTestLeftVecF,
                        ::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_CASE_P(PcaTests, PcaTestLeftVecD,
                        ::testing::ValuesIn(inputsd2));
INSTANTIATE_TEST_CASE_P(PcaTests, PcaTestDataVecSmallF,
                        ::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_CASE_P(PcaTests, PcaTestDataVecSmallD,
                        ::testing::ValuesIn(inputsd2));
INSTANTIATE_TEST_CASE_P(PcaTests, PcaTestTransDataF,
                        ::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_CASE_P(PcaTests, PcaTestTransDataD,
                        ::testing::ValuesIn(inputsd2));
INSTANTIATE_TEST_CASE_P(PcaTests, PcaTestDataVecF,
                        ::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_CASE_P(PcaTests, PcaTestDataVecD,
                        ::testing::ValuesIn(inputsd2));
}  // end namespace ML
|
2331a3151612491fad350aac4b6c9d89880c8364.hip | // !!! This is a file automatically generated by hipify!!!
#include "imageAlignment.h"
#include <cassert>
#include <cmath>
#include <sophus/se3.hpp>
#include <NVX/nvxcu.h>
#include <hip/driver_types.h>
#include <hip/hip_runtime.h>
#include <limits>
#include <inttypes.h>
#include <iomanip>
#include <rocblas.h>
#include <chrono>
#include "utils/asserts.h"
#include "utils/nvxcu_debug.h"
#include "utils/nvxcu_utils.h"
#include "utils/nvxcu_imageSave.h"
#include "cuda/imageUtils.h"
#include "primitives/DVOPoint.h"
using Vector6d = Eigen::Matrix<double, 6, 1>;
using Matrix6d = Eigen::Matrix<double, 6, 6>;
// One thread per 3D point: warp the point into the current camera frame with
// the candidate pose (rotation, translation), project it, sample the current
// image and its gradients there, and emit one row of the 6-DoF photometric
// Jacobian plus the intensity residual.
//
// Storage layout: Jacobian column c of point p lives at Jptr[p + c*nPoints]
// (column-major nPoints x 6, matching the cuBLAS calls in alignImages).
// Points projecting outside the image get zeroed rows so the downstream
// reductions over all nPoints stay well-formed; only in-bounds points bump
// *nActivePointsPtr.
// NOTE(review): this kernel only increments the counter — the caller must
// zero *nActivePointsPtr before each launch; verify that it does.
__global__
void calculateJacobianAndResidual(const Eigen::Matrix3d rotation, const Eigen::Vector3d translation,
                                  const hipTextureObject_t currentImageTexture,
                                  const hipTextureObject_t gradXTexture, const hipTextureObject_t gradYTexture,
                                  const uint32_t imageWidth, const int32_t imageHeight,
                                  const IntrinsicParameters intrinsicParameters,
                                  const DVOPoint* pointArray, const uint32_t nPoints,
                                  double* Jptr, double* residualPtr, uint32_t* nActivePointsPtr)
{
    const uint32_t pointId = blockIdx.x*blockDim.x + threadIdx.x;
    if (pointId >= nPoints) { return; }  // guard the grid tail
    const DVOPoint point = pointArray[pointId];
    // Rigid transform into the current camera frame.
    const Eigen::Vector3d projectedPoint = rotation * point.pos + translation;
    const double& x = projectedPoint(0);
    const double& y = projectedPoint(1);
    const double& z = projectedPoint(2);
    const double& fx = intrinsicParameters.fx;
    const double& fy = intrinsicParameters.fy;
    const double& cx = intrinsicParameters.cx;
    const double& cy = intrinsicParameters.cy;
    // Pinhole projection to pixel coordinates.
    const double u = fx*x/z + cx;
    const double v = fy*y/z + cy;
    // Keep a 1-pixel border so the bilinear texture fetches stay in bounds.
    if (u >= 0.0 and u < static_cast<double>(imageWidth-1) and v >= 0.0 and v < static_cast<double>(imageHeight-1)) // Is the point inside the image
    {
        // +0.5 shifts to texel centers for the hardware's linear filtering
        // (textures are created with hipFilterModeLinear in alignImages).
        const float gradXValue = tex2D<float>(gradXTexture, static_cast<float>(u + 0.5), static_cast<float>(v + 0.5));
        const float gradYValue = tex2D<float>(gradYTexture, static_cast<float>(u + 0.5), static_cast<float>(v + 0.5));
        const double Ix = static_cast<double>(gradXValue);
        const double Iy = static_cast<double>(gradYValue);
        const double z_div = 1.0/z;
        const double z2_div = 1.0/(z*z);
        // Chain rule: image gradient times the derivative of the projection
        // w.r.t. the se(3) twist (three translation columns, then rotation).
        Jptr[pointId + 0*nPoints] = (Ix*fx)*z_div;
        Jptr[pointId + 1*nPoints] = (Iy*fy)*z_div;
        Jptr[pointId + 2*nPoints] = -(Ix*fx*x)*z2_div - (Iy*fy*y)*z2_div;
        Jptr[pointId + 3*nPoints] = -Iy*fy - y*((Ix*fx*x) + (Iy*fy*y))*z2_div;
        Jptr[pointId + 4*nPoints] = Ix*fx + x*((Ix*fx*x) + (Iy*fy*y))*z2_div;
        Jptr[pointId + 5*nPoints] = (Iy*fy*x)*z_div - (Ix*fx*y)*z_div;
        // Residual: normalized-float texture read minus the stored 8-bit
        // reference intensity rescaled to [0,1].
        const double currentImageValue = static_cast<double>(tex2D<float>(currentImageTexture, static_cast<float>(u + 0.5), static_cast<float>(v + 0.5)));
        const double previousImageValue = static_cast<double>(point.intensity)/std::numeric_limits<uint8_t>::max();
        const double residual = currentImageValue - previousImageValue;
        residualPtr[pointId] = residual;
        atomicInc(nActivePointsPtr, 0xFFFFFFFF); // Count up the number of points that are active in the optimization
    }
    else
    {
        // Out of view: zero the row so reductions over all nPoints are unaffected.
        Jptr[pointId + 0*nPoints] = 0.0;
        Jptr[pointId + 1*nPoints] = 0.0;
        Jptr[pointId + 2*nPoints] = 0.0;
        Jptr[pointId + 3*nPoints] = 0.0;
        Jptr[pointId + 4*nPoints] = 0.0;
        Jptr[pointId + 5*nPoints] = 0.0;
        residualPtr[pointId] = 0.0;
    }
}
// Sums `val` across the lanes of the calling warp with shuffle-down; the full
// total lands in lane 0 (other lanes end with partial sums).
// NOTE(review): the participant mask comes from __activemask(), which under
// independent thread scheduling reflects whichever lanes happen to be
// converged, not necessarily the intended set. All call sites in this file
// invoke it with a fully active warp, but an explicit mask would be safer —
// confirm before reusing elsewhere.
__inline__ __device__
double warpReduceSum(double val)
{
    const unsigned int mask = __activemask();
    // Halve the shuffle distance each step: 16, 8, 4, 2, 1.
    for (int offset = warpSize/2; offset > 0; offset /= 2)
    {
        val += __shfl_down_sync(mask, val, offset);
    }
    return val;
}
// Block-wide sum: each warp reduces internally, lane 0 of each warp parks its
// partial in shared memory, then warp 0 reduces the partials. The grand total
// is valid only in thread 0. Must be called by every thread of the block
// (contains __syncthreads()).
// Capacity: shared[32] supports up to 32 warps = 1024 threads, which matches
// all launch configurations in this file.
// NOTE(review): assumes blockDim.x is a multiple of warpSize — with a partial
// last warp the blockDim.x/warpSize truncation below would drop its partial;
// confirm if block sizes other than 1024 are ever used.
__inline__ __device__
double blockReduceSum(double val)
{
    static __shared__ double shared[32];  // one slot per warp
    const int lane = threadIdx.x % warpSize;
    const int wid = threadIdx.x / warpSize;
    val = warpReduceSum(val); // Each warp performs partial reduction
    if (lane == 0) { shared[wid] = val; } // Write reduced value to shared memory
    __syncthreads();
    //read from shared memory only if that warp existed
    val = (threadIdx.x < blockDim.x / warpSize) ? shared[lane] : 0.0;
    //Final reduce within first warp
    if (wid == 0) { val = warpReduceSum(val); }
    return val;
}
// Partial sum of in[0..N): each block accumulates a grid-stride slice,
// reduces it block-wide, and thread 0 writes the block's total to
// out[blockIdx.x]. Run a second 1-block pass over `out` to get the full sum.
__global__ void deviceReduceSum(double* in, double* out, const int N)
{
    const uint32_t firstIndex = blockIdx.x * blockDim.x + threadIdx.x;
    const uint32_t gridStride = blockDim.x * gridDim.x;
    double partial = 0.0;
    // Grid-stride loop: correct for any launch configuration.
    for (int i = firstIndex; i < N; i += gridStride)
    {
        partial += in[i];
    }
    // All threads must participate (blockReduceSum contains a barrier).
    const double blockTotal = blockReduceSum(partial);
    if (threadIdx.x == 0)
    {
        out[blockIdx.x] = blockTotal;
    }
}
// Writes each residual's squared deviation from the mean residual into
// `weights` (staging for a subsequent variance reduction).
// The mean divides the residual sum by the active-point count; inactive
// points were written residual 0.0, so they do not bias the numerator.
// NOTE(review): inactive entries still emit (-mean)^2 here, adding ~mean^2
// per inactive point to the later variance sum — confirm this is intended.
// NOTE(review): no guard against *nActivePointsPtr == 0 (division by zero).
__global__ void calculateStdDev(double* residualArray, double* weights, double* residualSum, const uint32_t nArrayElements, uint32_t* nActivePointsPtr)
{
    const uint32_t arrayIndex = blockIdx.x * blockDim.x + threadIdx.x;
    if (arrayIndex >= nArrayElements) { return; }
    const double mean = *residualSum / (*nActivePointsPtr);
    const double diff = residualArray[arrayIndex] - mean;
    weights[arrayIndex] = diff*diff;
}
// Huber influence weights: residuals within the threshold k keep weight 1,
// larger residuals are down-weighted by k / |r|.
__global__ void calculateHuberWeights(double* residualArray, double* weightArray, double* variancePtr, const uint32_t nArrayElements, uint32_t* nActivePointsPtr)
{
    const uint32_t idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= nArrayElements) { return; }
    // k = 1.345 * sigma; 1.345 is the customary Huber tuning constant giving
    // 95% asymptotic efficiency under a Gaussian error model.
    const double sigma = sqrt((1.0/(*nActivePointsPtr))*(*variancePtr));
    const double k = 1.345*sigma;
    const double absResidual = abs(residualArray[idx]);
    weightArray[idx] = (absResidual > k) ? (k/absResidual) : 1.0;
}
// Scales every Jacobian row by its per-point robust weight:
// Jw[p, c] = J[p, c] * w[p]. Columns are stored at stride nPoints
// (column-major nPoints x 6), matching calculateJacobianAndResidual.
__global__ void calculateWeightedJacobian(double* Jwptr, double* Jptr, double* weightsPtr, const uint32_t nPoints)
{
    const uint32_t pointId = blockIdx.x*blockDim.x + threadIdx.x;
    if (pointId >= nPoints) { return; }
    const double weight = weightsPtr[pointId];
    #pragma unroll
    for (int col = 0; col < 6; ++col)
    {
        Jwptr[pointId + col*nPoints] = Jptr[pointId + col*nPoints]*weight;
    }
}
// Rounds `size` up to the next multiple of 8 bytes, so every scratch buffer
// carved out below starts on an 8-byte (double-aligned) boundary.
inline size_t toNext8(size_t size)
{
    const size_t remainder = size % 8;
    return (remainder == 0) ? size : size + (8 - remainder);
}
// Computes the device scratch size alignImages needs for a base image of the
// given dimensions (host scratch is zero). Must stay in lockstep with
// createAlignImageBuffers: same buffers, same order, same 8-byte-aligned
// sizes.
nvxcu_tmp_buf_size_t alignImagesBuffSize(const uint32_t baseImageWidth, const uint32_t baseImageHeight)
{
    nvxcu_tmp_buf_size_t tmpBufSize{
        .dev_buf_size = 0,
        .host_buf_size = 0,
    };
    // Worst case: every pixel of the base pyramid level becomes a point.
    const uint32_t maxNumberOfPoints = baseImageWidth*baseImageHeight;
    tmpBufSize.dev_buf_size += toNext8(sizeof(double)); // d_tmpSumValue
    tmpBufSize.dev_buf_size += toNext8(maxNumberOfPoints * sizeof(double)); // d_tmpSumArray
    tmpBufSize.dev_buf_size += toNext8(sizeof(uint32_t)); // d_nActivePoints
    tmpBufSize.dev_buf_size += toNext8(maxNumberOfPoints * 6 * sizeof(double)); // d_J
    tmpBufSize.dev_buf_size += toNext8(maxNumberOfPoints * 6 * sizeof(double)); // d_Jw
    tmpBufSize.dev_buf_size += toNext8(maxNumberOfPoints * sizeof(double)); // d_residual
    tmpBufSize.dev_buf_size += toNext8(maxNumberOfPoints * sizeof(double)); // d_weights
    tmpBufSize.dev_buf_size += toNext8(6 * 6 * sizeof(double)); // d_A
    tmpBufSize.dev_buf_size += toNext8(6 * sizeof(double)); // d_b
    return tmpBufSize;
}
// Partitions a pre-allocated device buffer (sized by alignImagesBuffSize)
// into the named scratch buffers used by alignImages. Carving order and
// per-buffer sizes must match alignImagesBuffSize exactly.
AlignImageBuffers createAlignImageBuffers(void* tmpBuffer, const uint32_t baseImageWidth, const uint32_t baseImageHeight)
{
    const uint32_t maxNumberOfPoints = baseImageWidth*baseImageHeight;
    AlignImageBuffers buffers{};
    // Walk a byte cursor through the buffer, advancing by 8-byte-aligned sizes.
    uint8_t* bytePtr = reinterpret_cast<uint8_t*>(tmpBuffer);
    buffers.d_tmpSumValue = reinterpret_cast<double*>(bytePtr);
    bytePtr += toNext8(sizeof(double));
    buffers.d_tmpSumArray = reinterpret_cast<double*>(bytePtr);
    bytePtr += toNext8(maxNumberOfPoints * sizeof(double));
    buffers.d_nActivePoints = reinterpret_cast<uint32_t*>(bytePtr);
    bytePtr += toNext8(sizeof(uint32_t));
    buffers.d_J = reinterpret_cast<double*>(bytePtr);
    bytePtr += toNext8(maxNumberOfPoints * 6 * sizeof(double));
    buffers.d_Jw = reinterpret_cast<double*>(bytePtr);
    bytePtr += toNext8(maxNumberOfPoints * 6 * sizeof(double));
    buffers.d_residual = reinterpret_cast<double*>(bytePtr);
    bytePtr += toNext8(maxNumberOfPoints * sizeof(double));
    buffers.d_weights = reinterpret_cast<double*>(bytePtr);
    bytePtr += toNext8(maxNumberOfPoints * sizeof(double));
    buffers.d_A = reinterpret_cast<double*>(bytePtr);
    bytePtr += toNext8(6 * 6 * sizeof(double));
    buffers.d_b = reinterpret_cast<double*>(bytePtr);
    //bytePtr += toNext8(6*sizeof(double));
    return buffers;
}
// Coarse-to-fine direct (photometric) image alignment, DVO-style.
// Estimates the 6-DoF twist xi warping the previous frame onto the current
// one by iteratively re-weighted Gauss-Newton with Huber weights and a
// Gaussian motion prior pulling towards initialXi.
//
// Inputs: image/gradient/depth pyramids (same geometry; U8 image, S16
// gradients), a device point-array scratch, base-level intrinsics, the
// initial twist, pre-carved device buffers (see createAlignImageBuffers),
// solver settings, and a statistics sink.
// Returns the refined twist, or Vector6d::Zero() if an update became NaN.
//
// Fixes versus the previous revision:
//  - currentImageTextureObject is now destroyed each level (was leaked).
//  - d_nActivePoints is zeroed before every Jacobian launch; the kernel only
//    increments it, so it previously accumulated across iterations/levels and
//    skewed the Huber mean/variance normalization.
//  - the NaN early-exit now releases textures, the cuBLAS handle, and the
//    stream instead of leaking them.
Eigen::Matrix<double, 6, 1> alignImages(const nvxcu_pitch_linear_pyramid_t& currentImagePyramid, const nvxcu_pitch_linear_pyramid_t& currentImageGradXPyramid, const nvxcu_pitch_linear_pyramid_t& currentImageGradYPyramid,
                                        const nvxcu_pitch_linear_pyramid_t& previousImagePyramid, const nvxcu_pitch_linear_pyramid_t& previousDepthPyramid, const nvxcu_plain_array_t& pointArray, const IntrinsicParameters& intrinsicParameters,
                                        const Eigen::Matrix<double, 6, 1>& initialXi, const AlignImageBuffers& alignImageBuffers, const AlignmentSettings& alignmentSettings, AlignmentStatistics<5>& alignmentStatistics)
{
    assert(all_equal_image_size({currentImagePyramid, currentImageGradXPyramid, currentImageGradYPyramid, previousImagePyramid, previousDepthPyramid}));
    assert(currentImagePyramid.levels[0].base.format == NVXCU_DF_IMAGE_U8);
    assert(currentImageGradXPyramid.levels[0].base.format == NVXCU_DF_IMAGE_S16);
    assert(currentImageGradYPyramid.levels[0].base.format == NVXCU_DF_IMAGE_S16);
    auto start = std::chrono::steady_clock::now();
    hipStream_t stream;
    CUDA_SAFE_CALL( hipStreamCreate(&stream) );
    hipblasHandle_t cublasHandle;
    hipblasCreate(&cublasHandle);
    hipblasSetStream(cublasHandle, stream);
    const uint32_t levels = currentImagePyramid.base.num_levels;
    Vector6d xi = initialXi;
    // Isotropic motion-prior information matrix.
    Matrix6d sigma_inv = Matrix6d::Identity()*(1.0/alignmentSettings.motionPrior);
    // Coarse-to-fine: start at the smallest pyramid level.
    for (int level = levels-1; level >= 0 ; --level)
    {
        const nvxcu_pitch_linear_image_t& currentImage = currentImagePyramid.levels[level];
        const nvxcu_pitch_linear_image_t& currentImageGradX = currentImageGradXPyramid.levels[level];
        const nvxcu_pitch_linear_image_t& currentImageGradY = currentImageGradYPyramid.levels[level];
        const nvxcu_pitch_linear_image_t& previousImage = previousImagePyramid.levels[level];
        const nvxcu_pitch_linear_image_t& previousDepth = previousDepthPyramid.levels[level];
        // Intrinsics shrink by a factor of two per pyramid level.
        const double levelIntrinsicMultiplier = 1.0 / std::exp2(static_cast<double>(level));
        const IntrinsicParameters levelIntrinsics{intrinsicParameters.fx*levelIntrinsicMultiplier, intrinsicParameters.fy*levelIntrinsicMultiplier, intrinsicParameters.cx*levelIntrinsicMultiplier, intrinsicParameters.cy*levelIntrinsicMultiplier};
        hipMemsetAsync(pointArray.num_items_dev_ptr, 0, sizeof(uint32_t), stream); // Reset counter
        projectDepthPixels(previousImage, previousDepth, pointArray, levelIntrinsics, stream); // Populate pointArray
        uint32_t nPoints;
        hipMemcpyAsync(reinterpret_cast<void*>(&nPoints), reinterpret_cast<void*>(pointArray.num_items_dev_ptr), sizeof(uint32_t), hipMemcpyDeviceToHost, stream);
        hipStreamSynchronize(stream); // nPoints is needed on the host for the launch configs below
        // Bind the current image and its gradients as bilinear, normalized-
        // float textures for the Jacobian kernel.
        hipResourceDesc currentImageDesc{};
        currentImageDesc.resType = hipResourceTypePitch2D;
        currentImageDesc.res.pitch2D.devPtr = currentImage.planes[0].dev_ptr;
        currentImageDesc.res.pitch2D.desc = hipCreateChannelDesc(8*sizeof(uint8_t), 0, 0, 0, hipChannelFormatKindUnsigned);
        currentImageDesc.res.pitch2D.width = currentImage.base.width;
        currentImageDesc.res.pitch2D.height = currentImage.base.height;
        currentImageDesc.res.pitch2D.pitchInBytes = static_cast<size_t>(currentImage.planes[0].pitch_in_bytes);
        hipResourceDesc gradXResourceDesc{};
        gradXResourceDesc.resType = hipResourceTypePitch2D;
        gradXResourceDesc.res.pitch2D.devPtr = currentImageGradX.planes[0].dev_ptr;
        gradXResourceDesc.res.pitch2D.desc = hipCreateChannelDesc(8*sizeof(int16_t), 0, 0, 0, hipChannelFormatKindSigned);
        gradXResourceDesc.res.pitch2D.width = currentImageGradX.base.width;
        gradXResourceDesc.res.pitch2D.height = currentImageGradX.base.height;
        gradXResourceDesc.res.pitch2D.pitchInBytes = static_cast<size_t>(currentImageGradX.planes[0].pitch_in_bytes);
        hipResourceDesc gradYResourceDesc{};
        gradYResourceDesc.resType = hipResourceTypePitch2D;
        gradYResourceDesc.res.pitch2D.devPtr = currentImageGradY.planes[0].dev_ptr;
        gradYResourceDesc.res.pitch2D.desc = hipCreateChannelDesc(8*sizeof(int16_t), 0, 0, 0, hipChannelFormatKindSigned);
        gradYResourceDesc.res.pitch2D.width = currentImageGradY.base.width;
        gradYResourceDesc.res.pitch2D.height = currentImageGradY.base.height;
        gradYResourceDesc.res.pitch2D.pitchInBytes = static_cast<size_t>(currentImageGradY.planes[0].pitch_in_bytes);
        hipTextureDesc textureDesc{};
        textureDesc.addressMode[0] = hipAddressModeClamp;
        textureDesc.addressMode[1] = hipAddressModeClamp;
        textureDesc.filterMode = hipFilterModeLinear;
        textureDesc.readMode = hipReadModeNormalizedFloat;
        hipTextureObject_t currentImageTextureObject;
        hipTextureObject_t gradXTextureObject;
        hipTextureObject_t gradYTextureObject;
        CUDA_SAFE_CALL( hipCreateTextureObject(&currentImageTextureObject, &currentImageDesc, &textureDesc, nullptr) );
        CUDA_SAFE_CALL( hipCreateTextureObject(&gradXTextureObject, &gradXResourceDesc, &textureDesc, nullptr) );
        CUDA_SAFE_CALL( hipCreateTextureObject(&gradYTextureObject, &gradYResourceDesc, &textureDesc, nullptr) );
        // All kernels use 1024-thread 1D blocks with a ceil-div grid over nPoints.
        const dim3 jacobianBlockDim(1024, 1, 1);
        const dim3 jacobianGridDim(nPoints / jacobianBlockDim.x + (nPoints % jacobianBlockDim.x == 0 ? 0 : 1), 1, 1);
        const dim3 reductionBlockDim(1024, 1, 1);
        const dim3 reductionGridDim(nPoints / reductionBlockDim.x + (nPoints % reductionBlockDim.x == 0 ? 0 : 1), 1, 1);
        assert(nPoints <= reductionBlockDim.x * reductionGridDim.x);
        const dim3 stdDevBlockDim(1024, 1, 1);
        const dim3 stdDebGridDim(nPoints / stdDevBlockDim.x + (nPoints % stdDevBlockDim.x == 0 ? 0 : 1), 1, 1);
        const dim3 huberBlockDim(1024, 1, 1);
        const dim3 huberGridDim(nPoints / huberBlockDim.x + (nPoints % huberBlockDim.x == 0 ? 0 : 1), 1, 1);
        const dim3 weightedJacobianBlockDim(1024, 1, 1);
        const dim3 weightedJacobianGridDim(nPoints / weightedJacobianBlockDim.x + (nPoints % weightedJacobianBlockDim.x == 0 ? 0 : 1), 1, 1);
        double stepLength = alignmentSettings.initialStepLength;
        for (int iteration = 0; iteration < alignmentSettings.maxIterations; ++iteration)
        {
            // BUGFIX: the Jacobian kernel only increments this counter, so it
            // must be zeroed before every launch or the active-point count
            // accumulates across iterations and levels.
            hipMemsetAsync(alignImageBuffers.d_nActivePoints, 0, sizeof(uint32_t), stream);
            const Sophus::SE3d currentPose = Sophus::SE3d::exp(xi);
            hipLaunchKernelGGL(( calculateJacobianAndResidual), dim3(jacobianGridDim), dim3(jacobianBlockDim), 0, stream,
                currentPose.rotationMatrix(), currentPose.translation(),
                currentImageTextureObject,
                gradXTextureObject, gradYTextureObject,
                currentImageGradX.base.width, currentImageGradX.base.height,
                levelIntrinsics,
                reinterpret_cast<DVOPoint*>(pointArray.dev_ptr), nPoints,
                alignImageBuffers.d_J, alignImageBuffers.d_residual, alignImageBuffers.d_nActivePoints);
            // Sum residuals (two-pass block reduction).
            hipLaunchKernelGGL(( deviceReduceSum), dim3(reductionGridDim), dim3(reductionBlockDim), 0, stream, alignImageBuffers.d_residual, alignImageBuffers.d_tmpSumArray, nPoints);
            hipLaunchKernelGGL(( deviceReduceSum), dim3(1), dim3(1024), 0, stream, alignImageBuffers.d_tmpSumArray, alignImageBuffers.d_tmpSumValue, nPoints); // Store sum in d_tmpSumValue
            // Store squared difference from mean in d_weights
            hipLaunchKernelGGL(( calculateStdDev), dim3(stdDebGridDim), dim3(stdDevBlockDim), 0, stream, alignImageBuffers.d_residual, alignImageBuffers.d_weights, alignImageBuffers.d_tmpSumValue, nPoints, alignImageBuffers.d_nActivePoints);
            // Sum squared differences (variance numerator) into d_tmpSumValue.
            hipLaunchKernelGGL(( deviceReduceSum), dim3(reductionGridDim), dim3(reductionBlockDim), 0, stream, alignImageBuffers.d_weights, alignImageBuffers.d_tmpSumArray, nPoints);
            hipLaunchKernelGGL(( deviceReduceSum), dim3(1), dim3(1024), 0, stream, alignImageBuffers.d_tmpSumArray, alignImageBuffers.d_tmpSumValue, nPoints); // Store sum in d_tmpSumValue
            hipLaunchKernelGGL(( calculateHuberWeights), dim3(huberGridDim), dim3(huberBlockDim), 0, stream, alignImageBuffers.d_residual, alignImageBuffers.d_weights, alignImageBuffers.d_tmpSumValue, nPoints, alignImageBuffers.d_nActivePoints);
            hipLaunchKernelGGL(( calculateWeightedJacobian), dim3(weightedJacobianGridDim), dim3(weightedJacobianBlockDim), 0, stream, alignImageBuffers.d_Jw, alignImageBuffers.d_J, alignImageBuffers.d_weights, nPoints);
            // Normal equations: A = Jw^T * J (6x6). BLAS is column-major and J
            // is stored nPoints x 6 column-major, so OP_T/OP_N yields J^T W J.
            const double identityScalar = 1.0;
            const double zeroScalar = 0.0;
            hipblasDgemm(cublasHandle,
                         HIPBLAS_OP_T, HIPBLAS_OP_N,
                         6, 6, nPoints,
                         &identityScalar,
                         alignImageBuffers.d_Jw, nPoints,
                         alignImageBuffers.d_J, nPoints,
                         &zeroScalar,
                         alignImageBuffers.d_A, 6);
            // b = -Jw^T * r.
            const double negativeIdentityScalar = -1.0;
            hipblasDgemm(cublasHandle,
                         HIPBLAS_OP_T, HIPBLAS_OP_N,
                         6, 1, nPoints,
                         &negativeIdentityScalar,
                         alignImageBuffers.d_Jw, nPoints,
                         alignImageBuffers.d_residual, nPoints,
                         &zeroScalar,
                         alignImageBuffers.d_b, 6);
            Matrix6d A;
            hipMemcpyAsync(static_cast<void*>(A.data()), static_cast<void*>(alignImageBuffers.d_A), 6*6*sizeof(double), hipMemcpyDeviceToHost, stream);
            Vector6d b;
            hipMemcpyAsync(static_cast<void*>(b.data()), static_cast<void*>(alignImageBuffers.d_b), 6*sizeof(double), hipMemcpyDeviceToHost, stream);
            CUDA_SAFE_CALL( hipStreamSynchronize(stream) );
            // Damped Gauss-Newton step with the motion prior folded in.
            const Vector6d xi_delta = stepLength * (A + sigma_inv).ldlt().solve(b + sigma_inv*(initialXi - xi));
            if (std::isnan(xi_delta.sum()))
            {
                std::cerr << "============= XI is NAN =============\n";
                // BUGFIX: release all GPU resources before the early exit
                // (previously the textures, handle, and stream leaked here).
                CUDA_SAFE_CALL( hipDestroyTextureObject(currentImageTextureObject) );
                CUDA_SAFE_CALL( hipDestroyTextureObject(gradXTextureObject) );
                CUDA_SAFE_CALL( hipDestroyTextureObject(gradYTextureObject) );
                hipblasDestroy(cublasHandle);
                CUDA_SAFE_CALL( hipStreamDestroy(stream) );
                return Vector6d::Zero();
            }
            // Compose the update on the manifold and log back to a twist.
            xi = Sophus::SE3d::log(currentPose * Sophus::SE3d::exp(xi_delta));
            alignmentStatistics.iterationPerLevel.at(level)++;
            const double xi_delta_norm = xi_delta.norm();
            // Converged once the step-length-normalized update is small enough.
            if ((1.0/stepLength) * xi_delta_norm <= alignmentSettings.xiConvergenceLength) { break; }
            if (stepLength > alignmentSettings.minStepLength) { stepLength *= alignmentSettings.stepLengthReductionFactor; }
        }
        // BUGFIX: also destroy the current-image texture — it was created per
        // level but never released.
        CUDA_SAFE_CALL( hipDestroyTextureObject(currentImageTextureObject) );
        CUDA_SAFE_CALL( hipDestroyTextureObject(gradXTextureObject) );
        CUDA_SAFE_CALL( hipDestroyTextureObject(gradYTextureObject) );
    }
    hipblasDestroy(cublasHandle);
    CUDA_SAFE_CALL( hipStreamDestroy(stream) );
    auto end = std::chrono::steady_clock::now();
    alignmentStatistics.elapsedMicroseconds = std::chrono::duration_cast<std::chrono::microseconds>(end - start).count();
    return xi;
}
| 2331a3151612491fad350aac4b6c9d89880c8364.cu | #include "imageAlignment.h"
#include <cassert>
#include <cmath>
#include <sophus/se3.hpp>
#include <NVX/nvxcu.h>
#include <driver_types.h>
#include <cuda_runtime.h>
#include <limits>
#include <inttypes.h>
#include <iomanip>
#include <cublas_v2.h>
#include <chrono>
#include "utils/asserts.h"
#include "utils/nvxcu_debug.h"
#include "utils/nvxcu_utils.h"
#include "utils/nvxcu_imageSave.h"
#include "cuda/imageUtils.h"
#include "primitives/DVOPoint.h"
using Vector6d = Eigen::Matrix<double, 6, 1>;
using Matrix6d = Eigen::Matrix<double, 6, 6>;
// One thread per 3D point: warp the point into the current camera frame with
// the candidate pose (rotation, translation), project it, sample the current
// image and its gradients there, and emit one row of the 6-DoF photometric
// Jacobian plus the intensity residual.
//
// Storage layout: Jacobian column c of point p lives at Jptr[p + c*nPoints]
// (column-major nPoints x 6, matching the cuBLAS calls in alignImages).
// Points projecting outside the image get zeroed rows so the downstream
// reductions over all nPoints stay well-formed; only in-bounds points bump
// *nActivePointsPtr.
// NOTE(review): this kernel only increments the counter — the caller must
// zero *nActivePointsPtr before each launch; verify that it does.
__global__
void calculateJacobianAndResidual(const Eigen::Matrix3d rotation, const Eigen::Vector3d translation,
                                  const cudaTextureObject_t currentImageTexture,
                                  const cudaTextureObject_t gradXTexture, const cudaTextureObject_t gradYTexture,
                                  const uint32_t imageWidth, const int32_t imageHeight,
                                  const IntrinsicParameters intrinsicParameters,
                                  const DVOPoint* pointArray, const uint32_t nPoints,
                                  double* Jptr, double* residualPtr, uint32_t* nActivePointsPtr)
{
    const uint32_t pointId = blockIdx.x*blockDim.x + threadIdx.x;
    if (pointId >= nPoints) { return; }  // guard the grid tail
    const DVOPoint point = pointArray[pointId];
    // Rigid transform into the current camera frame.
    const Eigen::Vector3d projectedPoint = rotation * point.pos + translation;
    const double& x = projectedPoint(0);
    const double& y = projectedPoint(1);
    const double& z = projectedPoint(2);
    const double& fx = intrinsicParameters.fx;
    const double& fy = intrinsicParameters.fy;
    const double& cx = intrinsicParameters.cx;
    const double& cy = intrinsicParameters.cy;
    // Pinhole projection to pixel coordinates.
    const double u = fx*x/z + cx;
    const double v = fy*y/z + cy;
    // Keep a 1-pixel border so the bilinear texture fetches stay in bounds.
    if (u >= 0.0 and u < static_cast<double>(imageWidth-1) and v >= 0.0 and v < static_cast<double>(imageHeight-1)) // Is the point inside the image
    {
        // +0.5 shifts to texel centers for the hardware's linear filtering
        // (textures are created with cudaFilterModeLinear in alignImages).
        const float gradXValue = tex2D<float>(gradXTexture, static_cast<float>(u + 0.5), static_cast<float>(v + 0.5));
        const float gradYValue = tex2D<float>(gradYTexture, static_cast<float>(u + 0.5), static_cast<float>(v + 0.5));
        const double Ix = static_cast<double>(gradXValue);
        const double Iy = static_cast<double>(gradYValue);
        const double z_div = 1.0/z;
        const double z2_div = 1.0/(z*z);
        // Chain rule: image gradient times the derivative of the projection
        // w.r.t. the se(3) twist (three translation columns, then rotation).
        Jptr[pointId + 0*nPoints] = (Ix*fx)*z_div;
        Jptr[pointId + 1*nPoints] = (Iy*fy)*z_div;
        Jptr[pointId + 2*nPoints] = -(Ix*fx*x)*z2_div - (Iy*fy*y)*z2_div;
        Jptr[pointId + 3*nPoints] = -Iy*fy - y*((Ix*fx*x) + (Iy*fy*y))*z2_div;
        Jptr[pointId + 4*nPoints] = Ix*fx + x*((Ix*fx*x) + (Iy*fy*y))*z2_div;
        Jptr[pointId + 5*nPoints] = (Iy*fy*x)*z_div - (Ix*fx*y)*z_div;
        // Residual: normalized-float texture read minus the stored 8-bit
        // reference intensity rescaled to [0,1].
        const double currentImageValue = static_cast<double>(tex2D<float>(currentImageTexture, static_cast<float>(u + 0.5), static_cast<float>(v + 0.5)));
        const double previousImageValue = static_cast<double>(point.intensity)/std::numeric_limits<uint8_t>::max();
        const double residual = currentImageValue - previousImageValue;
        residualPtr[pointId] = residual;
        atomicInc(nActivePointsPtr, 0xFFFFFFFF); // Count up the number of points that are active in the optimization
    }
    else
    {
        // Out of view: zero the row so reductions over all nPoints are unaffected.
        Jptr[pointId + 0*nPoints] = 0.0;
        Jptr[pointId + 1*nPoints] = 0.0;
        Jptr[pointId + 2*nPoints] = 0.0;
        Jptr[pointId + 3*nPoints] = 0.0;
        Jptr[pointId + 4*nPoints] = 0.0;
        Jptr[pointId + 5*nPoints] = 0.0;
        residualPtr[pointId] = 0.0;
    }
}
// Sums `val` across the lanes of the calling warp with shuffle-down; the full
// total lands in lane 0 (other lanes end with partial sums).
// NOTE(review): the participant mask comes from __activemask(), which under
// independent thread scheduling reflects whichever lanes happen to be
// converged, not necessarily the intended set. All call sites in this file
// invoke it with a fully active warp, but an explicit mask would be safer —
// confirm before reusing elsewhere.
__inline__ __device__
double warpReduceSum(double val)
{
    const unsigned int mask = __activemask();
    // Halve the shuffle distance each step: 16, 8, 4, 2, 1.
    for (int offset = warpSize/2; offset > 0; offset /= 2)
    {
        val += __shfl_down_sync(mask, val, offset);
    }
    return val;
}
// Block-wide sum: each warp reduces internally, lane 0 of each warp parks its
// partial in shared memory, then warp 0 reduces the partials. The grand total
// is valid only in thread 0. Must be called by every thread of the block
// (contains __syncthreads()).
// Capacity: shared[32] supports up to 32 warps = 1024 threads, which matches
// all launch configurations in this file.
// NOTE(review): assumes blockDim.x is a multiple of warpSize — with a partial
// last warp the blockDim.x/warpSize truncation below would drop its partial;
// confirm if block sizes other than 1024 are ever used.
__inline__ __device__
double blockReduceSum(double val)
{
    static __shared__ double shared[32];  // one slot per warp
    const int lane = threadIdx.x % warpSize;
    const int wid = threadIdx.x / warpSize;
    val = warpReduceSum(val); // Each warp performs partial reduction
    if (lane == 0) { shared[wid] = val; } // Write reduced value to shared memory
    __syncthreads();
    //read from shared memory only if that warp existed
    val = (threadIdx.x < blockDim.x / warpSize) ? shared[lane] : 0.0;
    //Final reduce within first warp
    if (wid == 0) { val = warpReduceSum(val); }
    return val;
}
// Partial sum of in[0..N): each block accumulates a grid-stride slice,
// reduces it block-wide, and thread 0 writes the block's total to
// out[blockIdx.x]. Run a second 1-block pass over `out` to get the full sum.
__global__ void deviceReduceSum(double* in, double* out, const int N)
{
    const uint32_t firstIndex = blockIdx.x * blockDim.x + threadIdx.x;
    const uint32_t gridStride = blockDim.x * gridDim.x;
    double partial = 0.0;
    // Grid-stride loop: correct for any launch configuration.
    for (int i = firstIndex; i < N; i += gridStride)
    {
        partial += in[i];
    }
    // All threads must participate (blockReduceSum contains a barrier).
    const double blockTotal = blockReduceSum(partial);
    if (threadIdx.x == 0)
    {
        out[blockIdx.x] = blockTotal;
    }
}
// Writes each residual's squared deviation from the mean residual into
// `weights` (staging for a subsequent variance reduction).
// The mean divides the residual sum by the active-point count; inactive
// points were written residual 0.0, so they do not bias the numerator.
// NOTE(review): inactive entries still emit (-mean)^2 here, adding ~mean^2
// per inactive point to the later variance sum — confirm this is intended.
// NOTE(review): no guard against *nActivePointsPtr == 0 (division by zero).
__global__ void calculateStdDev(double* residualArray, double* weights, double* residualSum, const uint32_t nArrayElements, uint32_t* nActivePointsPtr)
{
    const uint32_t arrayIndex = blockIdx.x * blockDim.x + threadIdx.x;
    if (arrayIndex >= nArrayElements) { return; }
    const double mean = *residualSum / (*nActivePointsPtr);
    const double diff = residualArray[arrayIndex] - mean;
    weights[arrayIndex] = diff*diff;
}
// Huber influence weights: residuals within the threshold k keep weight 1,
// larger residuals are down-weighted by k / |r|.
__global__ void calculateHuberWeights(double* residualArray, double* weightArray, double* variancePtr, const uint32_t nArrayElements, uint32_t* nActivePointsPtr)
{
    const uint32_t idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= nArrayElements) { return; }
    // k = 1.345 * sigma; 1.345 is the customary Huber tuning constant giving
    // 95% asymptotic efficiency under a Gaussian error model.
    const double sigma = sqrt((1.0/(*nActivePointsPtr))*(*variancePtr));
    const double k = 1.345*sigma;
    const double absResidual = abs(residualArray[idx]);
    weightArray[idx] = (absResidual > k) ? (k/absResidual) : 1.0;
}
// Scales every Jacobian row by its per-point robust weight:
// Jw[p, c] = J[p, c] * w[p]. Columns are stored at stride nPoints
// (column-major nPoints x 6), matching calculateJacobianAndResidual.
__global__ void calculateWeightedJacobian(double* Jwptr, double* Jptr, double* weightsPtr, const uint32_t nPoints)
{
    const uint32_t pointId = blockIdx.x*blockDim.x + threadIdx.x;
    if (pointId >= nPoints) { return; }
    const double weight = weightsPtr[pointId];
    #pragma unroll
    for (int col = 0; col < 6; ++col)
    {
        Jwptr[pointId + col*nPoints] = Jptr[pointId + col*nPoints]*weight;
    }
}
// Rounds `size` up to the next multiple of 8 bytes, so every scratch buffer
// carved out below starts on an 8-byte (double-aligned) boundary.
inline size_t toNext8(size_t size)
{
    const size_t remainder = size % 8;
    return (remainder == 0) ? size : size + (8 - remainder);
}
// Computes the device scratch size alignImages needs for a base image of the
// given dimensions (host scratch is zero). Must stay in lockstep with
// createAlignImageBuffers: same buffers, same order, same 8-byte-aligned
// sizes.
nvxcu_tmp_buf_size_t alignImagesBuffSize(const uint32_t baseImageWidth, const uint32_t baseImageHeight)
{
    nvxcu_tmp_buf_size_t tmpBufSize{
        .dev_buf_size = 0,
        .host_buf_size = 0,
    };
    // Worst case: every pixel of the base pyramid level becomes a point.
    const uint32_t maxNumberOfPoints = baseImageWidth*baseImageHeight;
    tmpBufSize.dev_buf_size += toNext8(sizeof(double)); // d_tmpSumValue
    tmpBufSize.dev_buf_size += toNext8(maxNumberOfPoints * sizeof(double)); // d_tmpSumArray
    tmpBufSize.dev_buf_size += toNext8(sizeof(uint32_t)); // d_nActivePoints
    tmpBufSize.dev_buf_size += toNext8(maxNumberOfPoints * 6 * sizeof(double)); // d_J
    tmpBufSize.dev_buf_size += toNext8(maxNumberOfPoints * 6 * sizeof(double)); // d_Jw
    tmpBufSize.dev_buf_size += toNext8(maxNumberOfPoints * sizeof(double)); // d_residual
    tmpBufSize.dev_buf_size += toNext8(maxNumberOfPoints * sizeof(double)); // d_weights
    tmpBufSize.dev_buf_size += toNext8(6 * 6 * sizeof(double)); // d_A
    tmpBufSize.dev_buf_size += toNext8(6 * sizeof(double)); // d_b
    return tmpBufSize;
}
// Partitions a pre-allocated device buffer (sized by alignImagesBuffSize)
// into the named scratch buffers used by alignImages. Carving order and
// per-buffer sizes must match alignImagesBuffSize exactly.
AlignImageBuffers createAlignImageBuffers(void* tmpBuffer, const uint32_t baseImageWidth, const uint32_t baseImageHeight)
{
    const uint32_t maxNumberOfPoints = baseImageWidth*baseImageHeight;
    AlignImageBuffers buffers{};
    // Walk a byte cursor through the buffer, advancing by 8-byte-aligned sizes.
    uint8_t* bytePtr = reinterpret_cast<uint8_t*>(tmpBuffer);
    buffers.d_tmpSumValue = reinterpret_cast<double*>(bytePtr);
    bytePtr += toNext8(sizeof(double));
    buffers.d_tmpSumArray = reinterpret_cast<double*>(bytePtr);
    bytePtr += toNext8(maxNumberOfPoints * sizeof(double));
    buffers.d_nActivePoints = reinterpret_cast<uint32_t*>(bytePtr);
    bytePtr += toNext8(sizeof(uint32_t));
    buffers.d_J = reinterpret_cast<double*>(bytePtr);
    bytePtr += toNext8(maxNumberOfPoints * 6 * sizeof(double));
    buffers.d_Jw = reinterpret_cast<double*>(bytePtr);
    bytePtr += toNext8(maxNumberOfPoints * 6 * sizeof(double));
    buffers.d_residual = reinterpret_cast<double*>(bytePtr);
    bytePtr += toNext8(maxNumberOfPoints * sizeof(double));
    buffers.d_weights = reinterpret_cast<double*>(bytePtr);
    bytePtr += toNext8(maxNumberOfPoints * sizeof(double));
    buffers.d_A = reinterpret_cast<double*>(bytePtr);
    bytePtr += toNext8(6 * 6 * sizeof(double));
    buffers.d_b = reinterpret_cast<double*>(bytePtr);
    //bytePtr += toNext8(6*sizeof(double));
    return buffers;
}
Eigen::Matrix<double, 6, 1> alignImages(const nvxcu_pitch_linear_pyramid_t& currentImagePyramid, const nvxcu_pitch_linear_pyramid_t& currentImageGradXPyramid, const nvxcu_pitch_linear_pyramid_t& currentImageGradYPyramid,
const nvxcu_pitch_linear_pyramid_t& previousImagePyramid, const nvxcu_pitch_linear_pyramid_t& previousDepthPyramid, const nvxcu_plain_array_t& pointArray, const IntrinsicParameters& intrinsicParameters,
const Eigen::Matrix<double, 6, 1>& initialXi, const AlignImageBuffers& alignImageBuffers, const AlignmentSettings& alignmentSettings, AlignmentStatistics<5>& alignmentStatistics)
{
assert(all_equal_image_size({currentImagePyramid, currentImageGradXPyramid, currentImageGradYPyramid, previousImagePyramid, previousDepthPyramid}));
assert(currentImagePyramid.levels[0].base.format == NVXCU_DF_IMAGE_U8);
assert(currentImageGradXPyramid.levels[0].base.format == NVXCU_DF_IMAGE_S16);
assert(currentImageGradYPyramid.levels[0].base.format == NVXCU_DF_IMAGE_S16);
auto start = std::chrono::steady_clock::now();
cudaStream_t stream;
CUDA_SAFE_CALL( cudaStreamCreate(&stream) );
cublasHandle_t cublasHandle;
cublasCreate_v2(&cublasHandle);
cublasSetStream_v2(cublasHandle, stream);
const uint32_t levels = currentImagePyramid.base.num_levels;
Vector6d xi = initialXi;
Matrix6d sigma_inv = Matrix6d::Identity()*(1.0/alignmentSettings.motionPrior);
for (int level = levels-1; level >= 0 ; --level)
{
const nvxcu_pitch_linear_image_t& currentImage = currentImagePyramid.levels[level];
const nvxcu_pitch_linear_image_t& currentImageGradX = currentImageGradXPyramid.levels[level];
const nvxcu_pitch_linear_image_t& currentImageGradY = currentImageGradYPyramid.levels[level];
const nvxcu_pitch_linear_image_t& previousImage = previousImagePyramid.levels[level];
const nvxcu_pitch_linear_image_t& previousDepth = previousDepthPyramid.levels[level];
const double levelIntrinsicMultiplier = 1.0 / std::exp2(static_cast<double>(level));
const IntrinsicParameters levelIntrinsics{intrinsicParameters.fx*levelIntrinsicMultiplier, intrinsicParameters.fy*levelIntrinsicMultiplier, intrinsicParameters.cx*levelIntrinsicMultiplier, intrinsicParameters.cy*levelIntrinsicMultiplier};
cudaMemsetAsync(pointArray.num_items_dev_ptr, 0, sizeof(uint32_t), stream); // Reset counter
projectDepthPixels(previousImage, previousDepth, pointArray, levelIntrinsics, stream); // Populate pointArray
uint32_t nPoints;
cudaMemcpyAsync(reinterpret_cast<void*>(&nPoints), reinterpret_cast<void*>(pointArray.num_items_dev_ptr), sizeof(uint32_t), cudaMemcpyDeviceToHost, stream);
cudaStreamSynchronize(stream);
cudaResourceDesc currentImageDesc{};
currentImageDesc.resType = cudaResourceTypePitch2D;
currentImageDesc.res.pitch2D.devPtr = currentImage.planes[0].dev_ptr;
currentImageDesc.res.pitch2D.desc = cudaCreateChannelDesc(8*sizeof(uint8_t), 0, 0, 0, cudaChannelFormatKindUnsigned);
currentImageDesc.res.pitch2D.width = currentImage.base.width;
currentImageDesc.res.pitch2D.height = currentImage.base.height;
currentImageDesc.res.pitch2D.pitchInBytes = static_cast<size_t>(currentImage.planes[0].pitch_in_bytes);
cudaResourceDesc gradXResourceDesc{};
gradXResourceDesc.resType = cudaResourceTypePitch2D;
gradXResourceDesc.res.pitch2D.devPtr = currentImageGradX.planes[0].dev_ptr;
gradXResourceDesc.res.pitch2D.desc = cudaCreateChannelDesc(8*sizeof(int16_t), 0, 0, 0, cudaChannelFormatKindSigned);
gradXResourceDesc.res.pitch2D.width = currentImageGradX.base.width;
gradXResourceDesc.res.pitch2D.height = currentImageGradX.base.height;
gradXResourceDesc.res.pitch2D.pitchInBytes = static_cast<size_t>(currentImageGradX.planes[0].pitch_in_bytes);
cudaResourceDesc gradYResourceDesc{};
gradYResourceDesc.resType = cudaResourceTypePitch2D;
gradYResourceDesc.res.pitch2D.devPtr = currentImageGradY.planes[0].dev_ptr;
gradYResourceDesc.res.pitch2D.desc = cudaCreateChannelDesc(8*sizeof(int16_t), 0, 0, 0, cudaChannelFormatKindSigned);
gradYResourceDesc.res.pitch2D.width = currentImageGradY.base.width;
gradYResourceDesc.res.pitch2D.height = currentImageGradY.base.height;
gradYResourceDesc.res.pitch2D.pitchInBytes = static_cast<size_t>(currentImageGradY.planes[0].pitch_in_bytes);
cudaTextureDesc textureDesc{};
textureDesc.addressMode[0] = cudaAddressModeClamp;
textureDesc.addressMode[1] = cudaAddressModeClamp;
textureDesc.filterMode = cudaFilterModeLinear;
textureDesc.readMode = cudaReadModeNormalizedFloat;
cudaTextureObject_t currentImageTextureObject;
cudaTextureObject_t gradXTextureObject;
cudaTextureObject_t gradYTextureObject;
CUDA_SAFE_CALL( cudaCreateTextureObject(¤tImageTextureObject, ¤tImageDesc, &textureDesc, nullptr) );
CUDA_SAFE_CALL( cudaCreateTextureObject(&gradXTextureObject, &gradXResourceDesc, &textureDesc, nullptr) );
CUDA_SAFE_CALL( cudaCreateTextureObject(&gradYTextureObject, &gradYResourceDesc, &textureDesc, nullptr) );
const dim3 jacobianBlockDim(1024, 1, 1);
const dim3 jacobianGridDim(nPoints / jacobianBlockDim.x + (nPoints % jacobianBlockDim.x == 0 ? 0 : 1), 1, 1);
const dim3 reductionBlockDim(1024, 1, 1);
const dim3 reductionGridDim(nPoints / reductionBlockDim.x + (nPoints % reductionBlockDim.x == 0 ? 0 : 1), 1, 1);
assert(nPoints <= reductionBlockDim.x * reductionGridDim.x);
const dim3 stdDevBlockDim(1024, 1, 1);
const dim3 stdDebGridDim(nPoints / stdDevBlockDim.x + (nPoints % stdDevBlockDim.x == 0 ? 0 : 1), 1, 1);
const dim3 huberBlockDim(1024, 1, 1);
const dim3 huberGridDim(nPoints / huberBlockDim.x + (nPoints % huberBlockDim.x == 0 ? 0 : 1), 1, 1);
const dim3 weightedJacobianBlockDim(1024, 1, 1);
const dim3 weightedJacobianGridDim(nPoints / weightedJacobianBlockDim.x + (nPoints % weightedJacobianBlockDim.x == 0 ? 0 : 1), 1, 1);
double stepLength = alignmentSettings.initialStepLength;
for (int iteration = 0; iteration < alignmentSettings.maxIterations; ++iteration)
{
const Sophus::SE3d currentPose = Sophus::SE3d::exp(xi);
calculateJacobianAndResidual<<<jacobianGridDim, jacobianBlockDim, 0, stream>>>(
currentPose.rotationMatrix(), currentPose.translation(),
currentImageTextureObject,
gradXTextureObject, gradYTextureObject,
currentImageGradX.base.width, currentImageGradX.base.height,
levelIntrinsics,
reinterpret_cast<DVOPoint*>(pointArray.dev_ptr), nPoints,
alignImageBuffers.d_J, alignImageBuffers.d_residual, alignImageBuffers.d_nActivePoints);
// Sum residuals
deviceReduceSum<<<reductionGridDim, reductionBlockDim, 0, stream>>>(alignImageBuffers.d_residual, alignImageBuffers.d_tmpSumArray, nPoints);
deviceReduceSum<<<1, 1024, 0, stream>>>(alignImageBuffers.d_tmpSumArray, alignImageBuffers.d_tmpSumValue, nPoints); // Store sum in d_tmpSumValue
// Store squared difference from mean in d_weights
calculateStdDev<<<stdDebGridDim, stdDevBlockDim, 0, stream>>>(alignImageBuffers.d_residual, alignImageBuffers.d_weights, alignImageBuffers.d_tmpSumValue, nPoints, alignImageBuffers.d_nActivePoints);
// Sum squared differences
deviceReduceSum<<<reductionGridDim, reductionBlockDim, 0, stream>>>(alignImageBuffers.d_weights, alignImageBuffers.d_tmpSumArray, nPoints);
deviceReduceSum<<<1, 1024, 0, stream>>>(alignImageBuffers.d_tmpSumArray, alignImageBuffers.d_tmpSumValue, nPoints); // Store sum in d_tmpSumValue
calculateHuberWeights<<<huberGridDim, huberBlockDim, 0, stream>>>(alignImageBuffers.d_residual, alignImageBuffers.d_weights, alignImageBuffers.d_tmpSumValue, nPoints, alignImageBuffers.d_nActivePoints);
calculateWeightedJacobian<<<weightedJacobianGridDim, weightedJacobianBlockDim, 0, stream>>>(alignImageBuffers.d_Jw, alignImageBuffers.d_J, alignImageBuffers.d_weights, nPoints);
const double identityScalar = 1.0;
const double zeroScalar = 0.0;
cublasDgemm_v2(cublasHandle,
CUBLAS_OP_T, CUBLAS_OP_N,
6, 6, nPoints,
&identityScalar,
alignImageBuffers.d_Jw, nPoints,
alignImageBuffers.d_J, nPoints,
&zeroScalar,
alignImageBuffers.d_A, 6);
const double negativeIdentityScalar = -1.0;
cublasDgemm_v2(cublasHandle,
CUBLAS_OP_T, CUBLAS_OP_N,
6, 1, nPoints,
&negativeIdentityScalar,
alignImageBuffers.d_Jw, nPoints,
alignImageBuffers.d_residual, nPoints,
&zeroScalar,
alignImageBuffers.d_b, 6);
Matrix6d A;
cudaMemcpyAsync(static_cast<void*>(A.data()), static_cast<void*>(alignImageBuffers.d_A), 6*6*sizeof(double), cudaMemcpyDeviceToHost, stream);
Vector6d b;
cudaMemcpyAsync(static_cast<void*>(b.data()), static_cast<void*>(alignImageBuffers.d_b), 6*sizeof(double), cudaMemcpyDeviceToHost, stream);
CUDA_SAFE_CALL( cudaStreamSynchronize(stream) );
const Vector6d xi_delta = stepLength * (A + sigma_inv).ldlt().solve(b + sigma_inv*(initialXi - xi));
if (std::isnan(xi_delta.sum()))
{
std::cerr << "============= XI is NAN =============\n";
return Vector6d::Zero();
}
xi = Sophus::SE3d::log(currentPose * Sophus::SE3d::exp(xi_delta));
alignmentStatistics.iterationPerLevel.at(level)++;
const double xi_delta_norm = xi_delta.norm();
if ((1.0/stepLength) * xi_delta_norm <= alignmentSettings.xiConvergenceLength) { break; }
if (stepLength > alignmentSettings.minStepLength) { stepLength *= alignmentSettings.stepLengthReductionFactor; }
}
CUDA_SAFE_CALL( cudaDestroyTextureObject(gradXTextureObject) );
CUDA_SAFE_CALL( cudaDestroyTextureObject(gradYTextureObject) );
}
cublasDestroy_v2(cublasHandle);
CUDA_SAFE_CALL( cudaStreamDestroy(stream) );
auto end = std::chrono::steady_clock::now();
alignmentStatistics.elapsedMicroseconds = std::chrono::duration_cast<std::chrono::microseconds>(end - start).count();
return xi;
}
|
74eb8065106725cebc138e27d5b94947fe8a9b1a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cudakernel/memory/concat.h"
#include "cudakernel/common/divmod_fast.h"
#include "cudakernel/common/memory_utils.h"
#include "cudakernel/common/common.h"
#include "ppl/nn/common/tensor_shape.h"
#include "ppl/common/retcode.h"
#define NHWC_ALIGNED_AXIS (8)
// Generic NDArray concat copy: scatters one input tensor into its slice of the
// output along the concat axis.
//   num_elems            - total element count of this input tensor
//   inputs               - source buffer for this single input
//   concat_size          - product of the dims after the concat axis
//   top_axis_width       - output's width along the concat axis
//   num_elems_inner_fast - fast divmod by this input's inner size
//   axis_offset          - where this input starts along the output axis
template <typename T>
__global__ void ppl_cukernel_concat(
    int64_t num_elems,
    const T* inputs,
    int64_t concat_size,
    int64_t top_axis_width,
    DivModFast num_elems_inner_fast,
    int axis_offset,
    T* output)
{
    const int64_t grid_stride = (int64_t)blockDim.x * gridDim.x;
    int64_t idx = (int64_t)blockIdx.x * blockDim.x + threadIdx.x;
    // Grid-stride loop over this input's flat index space.
    for (; idx < num_elems; idx += grid_stride) {
        int outer, inner;
        num_elems_inner_fast.divmod(idx, outer, inner);
        // Shift the concat-axis coordinate by this input's offset in the output.
        const int64_t dst = (outer * top_axis_width + axis_offset) * concat_size + inner;
        output[dst] = inputs[idx];
    }
}
// Two-input last-axis concat where each input contributes exactly one element
// per output pair: output element i (viewed as T2) packs input0[i] and
// input1[i] side by side. Call sites pair T1/T2 so sizeof(T2) == 2*sizeof(T1)
// (e.g. int8_t/int16_t ... int64_t/float4).
// Launch contract: blockDim.x == 256 (the shared buffer is sized for it).
template <typename T1, typename T2>
__global__ void __launch_bounds__(256) ppl_cukernel_concat_two_inputs(
int64_t num_elems,
const T1* input0,
const T1* input1,
T2* output)
{
for (int64_t i = (int64_t)blockIdx.x * blockDim.x + threadIdx.x;
i < num_elems;
i += (int64_t)blockDim.x * gridDim.x) {
int tid = threadIdx.x;
// Staging buffer: each thread writes its own T1 pair, then reads the same
// bytes back as one T2. No __syncthreads() is required because a thread
// only ever touches its own two slots (buffer[2*tid] and buffer[2*tid+1]).
__shared__ T1 buffer[2 * 256];
buffer[2 * tid] = input0[i]; buffer[2 * tid + 1] = input1[i];
T2 *buffer_ptr = reinterpret_cast<T2*>(buffer);
output[i] = buffer_ptr[tid];
}
}
// Padding-aware NHWC concat for one input tensor.
// Iterates the logical (unpadded) index space of the input; for each element
// it rebuilds the multi-dim coordinate via fast divmod, maps it through the
// input's padded strides to locate the source element, shifts the concat-axis
// coordinate by axis_offset, and maps through the output's padded strides.
//   num_elems   - logical (unpadded) element count of this input
//   nhwc_axis   - concat axis position after NHWC dim reordering
//   axis_offset - this input's starting offset along the output concat axis
template<typename T>
__global__ void ppl_cukernel_concat_nhwc(
int64_t num_elems,
int num_dims,
int nhwc_axis,
int axis_offset,
GArray<DivModFast> input_strides_fast,
GArray<int64_t> input_padded_strides,
GArray<int64_t> output_padded_strides,
const T* input,
T* output)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= num_elems) return;
int64_t output_offset = 0, input_offset = 0;
int idx, remain = index;
// Peel off one coordinate per dimension, outermost first.
for(int it = 0; it < num_dims; ++it) {
input_strides_fast[it].divmod(remain, idx, remain);
input_offset += idx * input_padded_strides[it];
// Only the concat axis is displaced in the output tensor.
idx = (it == nhwc_axis) ? idx + axis_offset : idx;
output_offset += idx * output_padded_strides[it];
}
output[output_offset] = input[input_offset];
}
// Two-input NHWC channel concat with no padding anywhere: each output element
// is addressed as (pixel, channel) with inner_dims = axis_width0 + axis_width1
// channels; channels below axis_width0 come from input0, the rest from input1.
template <typename T1, typename T2>
__global__ void __launch_bounds__(256) ppl_cukernel_concat_nhwc_two_inputs(
    int64_t num_elems,
    int inner_dims,
    int axis_width0,
    int axis_width1,
    const T1* input0,
    const T1* input1,
    T2* output)
{
    const int64_t step = (int64_t)blockDim.x * gridDim.x;
    for (int64_t i = (int64_t)blockIdx.x * blockDim.x + threadIdx.x;
         i < num_elems;
         i += step) {
        const int channel = i % inner_dims;
        const int pixel = i / inner_dims;
        if (channel < axis_width0) {
            // Element lives in the first input's channel range.
            output[i] = input0[pixel * axis_width0 + channel];
        } else {
            // Element lives in the second input's channel range.
            output[i] = input1[pixel * axis_width1 + (channel - axis_width0)];
        }
    }
}
// Two-input NHWC channel concat where channel widths are not 8-aligned:
// iterates the PADDED output layout directly. Positions that fall inside an
// input's real channel range copy from that input's padded row; positions in
// the alignment padding (axis_offset >= real width) are zero-filled.
template <typename T1, typename T2>
__global__ void __launch_bounds__(256) ppl_cukernel_concat_nhwc_two_inputs(
int64_t num_elems,
int inner_dims,
int pad_inner_dims,
int axis_width0,
int pad_axis_width0,
int axis_width1,
int pad_axis_width1,
const T1* input0,
const T1* input1,
T2* output)
{
for (int64_t i = (int64_t)blockIdx.x * blockDim.x + threadIdx.x;
i < num_elems;
i += (int64_t)blockDim.x * gridDim.x) {
// Decompose the padded output index into (pixel, padded channel).
int inner_idx = i % pad_inner_dims;
int outer_idx = i / pad_inner_dims;
// int output_offset = outer_idx * pad_inner_dims + inner_idx;
if (inner_idx >= axis_width0) {
// Channel belongs to input1's region (or its trailing padding).
int axis_offset = inner_idx - axis_width0;
int input_offset = outer_idx * pad_axis_width1 + axis_offset;
output[i] = axis_offset >= axis_width1 ? 0 : input1[input_offset];
} else {
// Channel belongs to input0's region (or its trailing padding).
int axis_offset = inner_idx;
int input_offset = outer_idx * pad_axis_width0 + axis_offset;
output[i] = axis_offset >= axis_width0 ? 0 : input0[input_offset];
}
}
}
// NHWC channel concat when no channel padding exists. Output element index is
// outer_pixel * top_axis_width + axis_offset + inner_channel, where inner is
// the channel index within this input (num_elems_inner_fast divides by this
// input's channel width).
// NOTE(review): `concat_size` is accepted but never used in the index math
// (the caller passes num_in_elems for it) — presumably fine for the axis==1
// NHWC fast path where the channel axis is innermost, but worth confirming
// against PPLCUDAConcatNoPaddingForwardImp.
template <typename T>
__global__ void ppl_cukernel_concat_nhwc_nopadding(
int64_t num_elems,
const T* inputs,
int64_t concat_size,
int64_t top_axis_width,
DivModFast num_elems_inner_fast,
int axis_offset,
T* output)
{
for (int64_t i = (int64_t)blockIdx.x * blockDim.x + threadIdx.x;
i < num_elems;
i += (int64_t)blockDim.x * gridDim.x) {
int outer_idx, inner_idx;
num_elems_inner_fast.divmod(i, outer_idx, inner_idx);
int64_t top_idx = inner_idx + (outer_idx * top_axis_width + axis_offset);
output[top_idx] = inputs[i];
}
}
// Returns true when the no-padding NHWC fast path may be used: the output is
// NHWC, the concat runs along the channel axis (axis == 1), and every input
// is already unpadded along that axis.
bool IsConcatNoPadding(
    int axis,
    int num_inputs,
    int* input_dims[],
    int* input_padded_dims[],
    ppl::nn::TensorShape* output_shape,
    int mask)
{
    const bool nhwc_channel_concat =
        (output_shape->GetDataFormat() == ppl::common::DATAFORMAT_NHWC) && (axis == 1);
    if (!nhwc_channel_concat)
        return false;
    for (int i = 0; i < num_inputs; ++i) {
        // Any padding along the concat axis disqualifies the fast path.
        if (input_padded_dims[i][axis] != input_dims[i][axis])
            return false;
    }
    return true;
}
// NHWC channel-concat fast path (no padding on any input's channel axis).
// Launches one ppl_cukernel_concat_nhwc_nopadding per unmasked input; masked
// inputs (bit j of `mask` set) are skipped but still advance axis_offset.
// Dispatch is by element byte-size, so one instantiation covers all dtypes of
// that width.
ppl::common::RetCode PPLCUDAConcatNoPaddingForwardImp(
hipStream_t stream,
int axis,
int num_inputs,
int* input_dims[],
int* input_padded_dims[],
const void* inputs[],
ppl::nn::TensorShape* output_shape,
void* output,
int mask)
{
// Elements per "pixel" row: total output elements divided by the channel width.
int64_t num_elems = output_shape->GetElementsIncludingPadding() / output_shape->GetDim(axis);
int64_t output_axis_width = output_shape->GetDim(axis);
int64_t axis_offset = 0;
// NOTE(review): num_in_elems is passed both as the element count and as the
// (unused) concat_size argument of the kernel — confirm intent there.
#define SWITCH_CASE(TYPE) \
case sizeof(TYPE): \
{ \
for (int j = 0; j < num_inputs; ++j) { \
int input_axis_width = input_dims[j][axis]; \
int num_in_elems = num_elems * input_axis_width; \
if (!(mask & (1 << j))) { \
if (num_in_elems > 0) { \
DivModFast num_elems_inner_fast = DivModFast(input_axis_width); \
int block_size = 256; \
int grid_size = (num_in_elems + block_size - 1) / block_size; \
hipLaunchKernelGGL(( ppl_cukernel_concat_nhwc_nopadding), dim3(grid_size), dim3(block_size), 0, stream, num_in_elems, \
(const TYPE*)inputs[j], num_in_elems, output_axis_width, num_elems_inner_fast, \
axis_offset, (TYPE*)output); \
} \
} \
axis_offset += input_axis_width; \
} \
return ppl::common::RC_SUCCESS; \
}
// Dispatch on element width in bytes (1/2/4/8).
switch(ppl::common::GetSizeOfDataType(output_shape->GetDataType())) {
SWITCH_CASE(int8_t);
SWITCH_CASE(int16_t);
SWITCH_CASE(int32_t);
SWITCH_CASE(int64_t);
default:
return ppl::common::RC_UNSUPPORTED;
}
#undef SWITCH_CASE
}
// General concat entry point: copies every unmasked input tensor into its
// slice of `output` along `axis`. Dispatches on data format (NDARRAY vs NHWC),
// element byte-size, and special-cased two-input shapes. Bit j of `mask` set
// means input j is skipped (its data is assumed in place), though its axis
// width still advances axis_offset.
ppl::common::RetCode PPLCUDAConcatForwardImp(
hipStream_t stream,
int axis,
int num_inputs,
int* input_dims[],
int* input_padded_dims[],
const void* inputs[],
ppl::nn::TensorShape* output_shape,
void* output,
int mask)
{
// Fast path: unpadded NHWC channel concat.
if (IsConcatNoPadding(axis, num_inputs, input_dims, input_padded_dims, output_shape, mask)) {
return PPLCUDAConcatNoPaddingForwardImp(stream, axis, num_inputs, input_dims,
input_padded_dims, inputs, output_shape, output, mask);
}
int num_dims = output_shape->GetDimCount();
int output_elems = output_shape->GetElementsIncludingPadding();
if (output_shape->GetDataFormat() == ppl::common::DATAFORMAT_NDARRAY) {
// Special case: two inputs, last-axis concat, width 1 each -> interleave
// via the shared-memory pairing kernel (T2 is twice the width of T1).
if (num_inputs == 2 && axis == (num_dims - 1) && input_dims[0][axis] == 1 && input_dims[1][axis] == 1) {
// num_elems = product of input0's dims (loop body is in the increment clause).
int num_elems = 1;
for(int it = 0; it < num_dims; num_elems *= input_dims[0][it], ++it);
#define SWITCH_CASE(TYPE1, TYPE2) \
case sizeof(TYPE1): \
{ \
int block_size = 256; \
int grid_size = (num_elems + block_size - 1) / block_size; \
hipLaunchKernelGGL(( ppl_cukernel_concat_two_inputs), dim3(grid_size), dim3(block_size), 0, stream, num_elems, \
(const TYPE1*)inputs[0], (const TYPE1*)inputs[1], (TYPE2*)output); \
return ppl::common::RC_SUCCESS; \
}
switch(ppl::common::GetSizeOfDataType(output_shape->GetDataType())) {
SWITCH_CASE(int8_t, int16_t);
SWITCH_CASE(int16_t, int32_t);
SWITCH_CASE(int32_t, int64_t);
SWITCH_CASE(int64_t, float4);
default:
return ppl::common::RC_UNSUPPORTED;
}
#undef SWITCH_CASE
} else {
// General NDArray path: one scatter kernel per unmasked input.
// concat_size = product of dims after the axis; num_concats = product before.
int64_t concat_size = 1;
int64_t num_concats = 1;
for (int i = num_dims - 1; i > axis; --i)
concat_size *= input_dims[0][i];
for (int i = 0; i < axis; ++i)
num_concats *= input_dims[0][i];
int axis_offset = 0;
int output_axis_width = output_shape->GetDim(axis);
#define SWITCH_CASE(TYPE) \
case sizeof(TYPE): \
{ \
for (int j = 0; j < num_inputs; ++j) { \
int input_axis_width = input_dims[j][axis]; \
if (!(mask & (1 << j))) { \
int64_t input_concat_size = input_axis_width * concat_size; \
int64_t num_elems = input_concat_size * num_concats; \
if (num_elems > 0) { \
DivModFast num_elems_inner_fast = DivModFast(input_concat_size); \
int block_size = 256; \
int grid_size = (num_elems + block_size - 1) / block_size; \
hipLaunchKernelGGL(( ppl_cukernel_concat), dim3(grid_size), dim3(block_size), 0, stream, num_elems, \
(const TYPE*)inputs[j], concat_size, output_axis_width, num_elems_inner_fast, \
axis_offset, (TYPE*)output); \
} \
} \
axis_offset += input_axis_width; \
} \
return ppl::common::RC_SUCCESS; \
}
switch(ppl::common::GetSizeOfDataType(output_shape->GetDataType())) {
SWITCH_CASE(int8_t);
SWITCH_CASE(int16_t);
SWITCH_CASE(int32_t);
SWITCH_CASE(int64_t);
default:
return ppl::common::RC_UNSUPPORTED;
}
#undef SWITCH_CASE
}
} else if (output_shape->GetDataFormat() == ppl::common::DATAFORMAT_NHWC) {
// nhwc, axis == 1 means last dim
// Two-input fp16 channel concat gets dedicated kernels: vectorized float4
// copies when both widths are 8-aligned, zero-filling scalar copies otherwise.
if (output_shape->GetDataType() == ppl::common::DATATYPE_FLOAT16 && num_inputs == 2 &&
axis == 1) {
if (!(input_dims[0][axis] & 0x7) && !(input_dims[1][axis] & 0x7)) {
int block_size = 256;
// 8 halves == one float4, hence the shift by 3.
int channel_shift = 3;
int grid_size = ((output_elems >> channel_shift) + block_size - 1) / block_size;
int axis_width0 = input_dims[0][axis] >> channel_shift;
int axis_width1 = input_dims[1][axis] >> channel_shift;
int inner_dims = axis_width0 + axis_width1;
hipLaunchKernelGGL(( ppl_cukernel_concat_nhwc_two_inputs), dim3(grid_size), dim3(block_size), 0, stream, output_elems >> channel_shift,
inner_dims, axis_width0, axis_width1, (const float4*)inputs[0], (const float4*)inputs[1], (float4*)output);
} else {
int block_size = 256;
int grid_size = (output_elems + block_size - 1) / block_size;
int axis_width0 = input_dims[0][axis];
int pad_axis_width0 = Align(axis_width0, NHWC_ALIGNED_AXIS);
int axis_width1 = input_dims[1][axis];
int pad_axis_width1 = Align(axis_width1, NHWC_ALIGNED_AXIS);
int inner_dims = axis_width0 + axis_width1;
int pad_inner_dims = Align(inner_dims, NHWC_ALIGNED_AXIS);
hipLaunchKernelGGL(( ppl_cukernel_concat_nhwc_two_inputs), dim3(grid_size), dim3(block_size), 0, stream, output_elems,
inner_dims, pad_inner_dims, axis_width0, pad_axis_width0, axis_width1, pad_axis_width1,
(const int16_t*)inputs[0], (const int16_t*)inputs[1], (int16_t*)output);
}
return ppl::common::RC_SUCCESS;
}
// General NHWC path: reorder dims into NHWC order (channel last), build
// padded output strides once, then launch one stride-mapping kernel per
// unmasked input.
int axis_offset = 0;
std::vector<int32_t> nhwc_output_padded_dims(num_dims);
nhwc_output_padded_dims[num_dims - 1] = output_shape->GetDim(1) +
output_shape->GetPadding0(1) + output_shape->GetPadding1(1);
int jump_step = 0;
for(int it = 0; it < num_dims - 1; ++it) {
if (it == 1) jump_step = 1;
nhwc_output_padded_dims[it] = output_shape->GetDim(it + jump_step);
}
GArray<int64_t> output_padded_strides(num_dims);
int64_t acc_output_stride = 1;
for(int it = num_dims - 1; it >= 0; --it) {
output_padded_strides[it] = acc_output_stride;
acc_output_stride *= nhwc_output_padded_dims[it];
}
#define SWITCH_CASE(TYPE) \
case sizeof(TYPE): \
{ \
for (int j = 0; j < num_inputs; ++j) { \
if (!(mask & (1 << j))) { \
int nhwc_axis = (axis == 1) ? num_dims - 1 : axis - 1; \
nhwc_axis = (axis == 0) ? 0 : nhwc_axis; \
std::vector<int32_t> nhwc_input_dims(num_dims); \
std::vector<int32_t> nhwc_input_padded_dims(num_dims); \
nhwc_input_dims[num_dims - 1] = input_dims[j][1]; \
nhwc_input_padded_dims[num_dims - 1] = input_padded_dims[j][1]; \
jump_step = 0; \
for(int it = 0; it < num_dims - 1; ++it) { \
if (it == 1) jump_step = 1; \
nhwc_input_dims[it] = input_dims[j][it + jump_step]; \
nhwc_input_padded_dims[it] = input_padded_dims[j][it + jump_step]; \
} \
GArray<DivModFast> input_strides_fast(num_dims); \
GArray<int64_t> input_padded_strides(num_dims); \
int64_t acc_input_stride = 1, acc_input_padded_stride = 1; \
for(int it = num_dims - 1; it >= 0; --it) { \
input_strides_fast[it] = DivModFast(acc_input_stride); \
input_padded_strides[it] = acc_input_padded_stride; \
acc_input_stride *= nhwc_input_dims[it]; \
acc_input_padded_stride *= nhwc_input_padded_dims[it]; \
} \
int input_axis_width = nhwc_input_dims[nhwc_axis]; \
int64_t num_elems = 1; \
for(int it = 0; it < num_dims; ++it) num_elems *= nhwc_input_dims[it]; \
int block_size = 256; \
int grid_size = (num_elems + block_size - 1) / block_size; \
hipLaunchKernelGGL(( ppl_cukernel_concat_nhwc), dim3(grid_size), dim3(block_size), 0, stream, \
num_elems, num_dims, nhwc_axis, axis_offset, \
input_strides_fast, input_padded_strides, \
output_padded_strides, (const TYPE*)inputs[j], (TYPE*)output); \
axis_offset += input_axis_width; \
} \
} \
return ppl::common::RC_SUCCESS; \
}
switch(ppl::common::GetSizeOfDataType(output_shape->GetDataType())) {
SWITCH_CASE(int8_t);
SWITCH_CASE(int16_t);
SWITCH_CASE(int32_t);
SWITCH_CASE(int64_t);
default:
return ppl::common::RC_UNSUPPORTED;
}
#undef SWITCH_CASE
} else {
return ppl::common::RC_UNSUPPORTED;
}
}
| 74eb8065106725cebc138e27d5b94947fe8a9b1a.cu | #include "cudakernel/memory/concat.h"
#include "cudakernel/common/divmod_fast.h"
#include "cudakernel/common/memory_utils.h"
#include "cudakernel/common/common.h"
#include "ppl/nn/common/tensor_shape.h"
#include "ppl/common/retcode.h"
#define NHWC_ALIGNED_AXIS (8)
// Generic NDArray concat copy: scatters one input tensor into its slice of the
// output along the concat axis.
//   num_elems            - total element count of this input tensor
//   inputs               - source buffer for this single input
//   concat_size          - product of the dims after the concat axis
//   top_axis_width       - output's width along the concat axis
//   num_elems_inner_fast - fast divmod by this input's inner size
//   axis_offset          - where this input starts along the output axis
template <typename T>
__global__ void ppl_cukernel_concat(
    int64_t num_elems,
    const T* inputs,
    int64_t concat_size,
    int64_t top_axis_width,
    DivModFast num_elems_inner_fast,
    int axis_offset,
    T* output)
{
    const int64_t grid_stride = (int64_t)blockDim.x * gridDim.x;
    int64_t idx = (int64_t)blockIdx.x * blockDim.x + threadIdx.x;
    // Grid-stride loop over this input's flat index space.
    for (; idx < num_elems; idx += grid_stride) {
        int outer, inner;
        num_elems_inner_fast.divmod(idx, outer, inner);
        // Shift the concat-axis coordinate by this input's offset in the output.
        const int64_t dst = (outer * top_axis_width + axis_offset) * concat_size + inner;
        output[dst] = inputs[idx];
    }
}
// Two-input last-axis concat where each input contributes exactly one element
// per output pair: output element i (viewed as T2) packs input0[i] and
// input1[i] side by side. Call sites pair T1/T2 so sizeof(T2) == 2*sizeof(T1)
// (e.g. int8_t/int16_t ... int64_t/float4).
// Launch contract: blockDim.x == 256 (the shared buffer is sized for it).
template <typename T1, typename T2>
__global__ void __launch_bounds__(256) ppl_cukernel_concat_two_inputs(
int64_t num_elems,
const T1* input0,
const T1* input1,
T2* output)
{
for (int64_t i = (int64_t)blockIdx.x * blockDim.x + threadIdx.x;
i < num_elems;
i += (int64_t)blockDim.x * gridDim.x) {
int tid = threadIdx.x;
// Staging buffer: each thread writes its own T1 pair, then reads the same
// bytes back as one T2. No __syncthreads() is required because a thread
// only ever touches its own two slots (buffer[2*tid] and buffer[2*tid+1]).
__shared__ T1 buffer[2 * 256];
buffer[2 * tid] = input0[i]; buffer[2 * tid + 1] = input1[i];
T2 *buffer_ptr = reinterpret_cast<T2*>(buffer);
output[i] = buffer_ptr[tid];
}
}
// Padding-aware NHWC concat for one input tensor.
// Iterates the logical (unpadded) index space of the input; for each element
// it rebuilds the multi-dim coordinate via fast divmod, maps it through the
// input's padded strides to locate the source element, shifts the concat-axis
// coordinate by axis_offset, and maps through the output's padded strides.
//   num_elems   - logical (unpadded) element count of this input
//   nhwc_axis   - concat axis position after NHWC dim reordering
//   axis_offset - this input's starting offset along the output concat axis
template<typename T>
__global__ void ppl_cukernel_concat_nhwc(
int64_t num_elems,
int num_dims,
int nhwc_axis,
int axis_offset,
GArray<DivModFast> input_strides_fast,
GArray<int64_t> input_padded_strides,
GArray<int64_t> output_padded_strides,
const T* input,
T* output)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= num_elems) return;
int64_t output_offset = 0, input_offset = 0;
int idx, remain = index;
// Peel off one coordinate per dimension, outermost first.
for(int it = 0; it < num_dims; ++it) {
input_strides_fast[it].divmod(remain, idx, remain);
input_offset += idx * input_padded_strides[it];
// Only the concat axis is displaced in the output tensor.
idx = (it == nhwc_axis) ? idx + axis_offset : idx;
output_offset += idx * output_padded_strides[it];
}
output[output_offset] = input[input_offset];
}
// Two-input NHWC channel concat with no padding anywhere: each output element
// is addressed as (pixel, channel) with inner_dims = axis_width0 + axis_width1
// channels; channels below axis_width0 come from input0, the rest from input1.
template <typename T1, typename T2>
__global__ void __launch_bounds__(256) ppl_cukernel_concat_nhwc_two_inputs(
    int64_t num_elems,
    int inner_dims,
    int axis_width0,
    int axis_width1,
    const T1* input0,
    const T1* input1,
    T2* output)
{
    const int64_t step = (int64_t)blockDim.x * gridDim.x;
    for (int64_t i = (int64_t)blockIdx.x * blockDim.x + threadIdx.x;
         i < num_elems;
         i += step) {
        const int channel = i % inner_dims;
        const int pixel = i / inner_dims;
        if (channel < axis_width0) {
            // Element lives in the first input's channel range.
            output[i] = input0[pixel * axis_width0 + channel];
        } else {
            // Element lives in the second input's channel range.
            output[i] = input1[pixel * axis_width1 + (channel - axis_width0)];
        }
    }
}
// Two-input NHWC channel concat where channel widths are not 8-aligned:
// iterates the PADDED output layout directly. Positions that fall inside an
// input's real channel range copy from that input's padded row; positions in
// the alignment padding (axis_offset >= real width) are zero-filled.
template <typename T1, typename T2>
__global__ void __launch_bounds__(256) ppl_cukernel_concat_nhwc_two_inputs(
int64_t num_elems,
int inner_dims,
int pad_inner_dims,
int axis_width0,
int pad_axis_width0,
int axis_width1,
int pad_axis_width1,
const T1* input0,
const T1* input1,
T2* output)
{
for (int64_t i = (int64_t)blockIdx.x * blockDim.x + threadIdx.x;
i < num_elems;
i += (int64_t)blockDim.x * gridDim.x) {
// Decompose the padded output index into (pixel, padded channel).
int inner_idx = i % pad_inner_dims;
int outer_idx = i / pad_inner_dims;
// int output_offset = outer_idx * pad_inner_dims + inner_idx;
if (inner_idx >= axis_width0) {
// Channel belongs to input1's region (or its trailing padding).
int axis_offset = inner_idx - axis_width0;
int input_offset = outer_idx * pad_axis_width1 + axis_offset;
output[i] = axis_offset >= axis_width1 ? 0 : input1[input_offset];
} else {
// Channel belongs to input0's region (or its trailing padding).
int axis_offset = inner_idx;
int input_offset = outer_idx * pad_axis_width0 + axis_offset;
output[i] = axis_offset >= axis_width0 ? 0 : input0[input_offset];
}
}
}
// NHWC channel concat when no channel padding exists. Output element index is
// outer_pixel * top_axis_width + axis_offset + inner_channel, where inner is
// the channel index within this input (num_elems_inner_fast divides by this
// input's channel width).
// NOTE(review): `concat_size` is accepted but never used in the index math
// (the caller passes num_in_elems for it) — presumably fine for the axis==1
// NHWC fast path where the channel axis is innermost, but worth confirming
// against PPLCUDAConcatNoPaddingForwardImp.
template <typename T>
__global__ void ppl_cukernel_concat_nhwc_nopadding(
int64_t num_elems,
const T* inputs,
int64_t concat_size,
int64_t top_axis_width,
DivModFast num_elems_inner_fast,
int axis_offset,
T* output)
{
for (int64_t i = (int64_t)blockIdx.x * blockDim.x + threadIdx.x;
i < num_elems;
i += (int64_t)blockDim.x * gridDim.x) {
int outer_idx, inner_idx;
num_elems_inner_fast.divmod(i, outer_idx, inner_idx);
int64_t top_idx = inner_idx + (outer_idx * top_axis_width + axis_offset);
output[top_idx] = inputs[i];
}
}
// Returns true when the no-padding NHWC fast path may be used: the output is
// NHWC, the concat runs along the channel axis (axis == 1), and every input
// is already unpadded along that axis.
bool IsConcatNoPadding(
    int axis,
    int num_inputs,
    int* input_dims[],
    int* input_padded_dims[],
    ppl::nn::TensorShape* output_shape,
    int mask)
{
    const bool nhwc_channel_concat =
        (output_shape->GetDataFormat() == ppl::common::DATAFORMAT_NHWC) && (axis == 1);
    if (!nhwc_channel_concat)
        return false;
    for (int i = 0; i < num_inputs; ++i) {
        // Any padding along the concat axis disqualifies the fast path.
        if (input_padded_dims[i][axis] != input_dims[i][axis])
            return false;
    }
    return true;
}
// NHWC channel-concat fast path (no padding on any input's channel axis).
// Launches one ppl_cukernel_concat_nhwc_nopadding per unmasked input; masked
// inputs (bit j of `mask` set) are skipped but still advance axis_offset.
// Dispatch is by element byte-size, so one instantiation covers all dtypes of
// that width.
ppl::common::RetCode PPLCUDAConcatNoPaddingForwardImp(
cudaStream_t stream,
int axis,
int num_inputs,
int* input_dims[],
int* input_padded_dims[],
const void* inputs[],
ppl::nn::TensorShape* output_shape,
void* output,
int mask)
{
// Elements per "pixel" row: total output elements divided by the channel width.
int64_t num_elems = output_shape->GetElementsIncludingPadding() / output_shape->GetDim(axis);
int64_t output_axis_width = output_shape->GetDim(axis);
int64_t axis_offset = 0;
// NOTE(review): num_in_elems is passed both as the element count and as the
// (unused) concat_size argument of the kernel — confirm intent there.
#define SWITCH_CASE(TYPE) \
case sizeof(TYPE): \
{ \
for (int j = 0; j < num_inputs; ++j) { \
int input_axis_width = input_dims[j][axis]; \
int num_in_elems = num_elems * input_axis_width; \
if (!(mask & (1 << j))) { \
if (num_in_elems > 0) { \
DivModFast num_elems_inner_fast = DivModFast(input_axis_width); \
int block_size = 256; \
int grid_size = (num_in_elems + block_size - 1) / block_size; \
ppl_cukernel_concat_nhwc_nopadding<<<grid_size, block_size, 0, stream>>>(num_in_elems, \
(const TYPE*)inputs[j], num_in_elems, output_axis_width, num_elems_inner_fast, \
axis_offset, (TYPE*)output); \
} \
} \
axis_offset += input_axis_width; \
} \
return ppl::common::RC_SUCCESS; \
}
// Dispatch on element width in bytes (1/2/4/8).
switch(ppl::common::GetSizeOfDataType(output_shape->GetDataType())) {
SWITCH_CASE(int8_t);
SWITCH_CASE(int16_t);
SWITCH_CASE(int32_t);
SWITCH_CASE(int64_t);
default:
return ppl::common::RC_UNSUPPORTED;
}
#undef SWITCH_CASE
}
// General concat entry point: copies every unmasked input tensor into its
// slice of `output` along `axis`. Dispatches on data format (NDARRAY vs NHWC),
// element byte-size, and special-cased two-input shapes. Bit j of `mask` set
// means input j is skipped (its data is assumed in place), though its axis
// width still advances axis_offset.
ppl::common::RetCode PPLCUDAConcatForwardImp(
cudaStream_t stream,
int axis,
int num_inputs,
int* input_dims[],
int* input_padded_dims[],
const void* inputs[],
ppl::nn::TensorShape* output_shape,
void* output,
int mask)
{
// Fast path: unpadded NHWC channel concat.
if (IsConcatNoPadding(axis, num_inputs, input_dims, input_padded_dims, output_shape, mask)) {
return PPLCUDAConcatNoPaddingForwardImp(stream, axis, num_inputs, input_dims,
input_padded_dims, inputs, output_shape, output, mask);
}
int num_dims = output_shape->GetDimCount();
int output_elems = output_shape->GetElementsIncludingPadding();
if (output_shape->GetDataFormat() == ppl::common::DATAFORMAT_NDARRAY) {
// Special case: two inputs, last-axis concat, width 1 each -> interleave
// via the shared-memory pairing kernel (T2 is twice the width of T1).
if (num_inputs == 2 && axis == (num_dims - 1) && input_dims[0][axis] == 1 && input_dims[1][axis] == 1) {
// num_elems = product of input0's dims (loop body is in the increment clause).
int num_elems = 1;
for(int it = 0; it < num_dims; num_elems *= input_dims[0][it], ++it);
#define SWITCH_CASE(TYPE1, TYPE2) \
case sizeof(TYPE1): \
{ \
int block_size = 256; \
int grid_size = (num_elems + block_size - 1) / block_size; \
ppl_cukernel_concat_two_inputs<<<grid_size, block_size, 0, stream>>>(num_elems, \
(const TYPE1*)inputs[0], (const TYPE1*)inputs[1], (TYPE2*)output); \
return ppl::common::RC_SUCCESS; \
}
switch(ppl::common::GetSizeOfDataType(output_shape->GetDataType())) {
SWITCH_CASE(int8_t, int16_t);
SWITCH_CASE(int16_t, int32_t);
SWITCH_CASE(int32_t, int64_t);
SWITCH_CASE(int64_t, float4);
default:
return ppl::common::RC_UNSUPPORTED;
}
#undef SWITCH_CASE
} else {
// General NDArray path: one scatter kernel per unmasked input.
// concat_size = product of dims after the axis; num_concats = product before.
int64_t concat_size = 1;
int64_t num_concats = 1;
for (int i = num_dims - 1; i > axis; --i)
concat_size *= input_dims[0][i];
for (int i = 0; i < axis; ++i)
num_concats *= input_dims[0][i];
int axis_offset = 0;
int output_axis_width = output_shape->GetDim(axis);
#define SWITCH_CASE(TYPE) \
case sizeof(TYPE): \
{ \
for (int j = 0; j < num_inputs; ++j) { \
int input_axis_width = input_dims[j][axis]; \
if (!(mask & (1 << j))) { \
int64_t input_concat_size = input_axis_width * concat_size; \
int64_t num_elems = input_concat_size * num_concats; \
if (num_elems > 0) { \
DivModFast num_elems_inner_fast = DivModFast(input_concat_size); \
int block_size = 256; \
int grid_size = (num_elems + block_size - 1) / block_size; \
ppl_cukernel_concat<<<grid_size, block_size, 0, stream>>>(num_elems, \
(const TYPE*)inputs[j], concat_size, output_axis_width, num_elems_inner_fast, \
axis_offset, (TYPE*)output); \
} \
} \
axis_offset += input_axis_width; \
} \
return ppl::common::RC_SUCCESS; \
}
switch(ppl::common::GetSizeOfDataType(output_shape->GetDataType())) {
SWITCH_CASE(int8_t);
SWITCH_CASE(int16_t);
SWITCH_CASE(int32_t);
SWITCH_CASE(int64_t);
default:
return ppl::common::RC_UNSUPPORTED;
}
#undef SWITCH_CASE
}
} else if (output_shape->GetDataFormat() == ppl::common::DATAFORMAT_NHWC) {
// nhwc, axis == 1 means last dim
// Two-input fp16 channel concat gets dedicated kernels: vectorized float4
// copies when both widths are 8-aligned, zero-filling scalar copies otherwise.
if (output_shape->GetDataType() == ppl::common::DATATYPE_FLOAT16 && num_inputs == 2 &&
axis == 1) {
if (!(input_dims[0][axis] & 0x7) && !(input_dims[1][axis] & 0x7)) {
int block_size = 256;
// 8 halves == one float4, hence the shift by 3.
int channel_shift = 3;
int grid_size = ((output_elems >> channel_shift) + block_size - 1) / block_size;
int axis_width0 = input_dims[0][axis] >> channel_shift;
int axis_width1 = input_dims[1][axis] >> channel_shift;
int inner_dims = axis_width0 + axis_width1;
ppl_cukernel_concat_nhwc_two_inputs<<<grid_size, block_size, 0, stream>>>(output_elems >> channel_shift,
inner_dims, axis_width0, axis_width1, (const float4*)inputs[0], (const float4*)inputs[1], (float4*)output);
} else {
int block_size = 256;
int grid_size = (output_elems + block_size - 1) / block_size;
int axis_width0 = input_dims[0][axis];
int pad_axis_width0 = Align(axis_width0, NHWC_ALIGNED_AXIS);
int axis_width1 = input_dims[1][axis];
int pad_axis_width1 = Align(axis_width1, NHWC_ALIGNED_AXIS);
int inner_dims = axis_width0 + axis_width1;
int pad_inner_dims = Align(inner_dims, NHWC_ALIGNED_AXIS);
ppl_cukernel_concat_nhwc_two_inputs<<<grid_size, block_size, 0, stream>>>(output_elems,
inner_dims, pad_inner_dims, axis_width0, pad_axis_width0, axis_width1, pad_axis_width1,
(const int16_t*)inputs[0], (const int16_t*)inputs[1], (int16_t*)output);
}
return ppl::common::RC_SUCCESS;
}
// General NHWC path: reorder dims into NHWC order (channel last), build
// padded output strides once, then launch one stride-mapping kernel per
// unmasked input.
int axis_offset = 0;
std::vector<int32_t> nhwc_output_padded_dims(num_dims);
nhwc_output_padded_dims[num_dims - 1] = output_shape->GetDim(1) +
output_shape->GetPadding0(1) + output_shape->GetPadding1(1);
int jump_step = 0;
for(int it = 0; it < num_dims - 1; ++it) {
if (it == 1) jump_step = 1;
nhwc_output_padded_dims[it] = output_shape->GetDim(it + jump_step);
}
GArray<int64_t> output_padded_strides(num_dims);
int64_t acc_output_stride = 1;
for(int it = num_dims - 1; it >= 0; --it) {
output_padded_strides[it] = acc_output_stride;
acc_output_stride *= nhwc_output_padded_dims[it];
}
#define SWITCH_CASE(TYPE) \
case sizeof(TYPE): \
{ \
for (int j = 0; j < num_inputs; ++j) { \
if (!(mask & (1 << j))) { \
int nhwc_axis = (axis == 1) ? num_dims - 1 : axis - 1; \
nhwc_axis = (axis == 0) ? 0 : nhwc_axis; \
std::vector<int32_t> nhwc_input_dims(num_dims); \
std::vector<int32_t> nhwc_input_padded_dims(num_dims); \
nhwc_input_dims[num_dims - 1] = input_dims[j][1]; \
nhwc_input_padded_dims[num_dims - 1] = input_padded_dims[j][1]; \
jump_step = 0; \
for(int it = 0; it < num_dims - 1; ++it) { \
if (it == 1) jump_step = 1; \
nhwc_input_dims[it] = input_dims[j][it + jump_step]; \
nhwc_input_padded_dims[it] = input_padded_dims[j][it + jump_step]; \
} \
GArray<DivModFast> input_strides_fast(num_dims); \
GArray<int64_t> input_padded_strides(num_dims); \
int64_t acc_input_stride = 1, acc_input_padded_stride = 1; \
for(int it = num_dims - 1; it >= 0; --it) { \
input_strides_fast[it] = DivModFast(acc_input_stride); \
input_padded_strides[it] = acc_input_padded_stride; \
acc_input_stride *= nhwc_input_dims[it]; \
acc_input_padded_stride *= nhwc_input_padded_dims[it]; \
} \
int input_axis_width = nhwc_input_dims[nhwc_axis]; \
int64_t num_elems = 1; \
for(int it = 0; it < num_dims; ++it) num_elems *= nhwc_input_dims[it]; \
int block_size = 256; \
int grid_size = (num_elems + block_size - 1) / block_size; \
ppl_cukernel_concat_nhwc<<<grid_size, block_size, 0, stream>>>( \
num_elems, num_dims, nhwc_axis, axis_offset, \
input_strides_fast, input_padded_strides, \
output_padded_strides, (const TYPE*)inputs[j], (TYPE*)output); \
axis_offset += input_axis_width; \
} \
} \
return ppl::common::RC_SUCCESS; \
}
switch(ppl::common::GetSizeOfDataType(output_shape->GetDataType())) {
SWITCH_CASE(int8_t);
SWITCH_CASE(int16_t);
SWITCH_CASE(int32_t);
SWITCH_CASE(int64_t);
default:
return ppl::common::RC_UNSUPPORTED;
}
#undef SWITCH_CASE
} else {
return ppl::common::RC_UNSUPPORTED;
}
}
|
cb5bb6e41dbc402b83220606d466bc44143e8a49.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <cstdlib>
#include <vector>
// Single-block counting ("bucket") sort of key[0..blockDim.x). Each thread
// owns one key. Phase 1: build a histogram in `bucket` with atomics. Phase 2:
// thread t walks the buckets, subtracting counts until its rank t falls inside
// bucket `buck_val`, then writes that bucket index back as its sorted key.
// Launch contract: exactly one block, n threads, and `bucket` zeroed by the
// caller before launch. (__syncthreads also orders the histogram writes
// before the reads, which is why one block is required.)
__global__ void bucket_sort(int *key, int* bucket){
atomicAdd(&bucket[key[threadIdx.x]], 1);
__syncthreads();
int buck_val = 0;
// Empty-body loop: decrement rank i by each bucket's count until it fits.
for (int i = threadIdx.x; i >= bucket[buck_val]; i-=bucket[buck_val++]);
key[threadIdx.x] = buck_val;
}
// Demo driver: fills `key` with n random values in [0, range), sorts them on
// the GPU with a single-block counting sort, and prints before/after.
int main() {
  int n = 50;           // number of keys (== threads in the one launched block)
  int range = 5;        // keys take values 0..range-1
  int *key, *bucket;
  hipMallocManaged(&key, n*sizeof(int));
  hipMallocManaged(&bucket, range*sizeof(int));
  // initialize to 0
  // BUG FIX: the previous call zeroed only `range` BYTES (5), leaving most of
  // the `range`-int histogram (range*sizeof(int) bytes) uninitialized;
  // hipMemset's third argument is a byte count.
  hipMemset(bucket, 0, range*sizeof(int));
  for (int i=0; i<n; i++) {
    key[i] = rand() % range;
    printf("%d ",key[i]);
  }
  printf("\n");
  //only launching 1 block because n is small, could be extended but would need cooperative_groups for grid synchronization
  hipLaunchKernelGGL(( bucket_sort), dim3(1), dim3(n), 0, 0, key, bucket);
  // Wait for the kernel before reading managed memory on the host.
  hipDeviceSynchronize();
  for (int i=0; i<n; i++) {
    printf("%d ",key[i]);
  }
  printf("\n");
  /*for (int i=0; i<range; i++) {
    printf("%d ",bucket[i]);
  }
  printf("\n");*/
  hipFree(key);
  hipFree(bucket);
}
| cb5bb6e41dbc402b83220606d466bc44143e8a49.cu | #include <cstdio>
#include <cstdlib>
#include <vector>
// Single-block counting ("bucket") sort of key[0..blockDim.x). Each thread
// owns one key. Phase 1: build a histogram in `bucket` with atomics. Phase 2:
// thread t walks the buckets, subtracting counts until its rank t falls inside
// bucket `buck_val`, then writes that bucket index back as its sorted key.
// Launch contract: exactly one block, n threads, and `bucket` zeroed by the
// caller before launch. (__syncthreads also orders the histogram writes
// before the reads, which is why one block is required.)
__global__ void bucket_sort(int *key, int* bucket){
atomicAdd(&bucket[key[threadIdx.x]], 1);
__syncthreads();
int buck_val = 0;
// Empty-body loop: decrement rank i by each bucket's count until it fits.
for (int i = threadIdx.x; i >= bucket[buck_val]; i-=bucket[buck_val++]);
key[threadIdx.x] = buck_val;
}
// Demo driver: fills `key` with n random values in [0, range), sorts them on
// the GPU with a single-block counting sort, and prints before/after.
int main() {
  int n = 50;           // number of keys (== threads in the one launched block)
  int range = 5;        // keys take values 0..range-1
  int *key, *bucket;
  cudaMallocManaged(&key, n*sizeof(int));
  cudaMallocManaged(&bucket, range*sizeof(int));
  // initialize to 0
  // BUG FIX: the previous call zeroed only `range` BYTES (5), leaving most of
  // the `range`-int histogram (range*sizeof(int) bytes) uninitialized;
  // cudaMemset's third argument is a byte count.
  cudaMemset(bucket, 0, range*sizeof(int));
  for (int i=0; i<n; i++) {
    key[i] = rand() % range;
    printf("%d ",key[i]);
  }
  printf("\n");
  //only launching 1 block because n is small, could be extended but would need cooperative_groups for grid synchronization
  bucket_sort<<<1, n>>>(key, bucket);
  // Wait for the kernel before reading managed memory on the host.
  cudaDeviceSynchronize();
  for (int i=0; i<n; i++) {
    printf("%d ",key[i]);
  }
  printf("\n");
  /*for (int i=0; i<range; i++) {
    printf("%d ",bucket[i]);
  }
  printf("\n");*/
  cudaFree(key);
  cudaFree(bucket);
}
|
52615d98fafc39821036cb5010d7e1f74f7124e4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright (c) 2018-present, Facebook, Inc.
* All rights reserved.
*
*/
#include "dynamicconv_cuda.cuh"
#include "dynamicconv_cuda_forward.cu"
#include "dynamicconv_cuda_backward.cu"
#include "../cuda_utils.cu"
// FS is filter size and kernels are specialized for filter sizes
// Forward pass of a dynamic (per-position) 1-D convolution.
// Grid layout: blockIdx.x = batch element, blockIdx.y = feature channel;
// blockDim.x == SB (asserted below), one thread per output time step of an
// SB-wide chunk. FS is the filter length; the weights are indexed as
// [batch][head][k][t] (see filterOffset arithmetic), i.e. a distinct filter
// tap per output position. The input chunk plus an FS-wide halo is staged
// in shared memory by the project helper load_input_to_shared.
template<int FS, int SB, int padding_l, typename scalar_t>
__global__
void dynamicconv_forward_kernel(const scalar_t* input,
const scalar_t* weight,
int minibatch,
int sequenceLength,
int numFeatures,
int numFiltersInBlock,
int numHeads,
scalar_t* output) {
assert(blockDim.x == SB);
const int tid = threadIdx.x;
const int batchIdx = blockIdx.x;
const int featureIdx = blockIdx.y;
// features are grouped into heads of numFiltersInBlock channels each
const int head = featureIdx / numFiltersInBlock;
const int IOOffset = batchIdx * numFeatures * sequenceLength
+ featureIdx * sequenceLength;
const scalar_t* inputFeature = &input[IOOffset];
scalar_t* outputFeature = &output[IOOffset];
scalar_t filter[FS];
// one SB-wide input chunk plus FS halo elements, zero-padded at the edges
__shared__ scalar_t tempInput[SB + FS];
zeroSharedMem<FS, SB, padding_l>(tempInput);
const int numIterations = divUp<int, int>(sequenceLength, SB);
for (int i = 0; i < numIterations; ++i) {
// barrier before refilling: previous iteration's reads must be done
__syncthreads();
const int inputOffset = i * SB;
load_input_to_shared<FS, SB, padding_l>(inputFeature, inputOffset,
sequenceLength, i,
numIterations, false, tempInput);
// barrier after filling: tile must be complete before any thread reads it
__syncthreads();
if (inputOffset + tid < sequenceLength) {
// gather this output position's FS filter taps from the weight tensor
#pragma unroll
for (int k = 0; k < FS; ++k) {
const int filterOffset = batchIdx * numHeads * FS * sequenceLength
+ head * FS * sequenceLength
+ k * sequenceLength
+ i * SB + tid;
filter[k] = weight[filterOffset];
}
scalar_t out = scalar_t(0.0);
// dot product of the filter with the shared-memory input window
#pragma unroll
for (int k = 0; k < FS; ++k) {
out += filter[k] * tempInput[tid + k];
}
outputFeature[inputOffset + tid] = out;
}
}
}
// Backward pass of the dynamic convolution: computes gradients w.r.t. both
// the input (gradInput) and the per-position weights (gradWeight).
// Grid layout: blockIdx.x = batch, blockIdx.y = head, blockIdx.z = SB-wide
// sequence chunk; blockDim.x == SB (asserted below). The input gradient is a
// transposed convolution, hence the flipped padding (`padding`) and the
// time-reversed filter gather into `bfilter` (note the FS - k - 1 index).
template<int FS, int SB, int padding_l, typename scalar_t>
__global__
void dynamicconv_backward_kernel(
const scalar_t* gradOutput, // B * C * T
const scalar_t* input, // B * C * T
const scalar_t* weight,
int minibatch,
int sequenceLength,
int numFeatures,
int numFiltersInBlock,
int numHeads,
scalar_t* gradWeight,
scalar_t* gradInput) { // B * H * k * T
assert(blockDim.x == SB);
// each block operates on a single batch and filter head
const int tid = threadIdx.x;
const int batchIdx = blockIdx.x;
const int headIdx = blockIdx.y;
const int chunkIdx = blockIdx.z;
const int numChunks = divUp<int, int>(sequenceLength, SB);
const int inputOffset = chunkIdx * SB;
// initialize shared memory for output gradient and input
__shared__ scalar_t tempGradOutput[SB + FS];
__shared__ scalar_t tempInput[SB + FS];
// padding of the transposed convolution (mirror of padding_l)
const int padding = FS - padding_l - 1;
zeroSharedMem<FS, SB, padding>(tempGradOutput);
zeroSharedMem<FS, SB, padding_l>(tempInput);
// initialize local filter and weight gradient sum arrays
scalar_t tempGradSum[FS];
scalar_t bfilter[FS];
// gather the time-reversed ("backward") filter for this output position;
// taps whose source index falls outside the sequence contribute zero
for (int k = 0; k < FS; ++k) {
tempGradSum[k] = scalar_t(0.0);
int idxOffset = inputOffset + tid + k - padding;
if (idxOffset >= 0 && idxOffset < sequenceLength) {
int bfilterOffset = batchIdx * numHeads * FS * sequenceLength
+ headIdx * FS * sequenceLength
+ (FS - k - 1) * sequenceLength
+ idxOffset;
bfilter[k] = weight[bfilterOffset];
} else {
bfilter[k] = scalar_t(0.0);
}
}
// iterate over filter block
for (int featureIdx = 0; featureIdx < numFiltersInBlock; ++featureIdx) {
// barrier before refilling the shared tiles for the next channel
__syncthreads();
// load input and output gradient for this channel and chunk
const int IOOffset = batchIdx * numFeatures * sequenceLength
+ (headIdx * numFiltersInBlock + featureIdx) * sequenceLength;
const scalar_t* inputFeature = &input[IOOffset];
const scalar_t* gradOutputFeature = &gradOutput[IOOffset];
scalar_t* gradInputFeature = &gradInput[IOOffset];
load_input_to_shared<FS, SB, padding>(gradOutputFeature, inputOffset,
sequenceLength, chunkIdx,
numChunks, true, tempGradOutput);
load_input_to_shared<FS, SB, padding_l>(inputFeature, inputOffset,
sequenceLength, chunkIdx,
numChunks, true, tempInput);
__syncthreads();
// sum input and weight gradients
scalar_t out = scalar_t(0.0);
#pragma unroll
for (int k = 0; k < FS; ++k) {
// weight gradient: input window times the chunk's output gradient;
// accumulated across all channels of the head (shared filter)
tempGradSum[k] += tempInput[tid + k] * tempGradOutput[tid + padding];
// input gradient: reversed filter convolved with the output gradient
out += bfilter[k] * tempGradOutput[tid + k];
}
if (inputOffset + tid < sequenceLength) {
gradInputFeature[inputOffset + tid] = out;
}
}
const int gradOffset = batchIdx * numHeads * FS * sequenceLength
+ headIdx * FS * sequenceLength;
scalar_t *gradWeightFeature = &gradWeight[gradOffset];
// write weight gradient
if (inputOffset + tid < sequenceLength) {
for (int k = 0; k < FS; ++k) {
const int outputOffset = k * sequenceLength + inputOffset + tid;
gradWeightFeature[outputOffset] = tempGradSum[k];
}
}
}
| 52615d98fafc39821036cb5010d7e1f74f7124e4.cu | /**
* Copyright (c) 2018-present, Facebook, Inc.
* All rights reserved.
*
*/
#include "dynamicconv_cuda.cuh"
#include "dynamicconv_cuda_forward.cu"
#include "dynamicconv_cuda_backward.cu"
#include "../cuda_utils.cu"
// FS is filter size and kernels are specialized for filter sizes
// Forward pass of a dynamic (per-position) 1-D convolution.
// Grid layout: blockIdx.x = batch element, blockIdx.y = feature channel;
// blockDim.x == SB (asserted below), one thread per output time step of an
// SB-wide chunk. FS is the filter length; the weights are indexed as
// [batch][head][k][t] (see filterOffset arithmetic), i.e. a distinct filter
// tap per output position. The input chunk plus an FS-wide halo is staged
// in shared memory by the project helper load_input_to_shared.
template<int FS, int SB, int padding_l, typename scalar_t>
__global__
void dynamicconv_forward_kernel(const scalar_t* input,
const scalar_t* weight,
int minibatch,
int sequenceLength,
int numFeatures,
int numFiltersInBlock,
int numHeads,
scalar_t* output) {
assert(blockDim.x == SB);
const int tid = threadIdx.x;
const int batchIdx = blockIdx.x;
const int featureIdx = blockIdx.y;
// features are grouped into heads of numFiltersInBlock channels each
const int head = featureIdx / numFiltersInBlock;
const int IOOffset = batchIdx * numFeatures * sequenceLength
+ featureIdx * sequenceLength;
const scalar_t* inputFeature = &input[IOOffset];
scalar_t* outputFeature = &output[IOOffset];
scalar_t filter[FS];
// one SB-wide input chunk plus FS halo elements, zero-padded at the edges
__shared__ scalar_t tempInput[SB + FS];
zeroSharedMem<FS, SB, padding_l>(tempInput);
const int numIterations = divUp<int, int>(sequenceLength, SB);
for (int i = 0; i < numIterations; ++i) {
// barrier before refilling: previous iteration's reads must be done
__syncthreads();
const int inputOffset = i * SB;
load_input_to_shared<FS, SB, padding_l>(inputFeature, inputOffset,
sequenceLength, i,
numIterations, false, tempInput);
// barrier after filling: tile must be complete before any thread reads it
__syncthreads();
if (inputOffset + tid < sequenceLength) {
// gather this output position's FS filter taps from the weight tensor
#pragma unroll
for (int k = 0; k < FS; ++k) {
const int filterOffset = batchIdx * numHeads * FS * sequenceLength
+ head * FS * sequenceLength
+ k * sequenceLength
+ i * SB + tid;
filter[k] = weight[filterOffset];
}
scalar_t out = scalar_t(0.0);
// dot product of the filter with the shared-memory input window
#pragma unroll
for (int k = 0; k < FS; ++k) {
out += filter[k] * tempInput[tid + k];
}
outputFeature[inputOffset + tid] = out;
}
}
}
// Backward pass of the dynamic convolution: computes gradients w.r.t. both
// the input (gradInput) and the per-position weights (gradWeight).
// Grid layout: blockIdx.x = batch, blockIdx.y = head, blockIdx.z = SB-wide
// sequence chunk; blockDim.x == SB (asserted below). The input gradient is a
// transposed convolution, hence the flipped padding (`padding`) and the
// time-reversed filter gather into `bfilter` (note the FS - k - 1 index).
template<int FS, int SB, int padding_l, typename scalar_t>
__global__
void dynamicconv_backward_kernel(
const scalar_t* gradOutput, // B * C * T
const scalar_t* input, // B * C * T
const scalar_t* weight,
int minibatch,
int sequenceLength,
int numFeatures,
int numFiltersInBlock,
int numHeads,
scalar_t* gradWeight,
scalar_t* gradInput) { // B * H * k * T
assert(blockDim.x == SB);
// each block operates on a single batch and filter head
const int tid = threadIdx.x;
const int batchIdx = blockIdx.x;
const int headIdx = blockIdx.y;
const int chunkIdx = blockIdx.z;
const int numChunks = divUp<int, int>(sequenceLength, SB);
const int inputOffset = chunkIdx * SB;
// initialize shared memory for output gradient and input
__shared__ scalar_t tempGradOutput[SB + FS];
__shared__ scalar_t tempInput[SB + FS];
// padding of the transposed convolution (mirror of padding_l)
const int padding = FS - padding_l - 1;
zeroSharedMem<FS, SB, padding>(tempGradOutput);
zeroSharedMem<FS, SB, padding_l>(tempInput);
// initialize local filter and weight gradient sum arrays
scalar_t tempGradSum[FS];
scalar_t bfilter[FS];
// gather the time-reversed ("backward") filter for this output position;
// taps whose source index falls outside the sequence contribute zero
for (int k = 0; k < FS; ++k) {
tempGradSum[k] = scalar_t(0.0);
int idxOffset = inputOffset + tid + k - padding;
if (idxOffset >= 0 && idxOffset < sequenceLength) {
int bfilterOffset = batchIdx * numHeads * FS * sequenceLength
+ headIdx * FS * sequenceLength
+ (FS - k - 1) * sequenceLength
+ idxOffset;
bfilter[k] = weight[bfilterOffset];
} else {
bfilter[k] = scalar_t(0.0);
}
}
// iterate over filter block
for (int featureIdx = 0; featureIdx < numFiltersInBlock; ++featureIdx) {
// barrier before refilling the shared tiles for the next channel
__syncthreads();
// load input and output gradient for this channel and chunk
const int IOOffset = batchIdx * numFeatures * sequenceLength
+ (headIdx * numFiltersInBlock + featureIdx) * sequenceLength;
const scalar_t* inputFeature = &input[IOOffset];
const scalar_t* gradOutputFeature = &gradOutput[IOOffset];
scalar_t* gradInputFeature = &gradInput[IOOffset];
load_input_to_shared<FS, SB, padding>(gradOutputFeature, inputOffset,
sequenceLength, chunkIdx,
numChunks, true, tempGradOutput);
load_input_to_shared<FS, SB, padding_l>(inputFeature, inputOffset,
sequenceLength, chunkIdx,
numChunks, true, tempInput);
__syncthreads();
// sum input and weight gradients
scalar_t out = scalar_t(0.0);
#pragma unroll
for (int k = 0; k < FS; ++k) {
// weight gradient: input window times the chunk's output gradient;
// accumulated across all channels of the head (shared filter)
tempGradSum[k] += tempInput[tid + k] * tempGradOutput[tid + padding];
// input gradient: reversed filter convolved with the output gradient
out += bfilter[k] * tempGradOutput[tid + k];
}
if (inputOffset + tid < sequenceLength) {
gradInputFeature[inputOffset + tid] = out;
}
}
const int gradOffset = batchIdx * numHeads * FS * sequenceLength
+ headIdx * FS * sequenceLength;
scalar_t *gradWeightFeature = &gradWeight[gradOffset];
// write weight gradient
if (inputOffset + tid < sequenceLength) {
for (int k = 0; k < FS; ++k) {
const int outputOffset = k * sequenceLength + inputOffset + tid;
gradWeightFeature[outputOffset] = tempGradSum[k];
}
}
}
|
93ffb28a7863f1cc43231946e897cd1337196bff.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2013-2018 Anton Kozhevnikov, Thomas Schulthess
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that
// the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the
// following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions
// and the following disclaimer in the documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
// PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/** \file checksum.cu
*
* \brief CUDA kernel for the calculation of checksum.
*/
#include "acc_common.hpp"
#include "acc_runtime.hpp"
/// Block-wide sum ("checksum") of an array of double-complex numbers.
/// Launched with a single block. Requires blockDim.x to be a power of two
/// (the pairwise tree reduction below reads lane tid + s) and
/// 2 * blockDim.x * sizeof(double) bytes of dynamic shared memory
/// (one double array for the real parts, one for the imaginary parts).
__global__ void double_complex_checksum_gpu_kernel
(
acc_complex_double_t const* ptr__,
size_t size__,
acc_complex_double_t *result__
)
{
int N = num_blocks(size__, blockDim.x);
ACC_DYNAMIC_SHARED( char, sdata_ptr)
double* sdata_x = (double*)&sdata_ptr[0];
double* sdata_y = (double*)&sdata_ptr[blockDim.x * sizeof(double)];
sdata_x[threadIdx.x] = 0.0;
sdata_y[threadIdx.x] = 0.0;
// block-stride accumulation: thread t sums elements t, t + blockDim.x, ...
for (int n = 0; n < N; n++) {
int j = n * blockDim.x + threadIdx.x;
if (j < size__) {
sdata_x[threadIdx.x] += ptr__[j].x;
sdata_y[threadIdx.x] += ptr__[j].y;
}
}
__syncthreads();
// pairwise tree reduction in shared memory; after the loop the block total
// sits in sdata_x[0] / sdata_y[0]
for (int s = 1; s < blockDim.x; s *= 2) {
if (threadIdx.x % (2 * s) == 0) {
sdata_x[threadIdx.x] = sdata_x[threadIdx.x] + sdata_x[threadIdx.x + s];
sdata_y[threadIdx.x] = sdata_y[threadIdx.x] + sdata_y[threadIdx.x + s];
}
__syncthreads();
}
// FIX: only one thread publishes the result; previously every thread of the
// block performed the same redundant store to the global result pointer.
if (threadIdx.x == 0) {
*result__ = make_accDoubleComplex(sdata_x[0], sdata_y[0]);
}
}
/// Host wrapper: sums `size__` double-complex numbers on the device and copies
/// the single-element result back to `result__` on the host.
/// Note the variable names are swapped relative to their roles: grid_t(64) is
/// the per-block THREAD count (64, a power of two as the kernel's reduction
/// requires) and grid_b(1) is the BLOCK count. Dynamic shared memory holds
/// two double arrays of blockDim.x elements each (real and imaginary parts).
extern "C" void double_complex_checksum_gpu(acc_complex_double_t const* ptr__,
size_t size__,
acc_complex_double_t* result__)
{
dim3 grid_t(64);
dim3 grid_b(1);
// 1-element scratch buffer on the device for the kernel's output
acc_complex_double_t* res;
res = acc::allocate<acc_complex_double_t>(1);
accLaunchKernel((double_complex_checksum_gpu_kernel), dim3(grid_b), dim3(grid_t), 2 * grid_t.x * sizeof(double), 0,
ptr__,
size__,
res
);
// blocking copy back to the host, then release the scratch buffer
acc::copyout(result__, res, 1);
acc::deallocate(res);
}
| 93ffb28a7863f1cc43231946e897cd1337196bff.cu | // Copyright (c) 2013-2018 Anton Kozhevnikov, Thomas Schulthess
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that
// the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the
// following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions
// and the following disclaimer in the documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
// PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/** \file checksum.cu
*
* \brief CUDA kernel for the calculation of checksum.
*/
#include "acc_common.hpp"
#include "acc_runtime.hpp"
/// Block-wide sum ("checksum") of an array of double-complex numbers.
/// Launched with a single block. Requires blockDim.x to be a power of two
/// (the pairwise tree reduction below reads lane tid + s) and
/// 2 * blockDim.x * sizeof(double) bytes of dynamic shared memory
/// (one double array for the real parts, one for the imaginary parts).
__global__ void double_complex_checksum_gpu_kernel
(
acc_complex_double_t const* ptr__,
size_t size__,
acc_complex_double_t *result__
)
{
int N = num_blocks(size__, blockDim.x);
ACC_DYNAMIC_SHARED( char, sdata_ptr)
double* sdata_x = (double*)&sdata_ptr[0];
double* sdata_y = (double*)&sdata_ptr[blockDim.x * sizeof(double)];
sdata_x[threadIdx.x] = 0.0;
sdata_y[threadIdx.x] = 0.0;
// block-stride accumulation: thread t sums elements t, t + blockDim.x, ...
for (int n = 0; n < N; n++) {
int j = n * blockDim.x + threadIdx.x;
if (j < size__) {
sdata_x[threadIdx.x] += ptr__[j].x;
sdata_y[threadIdx.x] += ptr__[j].y;
}
}
__syncthreads();
// pairwise tree reduction in shared memory; after the loop the block total
// sits in sdata_x[0] / sdata_y[0]
for (int s = 1; s < blockDim.x; s *= 2) {
if (threadIdx.x % (2 * s) == 0) {
sdata_x[threadIdx.x] = sdata_x[threadIdx.x] + sdata_x[threadIdx.x + s];
sdata_y[threadIdx.x] = sdata_y[threadIdx.x] + sdata_y[threadIdx.x + s];
}
__syncthreads();
}
// FIX: only one thread publishes the result; previously every thread of the
// block performed the same redundant store to the global result pointer.
if (threadIdx.x == 0) {
*result__ = make_accDoubleComplex(sdata_x[0], sdata_y[0]);
}
}
/// Host wrapper: sums `size__` double-complex numbers on the device and copies
/// the single-element result back to `result__` on the host.
/// Note the variable names are swapped relative to their roles: grid_t(64) is
/// the per-block THREAD count (64, a power of two as the kernel's reduction
/// requires) and grid_b(1) is the BLOCK count. Dynamic shared memory holds
/// two double arrays of blockDim.x elements each (real and imaginary parts).
extern "C" void double_complex_checksum_gpu(acc_complex_double_t const* ptr__,
size_t size__,
acc_complex_double_t* result__)
{
dim3 grid_t(64);
dim3 grid_b(1);
// 1-element scratch buffer on the device for the kernel's output
acc_complex_double_t* res;
res = acc::allocate<acc_complex_double_t>(1);
accLaunchKernel((double_complex_checksum_gpu_kernel), dim3(grid_b), dim3(grid_t), 2 * grid_t.x * sizeof(double), 0,
ptr__,
size__,
res
);
// blocking copy back to the host, then release the scratch buffer
acc::copyout(result__, res, 1);
acc::deallocate(res);
}
|
ece34265fc282c463e703309e9cc8b4ac2839244.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
* Copyright 2017-2018 by Contributors
*/
#include <dmlc/parameter.h>
#include <thrust/copy.h>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/fill.h>
#include <xgboost/data.h>
#include <xgboost/predictor.h>
#include <xgboost/tree_model.h>
#include <xgboost/tree_updater.h>
#include <memory>
#include "../common/common.h"
#include "../common/device_helpers.cuh"
#include "../common/host_device_vector.h"
namespace xgboost {
namespace predictor {
DMLC_REGISTRY_FILE_TAG(gpu_predictor);
template <typename IterT>
void IncrementOffset(IterT begin_itr, IterT end_itr, size_t amount) {
thrust::transform(begin_itr, end_itr, begin_itr,
[=] __device__(size_t elem) { return elem + amount; });
}
/**
* \struct DevicePredictionNode
*
* \brief Packed 16 byte representation of a tree node for use in device
* prediction
*/
// Packed 16-byte tree node for device-side prediction.
// Encoding tricks: a leaf is marked by left_child_idx == -1; the
// "default-left on missing value" flag is packed into bit 31 of fidx,
// so GetFidx() masks it off and MissingLeft() tests it. `val` is a union:
// leaf weight for leaves, split threshold for interior nodes.
struct DevicePredictionNode {
XGBOOST_DEVICE DevicePredictionNode()
: fidx{-1}, left_child_idx{-1}, right_child_idx{-1} {}
union NodeValue {
float leaf_weight;
float fvalue;
};
int fidx;
int left_child_idx;
int right_child_idx;
NodeValue val;
// Converting constructor from the host-side RegTree node representation.
DevicePredictionNode(const RegTree::Node& n) { // NOLINT
static_assert(sizeof(DevicePredictionNode) == 16, "Size is not 16 bytes");
this->left_child_idx = n.LeftChild();
this->right_child_idx = n.RightChild();
this->fidx = n.SplitIndex();
if (n.DefaultLeft()) {
// pack the default-direction flag into the sign bit of fidx
fidx |= (1U << 31);
}
if (n.IsLeaf()) {
this->val.leaf_weight = n.LeafValue();
} else {
this->val.fvalue = n.SplitCond();
}
}
XGBOOST_DEVICE bool IsLeaf() const { return left_child_idx == -1; }
// split feature index with the default-direction flag masked off
XGBOOST_DEVICE int GetFidx() const { return fidx & ((1U << 31) - 1U); }
XGBOOST_DEVICE bool MissingLeft() const { return (fidx >> 31) != 0; }
// child to follow when the feature value is missing
XGBOOST_DEVICE int MissingIdx() const {
if (MissingLeft()) {
return this->left_child_idx;
} else {
return this->right_child_idx;
}
}
XGBOOST_DEVICE float GetFvalue() const { return val.fvalue; }
XGBOOST_DEVICE float GetWeight() const { return val.leaf_weight; }
};
// Fetches feature values for a row during prediction. Two modes:
//  - use_shared: the constructor cooperatively densifies each thread's row
//    into shared memory (NaN = missing), and lookups are O(1);
//  - otherwise: lookups binary-search the row's CSR entries in global memory.
// NOTE: the constructor contains __syncthreads(), so it must be executed by
// every thread of the block (callers construct it before any bounds check).
struct ElementLoader {
  bool use_shared;
  common::Span<const size_t> d_row_ptr;
  common::Span<const Entry> d_data;
  int num_features;
  float* smem;
  size_t entry_start;
  __device__ ElementLoader(bool use_shared, common::Span<const size_t> row_ptr,
                           common::Span<const Entry> entry, int num_features,
                           float* smem, int num_rows, size_t entry_start)
      : use_shared(use_shared),
        d_row_ptr(row_ptr),
        d_data(entry),
        num_features(num_features),
        smem(smem),
        entry_start(entry_start) {
    // Copy instances: densify each thread's row into its shared-memory slice,
    // with NaN marking missing features.
    if (use_shared) {
      bst_uint global_idx = blockDim.x * blockIdx.x + threadIdx.x;
      int shared_elements = blockDim.x * num_features;
      dh::BlockFill(smem, shared_elements, nanf(""));
      __syncthreads();
      if (global_idx < num_rows) {
        bst_uint elem_begin = d_row_ptr[global_idx];
        bst_uint elem_end = d_row_ptr[global_idx + 1];
        for (bst_uint elem_idx = elem_begin; elem_idx < elem_end; elem_idx++) {
          Entry elem = d_data[elem_idx - entry_start];
          smem[threadIdx.x * num_features + elem.index] = elem.fvalue;
        }
      }
      __syncthreads();
    }
  }
  // Returns the value of feature `fidx` for row `ridx`, or NaN when missing.
  __device__ float GetFvalue(int ridx, int fidx) {
    if (use_shared) {
      return smem[threadIdx.x * num_features + fidx];
    } else {
      // Binary search over the row's sorted (by feature index) CSR entries.
      auto begin_ptr = d_data.begin() + (d_row_ptr[ridx] - entry_start);
      auto end_ptr = d_data.begin() + (d_row_ptr[ridx + 1] - entry_start);
      // FIX: initialize previous_middle; it was previously read uninitialized
      // in the first loop iteration (undefined behavior). end_ptr can never
      // equal a computed midpoint while the loop runs, so it is a safe
      // sentinel. The sentinel breaks the search when it stops converging.
      common::Span<const Entry>::iterator previous_middle = end_ptr;
      while (end_ptr != begin_ptr) {
        auto middle = begin_ptr + (end_ptr - begin_ptr) / 2;
        if (middle == previous_middle) {
          break;
        } else {
          previous_middle = middle;
        }
        if (middle->index == fidx) {
          return middle->fvalue;
        } else if (middle->index < fidx) {
          begin_ptr = middle;
        } else {
          end_ptr = middle;
        }
      }
      // Value is missing
      return nanf("");
    }
  }
};
// Walks a single tree from the root for row `ridx` and returns the weight of
// the leaf it lands in. A NaN feature value means "missing" and follows the
// node's default branch; otherwise values strictly below the split threshold
// go left.
__device__ float GetLeafWeight(bst_uint ridx, const DevicePredictionNode* tree,
                               ElementLoader* loader) {
  DevicePredictionNode node = tree[0];
  while (!node.IsLeaf()) {
    const float split_value = loader->GetFvalue(ridx, node.GetFidx());
    int next;
    if (isnan(split_value)) {
      // feature absent for this row: take the default direction
      next = node.MissingIdx();
    } else if (split_value < node.GetFvalue()) {
      next = node.left_child_idx;
    } else {
      next = node.right_child_idx;
    }
    node = tree[next];
  }
  return node.GetWeight();
}
// One thread per row: accumulates leaf weights of trees [tree_begin,
// tree_end) into d_out_predictions. For multi-class models (num_group > 1)
// each tree adds into its group's slot of the row's prediction vector.
// Dynamic shared memory (smem) is the ElementLoader's dense row cache.
template <int BLOCK_THREADS>
__global__ void PredictKernel(common::Span<const DevicePredictionNode> d_nodes,
common::Span<float> d_out_predictions,
common::Span<size_t> d_tree_segments,
common::Span<int> d_tree_group,
common::Span<const size_t> d_row_ptr,
common::Span<const Entry> d_data, size_t tree_begin,
size_t tree_end, size_t num_features,
size_t num_rows, size_t entry_start,
bool use_shared, int num_group) {
extern __shared__ float smem[];
bst_uint global_idx = blockDim.x * blockIdx.x + threadIdx.x;
// NOTE: the loader must be constructed BEFORE the bounds check below — its
// constructor calls __syncthreads(), which every thread of the block must
// reach; returning first would hang the block.
ElementLoader loader(use_shared, d_row_ptr, d_data, num_features, smem,
num_rows, entry_start);
if (global_idx >= num_rows) return;
if (num_group == 1) {
// single-output model: sum all trees into one slot per row
float sum = 0;
for (int tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
// d_tree_segments holds each tree's starting offset in d_nodes
const DevicePredictionNode* d_tree =
&d_nodes[d_tree_segments[tree_idx - tree_begin]];
sum += GetLeafWeight(global_idx, d_tree, &loader);
}
d_out_predictions[global_idx] += sum;
} else {
// multi-class: each tree contributes to its output group's slot
for (int tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
int tree_group = d_tree_group[tree_idx];
const DevicePredictionNode* d_tree =
&d_nodes[d_tree_segments[tree_idx - tree_begin]];
bst_uint out_prediction_idx = global_idx * num_group + tree_group;
d_out_predictions[out_prediction_idx] +=
GetLeafWeight(global_idx, d_tree, &loader);
}
}
}
// Multi-GPU tree-ensemble predictor. Trees are flattened into a device node
// array per GPU shard (DeviceShard); anything not implemented on the GPU
// (leaf indices, contributions, single instances) delegates to a wrapped
// cpu_predictor_.
class GPUPredictor : public xgboost::Predictor {
protected:
struct DevicePredictionCacheEntry {
std::shared_ptr<DMatrix> data;
HostDeviceVector<bst_float> predictions;
};
private:
// Computes, for each shard, the end offset of its slice of `data` by reading
// back the last element of every device span (offsets[0] = 0).
void DeviceOffsets(const HostDeviceVector<size_t>& data,
size_t total_size,
std::vector<size_t>* out_offsets) {
auto& offsets = *out_offsets;
offsets.resize(devices_.Size() + 1);
offsets[0] = 0;
#pragma omp parallel for schedule(static, 1) if (devices_.Size() > 1)
for (int shard = 0; shard < devices_.Size(); ++shard) {
int device = devices_.DeviceId(shard);
auto data_span = data.DeviceSpan(device);
dh::safe_cuda(hipSetDevice(device));
if (data_span.size() == 0) {
offsets[shard + 1] = total_size;
} else {
// copy the last element from every shard
dh::safe_cuda(hipMemcpy(&offsets.at(shard + 1),
&data_span[data_span.size()-1],
sizeof(size_t), hipMemcpyDeviceToHost));
}
}
}
// This function populates the explicit offsets that can be used to create a window into the
// underlying host vector. The window starts from the `batch_offset` and has a size of
// `batch_size`, and is sharded across all the devices. Each shard is granular depending on
// the number of output classes `n_classes`.
void PredictionDeviceOffsets(size_t total_size, size_t batch_offset, size_t batch_size,
int n_classes, std::vector<size_t>* out_offsets) {
auto& offsets = *out_offsets;
size_t n_shards = devices_.Size();
offsets.resize(n_shards + 2);
size_t rows_per_shard = common::DivRoundUp(batch_size, n_shards);
for (size_t shard = 0; shard < devices_.Size(); ++shard) {
size_t n_rows = ::min(batch_size, shard * rows_per_shard);
offsets[shard] = batch_offset + n_rows * n_classes;
}
offsets[n_shards] = batch_offset + batch_size * n_classes;
offsets[n_shards + 1] = total_size;
}
// Per-GPU state: device copies of the flattened tree nodes, tree start
// offsets, and tree-to-group mapping, plus the kernel launcher.
struct DeviceShard {
DeviceShard() : device_{-1} {}
~DeviceShard() {
dh::safe_cuda(hipSetDevice(device_));
}
void Init(int device) {
this->device_ = device;
max_shared_memory_bytes_ = dh::MaxSharedMemory(this->device_);
}
// Uploads the flattened model (nodes, segment offsets, group ids) to this
// shard's device.
void InitModel(const gbm::GBTreeModel& model,
const thrust::host_vector<size_t>& h_tree_segments,
const thrust::host_vector<DevicePredictionNode>& h_nodes,
size_t tree_begin, size_t tree_end) {
dh::safe_cuda(hipSetDevice(device_));
nodes_.resize(h_nodes.size())
;
dh::safe_cuda(hipMemcpyAsync(nodes_.data().get(), h_nodes.data(),
sizeof(DevicePredictionNode) * h_nodes.size(),
hipMemcpyHostToDevice));
tree_segments_.resize(h_tree_segments.size());
dh::safe_cuda(hipMemcpyAsync(tree_segments_.data().get(), h_tree_segments.data(),
sizeof(size_t) * h_tree_segments.size(),
hipMemcpyHostToDevice));
tree_group_.resize(model.tree_info.size());
dh::safe_cuda(hipMemcpyAsync(tree_group_.data().get(), model.tree_info.data(),
sizeof(int) * model.tree_info.size(),
hipMemcpyHostToDevice));
this->tree_begin_ = tree_begin;
this->tree_end_ = tree_end;
this->num_group_ = model.param.num_output_group;
}
// Launches PredictKernel for this shard's slice of the batch; falls back
// to global-memory lookups when the dense row cache exceeds shared memory.
void PredictInternal
(const SparsePage& batch, size_t num_features,
HostDeviceVector<bst_float>* predictions) {
if (predictions->DeviceSize(device_) == 0) { return; }
dh::safe_cuda(hipSetDevice(device_));
const int BLOCK_THREADS = 128;
size_t num_rows = batch.offset.DeviceSize(device_) - 1;
const int GRID_SIZE = static_cast<int>(common::DivRoundUp(num_rows, BLOCK_THREADS));
int shared_memory_bytes = static_cast<int>
(sizeof(float) * num_features * BLOCK_THREADS);
bool use_shared = true;
if (shared_memory_bytes > max_shared_memory_bytes_) {
shared_memory_bytes = 0;
use_shared = false;
}
const auto& data_distr = batch.data.Distribution();
size_t entry_start = data_distr.ShardStart(batch.data.Size(),
data_distr.Devices().Index(device_));
hipLaunchKernelGGL(( PredictKernel<BLOCK_THREADS>), dim3(GRID_SIZE), dim3(BLOCK_THREADS), shared_memory_bytes, 0,
dh::ToSpan(nodes_), predictions->DeviceSpan(device_), dh::ToSpan(tree_segments_),
dh::ToSpan(tree_group_), batch.offset.DeviceSpan(device_),
batch.data.DeviceSpan(device_), this->tree_begin_, this->tree_end_, num_features,
num_rows, entry_start, use_shared, this->num_group_);
}
private:
int device_;
dh::device_vector<DevicePredictionNode> nodes_;
dh::device_vector<size_t> tree_segments_;
dh::device_vector<int> tree_group_;
size_t max_shared_memory_bytes_;
size_t tree_begin_;
size_t tree_end_;
int num_group_;
};
// Flattens trees [tree_begin, tree_end) into contiguous host arrays and
// uploads them to every shard.
void InitModel(const gbm::GBTreeModel& model, size_t tree_begin, size_t tree_end) {
CHECK_EQ(model.param.size_leaf_vector, 0);
// Copy decision trees to device
thrust::host_vector<size_t> h_tree_segments;
h_tree_segments.reserve((tree_end - tree_begin) + 1);
size_t sum = 0;
h_tree_segments.push_back(sum);
for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
sum += model.trees.at(tree_idx)->GetNodes().size();
h_tree_segments.push_back(sum);
}
thrust::host_vector<DevicePredictionNode> h_nodes(h_tree_segments.back());
for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
auto& src_nodes = model.trees.at(tree_idx)->GetNodes();
std::copy(src_nodes.begin(), src_nodes.end(),
h_nodes.begin() + h_tree_segments[tree_idx - tree_begin]);
}
dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard &shard) {
shard.InitModel(model, h_tree_segments, h_nodes, tree_begin, tree_end);
});
}
// Runs GPU prediction over all row batches of `dmat`, resharding data and
// prediction windows per batch when the DMatrix is external-memory backed.
void DevicePredictInternal(DMatrix* dmat,
HostDeviceVector<bst_float>* out_preds,
const gbm::GBTreeModel& model, size_t tree_begin,
size_t tree_end) {
if (tree_end - tree_begin == 0) { return; }
monitor_.StartCuda("DevicePredictInternal")
;
InitModel(model, tree_begin, tree_end);
size_t batch_offset = 0;
for (auto &batch : dmat->GetRowBatches()) {
bool is_external_memory = batch.Size() < dmat->Info().num_row_;
if (is_external_memory) {
std::vector<size_t> out_preds_offsets;
PredictionDeviceOffsets(out_preds->Size(), batch_offset, batch.Size(),
model.param.num_output_group, &out_preds_offsets);
out_preds->Reshard(GPUDistribution::Explicit(devices_, out_preds_offsets));
}
batch.offset.Shard(GPUDistribution::Overlap(devices_, 1));
std::vector<size_t> device_offsets;
DeviceOffsets(batch.offset, batch.data.Size(), &device_offsets);
batch.data.Reshard(GPUDistribution::Explicit(devices_, device_offsets));
dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) {
shard.PredictInternal(batch, model.param.num_feature, out_preds);
});
batch_offset += batch.Size() * model.param.num_output_group;
}
out_preds->Reshard(GPUDistribution::Granular(devices_, model.param.num_output_group));
monitor_.StopCuda("DevicePredictInternal");
}
public:
GPUPredictor() // NOLINT
: cpu_predictor_(Predictor::Create("cpu_predictor", learner_param_)) {}
// Predicts a whole DMatrix, serving from the prediction cache when possible.
void PredictBatch(DMatrix* dmat, HostDeviceVector<bst_float>* out_preds,
const gbm::GBTreeModel& model, int tree_begin,
unsigned ntree_limit = 0) override {
GPUSet devices = GPUSet::All(learner_param_->gpu_id, learner_param_->n_gpus,
dmat->Info().num_row_);
CHECK_NE(devices.Size(), 0);
ConfigureShards(devices);
if (this->PredictFromCache(dmat, out_preds, model, ntree_limit)) {
return;
}
this->InitOutPredictions(dmat->Info(), out_preds, model);
int tree_end = ntree_limit * model.param.num_output_group;
if (ntree_limit == 0 || ntree_limit > model.trees.size()) {
tree_end = static_cast<unsigned>(model.trees.size());
}
DevicePredictInternal(dmat, out_preds, model, tree_begin, tree_end);
}
protected:
// Sizes and seeds the output vector with the base margin (or the model's
// scalar base margin when none is provided).
void InitOutPredictions(const MetaInfo& info,
HostDeviceVector<bst_float>* out_preds,
const gbm::GBTreeModel& model) const {
size_t n_classes = model.param.num_output_group;
size_t n = n_classes * info.num_row_;
const HostDeviceVector<bst_float>& base_margin = info.base_margin_;
out_preds->Shard(GPUDistribution::Granular(devices_, n_classes));
out_preds->Resize(n);
if (base_margin.Size() != 0) {
CHECK_EQ(out_preds->Size(), n);
out_preds->Copy(base_margin);
} else {
out_preds->Fill(model.base_margin);
}
}
// Copies cached predictions into out_preds when the cache covers the
// requested number of trees; returns whether the cache was used.
bool PredictFromCache(DMatrix* dmat, HostDeviceVector<bst_float>* out_preds,
const gbm::GBTreeModel& model, unsigned ntree_limit) {
if (ntree_limit == 0 ||
ntree_limit * model.param.num_output_group >= model.trees.size()) {
auto it = cache_.find(dmat);
if (it != cache_.end()) {
const HostDeviceVector<bst_float>& y = it->second.predictions;
if (y.Size() != 0) {
monitor_.StartCuda("PredictFromCache");
out_preds->Shard(y.Distribution());
out_preds->Resize(y.Size());
out_preds->Copy(y);
monitor_.StopCuda("PredictFromCache");
return true;
}
}
}
return false;
}
// Refreshes cached predictions after `num_new_trees` were added to the
// model, preferring the updater's incremental cache path when applicable.
void UpdatePredictionCache(
const gbm::GBTreeModel& model,
std::vector<std::unique_ptr<TreeUpdater>>* updaters,
int num_new_trees) override {
auto old_ntree = model.trees.size() - num_new_trees;
// update cache entry
for (auto& kv : cache_) {
PredictionCacheEntry& e = kv.second;
DMatrix* dmat = kv.first;
HostDeviceVector<bst_float>& predictions = e.predictions;
if (predictions.Size() == 0) {
this->InitOutPredictions(dmat->Info(), &predictions, model);
}
if (model.param.num_output_group == 1 && updaters->size() > 0 &&
num_new_trees == 1 &&
updaters->back()->UpdatePredictionCache(e.data.get(), &predictions)) {
// do nothing
} else {
DevicePredictInternal(dmat, &predictions, model, old_ntree, model.trees.size());
}
}
}
// The following entry points are not implemented on the GPU and delegate
// to the wrapped CPU predictor.
void PredictInstance(const SparsePage::Inst& inst,
std::vector<bst_float>* out_preds,
const gbm::GBTreeModel& model, unsigned ntree_limit,
unsigned root_index) override {
cpu_predictor_->PredictInstance(inst, out_preds, model, root_index);
}
void PredictLeaf(DMatrix* p_fmat, std::vector<bst_float>* out_preds,
const gbm::GBTreeModel& model,
unsigned ntree_limit) override {
cpu_predictor_->PredictLeaf(p_fmat, out_preds, model, ntree_limit);
}
void PredictContribution(DMatrix* p_fmat,
std::vector<bst_float>* out_contribs,
const gbm::GBTreeModel& model, unsigned ntree_limit,
bool approximate, int condition,
unsigned condition_feature) override {
cpu_predictor_->PredictContribution(p_fmat, out_contribs, model, ntree_limit,
approximate, condition,
condition_feature);
}
void PredictInteractionContributions(DMatrix* p_fmat,
std::vector<bst_float>* out_contribs,
const gbm::GBTreeModel& model,
unsigned ntree_limit,
bool approximate) override {
cpu_predictor_->PredictInteractionContributions(p_fmat, out_contribs, model,
ntree_limit, approximate);
}
void Init(const std::vector<std::pair<std::string, std::string>>& cfg,
const std::vector<std::shared_ptr<DMatrix>>& cache) override {
Predictor::Init(cfg, cache);
cpu_predictor_->Init(cfg, cache);
GPUSet devices = GPUSet::All(learner_param_->gpu_id, learner_param_->n_gpus);
ConfigureShards(devices);
}
private:
/*! \brief Re configure shards when GPUSet is changed. */
void ConfigureShards(GPUSet devices) {
if (devices_ == devices) return;
devices_ = devices;
shards_.clear();
shards_.resize(devices_.Size());
dh::ExecuteIndexShards(&shards_, [=](size_t i, DeviceShard& shard){
shard.Init(devices_.DeviceId(i));
});
}
std::unique_ptr<Predictor> cpu_predictor_;
std::vector<DeviceShard> shards_;
GPUSet devices_;
common::Monitor monitor_;
};
XGBOOST_REGISTER_PREDICTOR(GPUPredictor, "gpu_predictor")
.describe("Make predictions using GPU.")
.set_body([]() { return new GPUPredictor(); });
} // namespace predictor
} // namespace xgboost
| ece34265fc282c463e703309e9cc8b4ac2839244.cu | /*!
* Copyright 2017-2018 by Contributors
*/
#include <dmlc/parameter.h>
#include <thrust/copy.h>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/fill.h>
#include <xgboost/data.h>
#include <xgboost/predictor.h>
#include <xgboost/tree_model.h>
#include <xgboost/tree_updater.h>
#include <memory>
#include "../common/common.h"
#include "../common/device_helpers.cuh"
#include "../common/host_device_vector.h"
namespace xgboost {
namespace predictor {
DMLC_REGISTRY_FILE_TAG(gpu_predictor);
template <typename IterT>
void IncrementOffset(IterT begin_itr, IterT end_itr, size_t amount) {
thrust::transform(begin_itr, end_itr, begin_itr,
[=] __device__(size_t elem) { return elem + amount; });
}
/**
* \struct DevicePredictionNode
*
* \brief Packed 16 byte representation of a tree node for use in device
* prediction
*/
struct DevicePredictionNode {
XGBOOST_DEVICE DevicePredictionNode()
: fidx{-1}, left_child_idx{-1}, right_child_idx{-1} {}
union NodeValue {
float leaf_weight;
float fvalue;
};
int fidx;
int left_child_idx;
int right_child_idx;
NodeValue val;
DevicePredictionNode(const RegTree::Node& n) { // NOLINT
static_assert(sizeof(DevicePredictionNode) == 16, "Size is not 16 bytes");
this->left_child_idx = n.LeftChild();
this->right_child_idx = n.RightChild();
this->fidx = n.SplitIndex();
if (n.DefaultLeft()) {
fidx |= (1U << 31);
}
if (n.IsLeaf()) {
this->val.leaf_weight = n.LeafValue();
} else {
this->val.fvalue = n.SplitCond();
}
}
XGBOOST_DEVICE bool IsLeaf() const { return left_child_idx == -1; }
XGBOOST_DEVICE int GetFidx() const { return fidx & ((1U << 31) - 1U); }
XGBOOST_DEVICE bool MissingLeft() const { return (fidx >> 31) != 0; }
XGBOOST_DEVICE int MissingIdx() const {
if (MissingLeft()) {
return this->left_child_idx;
} else {
return this->right_child_idx;
}
}
XGBOOST_DEVICE float GetFvalue() const { return val.fvalue; }
XGBOOST_DEVICE float GetWeight() const { return val.leaf_weight; }
};
struct ElementLoader {
bool use_shared;
common::Span<const size_t> d_row_ptr;
common::Span<const Entry> d_data;
int num_features;
float* smem;
size_t entry_start;
__device__ ElementLoader(bool use_shared, common::Span<const size_t> row_ptr,
common::Span<const Entry> entry, int num_features,
float* smem, int num_rows, size_t entry_start)
: use_shared(use_shared),
d_row_ptr(row_ptr),
d_data(entry),
num_features(num_features),
smem(smem),
entry_start(entry_start) {
// Copy instances
if (use_shared) {
bst_uint global_idx = blockDim.x * blockIdx.x + threadIdx.x;
int shared_elements = blockDim.x * num_features;
dh::BlockFill(smem, shared_elements, nanf(""));
__syncthreads();
if (global_idx < num_rows) {
bst_uint elem_begin = d_row_ptr[global_idx];
bst_uint elem_end = d_row_ptr[global_idx + 1];
for (bst_uint elem_idx = elem_begin; elem_idx < elem_end; elem_idx++) {
Entry elem = d_data[elem_idx - entry_start];
smem[threadIdx.x * num_features + elem.index] = elem.fvalue;
}
}
__syncthreads();
}
}
__device__ float GetFvalue(int ridx, int fidx) {
if (use_shared) {
return smem[threadIdx.x * num_features + fidx];
} else {
// Binary search
auto begin_ptr = d_data.begin() + (d_row_ptr[ridx] - entry_start);
auto end_ptr = d_data.begin() + (d_row_ptr[ridx + 1] - entry_start);
common::Span<const Entry>::iterator previous_middle;
while (end_ptr != begin_ptr) {
auto middle = begin_ptr + (end_ptr - begin_ptr) / 2;
if (middle == previous_middle) {
break;
} else {
previous_middle = middle;
}
if (middle->index == fidx) {
return middle->fvalue;
} else if (middle->index < fidx) {
begin_ptr = middle;
} else {
end_ptr = middle;
}
}
// Value is missing
return nanf("");
}
}
};
__device__ float GetLeafWeight(bst_uint ridx, const DevicePredictionNode* tree,
ElementLoader* loader) {
DevicePredictionNode n = tree[0];
while (!n.IsLeaf()) {
float fvalue = loader->GetFvalue(ridx, n.GetFidx());
// Missing value
if (isnan(fvalue)) {
n = tree[n.MissingIdx()];
} else {
if (fvalue < n.GetFvalue()) {
n = tree[n.left_child_idx];
} else {
n = tree[n.right_child_idx];
}
}
}
return n.GetWeight();
}
template <int BLOCK_THREADS>
__global__ void PredictKernel(common::Span<const DevicePredictionNode> d_nodes,
common::Span<float> d_out_predictions,
common::Span<size_t> d_tree_segments,
common::Span<int> d_tree_group,
common::Span<const size_t> d_row_ptr,
common::Span<const Entry> d_data, size_t tree_begin,
size_t tree_end, size_t num_features,
size_t num_rows, size_t entry_start,
bool use_shared, int num_group) {
extern __shared__ float smem[];
bst_uint global_idx = blockDim.x * blockIdx.x + threadIdx.x;
ElementLoader loader(use_shared, d_row_ptr, d_data, num_features, smem,
num_rows, entry_start);
if (global_idx >= num_rows) return;
if (num_group == 1) {
float sum = 0;
for (int tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
const DevicePredictionNode* d_tree =
&d_nodes[d_tree_segments[tree_idx - tree_begin]];
sum += GetLeafWeight(global_idx, d_tree, &loader);
}
d_out_predictions[global_idx] += sum;
} else {
for (int tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
int tree_group = d_tree_group[tree_idx];
const DevicePredictionNode* d_tree =
&d_nodes[d_tree_segments[tree_idx - tree_begin]];
bst_uint out_prediction_idx = global_idx * num_group + tree_group;
d_out_predictions[out_prediction_idx] +=
GetLeafWeight(global_idx, d_tree, &loader);
}
}
}
class GPUPredictor : public xgboost::Predictor {
protected:
struct DevicePredictionCacheEntry {
std::shared_ptr<DMatrix> data;
HostDeviceVector<bst_float> predictions;
};
private:
void DeviceOffsets(const HostDeviceVector<size_t>& data,
size_t total_size,
std::vector<size_t>* out_offsets) {
auto& offsets = *out_offsets;
offsets.resize(devices_.Size() + 1);
offsets[0] = 0;
#pragma omp parallel for schedule(static, 1) if (devices_.Size() > 1)
for (int shard = 0; shard < devices_.Size(); ++shard) {
int device = devices_.DeviceId(shard);
auto data_span = data.DeviceSpan(device);
dh::safe_cuda(cudaSetDevice(device));
if (data_span.size() == 0) {
offsets[shard + 1] = total_size;
} else {
// copy the last element from every shard
dh::safe_cuda(cudaMemcpy(&offsets.at(shard + 1),
&data_span[data_span.size()-1],
sizeof(size_t), cudaMemcpyDeviceToHost));
}
}
}
// This function populates the explicit offsets that can be used to create a window into the
// underlying host vector. The window starts from the `batch_offset` and has a size of
// `batch_size`, and is sharded across all the devices. Each shard is granular depending on
// the number of output classes `n_classes`.
void PredictionDeviceOffsets(size_t total_size, size_t batch_offset, size_t batch_size,
int n_classes, std::vector<size_t>* out_offsets) {
auto& offsets = *out_offsets;
size_t n_shards = devices_.Size();
offsets.resize(n_shards + 2);
size_t rows_per_shard = common::DivRoundUp(batch_size, n_shards);
for (size_t shard = 0; shard < devices_.Size(); ++shard) {
size_t n_rows = std::min(batch_size, shard * rows_per_shard);
offsets[shard] = batch_offset + n_rows * n_classes;
}
offsets[n_shards] = batch_offset + batch_size * n_classes;
offsets[n_shards + 1] = total_size;
}
struct DeviceShard {
DeviceShard() : device_{-1} {}
~DeviceShard() {
dh::safe_cuda(cudaSetDevice(device_));
}
void Init(int device) {
this->device_ = device;
max_shared_memory_bytes_ = dh::MaxSharedMemory(this->device_);
}
void InitModel(const gbm::GBTreeModel& model,
const thrust::host_vector<size_t>& h_tree_segments,
const thrust::host_vector<DevicePredictionNode>& h_nodes,
size_t tree_begin, size_t tree_end) {
dh::safe_cuda(cudaSetDevice(device_));
nodes_.resize(h_nodes.size());
dh::safe_cuda(cudaMemcpyAsync(nodes_.data().get(), h_nodes.data(),
sizeof(DevicePredictionNode) * h_nodes.size(),
cudaMemcpyHostToDevice));
tree_segments_.resize(h_tree_segments.size());
dh::safe_cuda(cudaMemcpyAsync(tree_segments_.data().get(), h_tree_segments.data(),
sizeof(size_t) * h_tree_segments.size(),
cudaMemcpyHostToDevice));
tree_group_.resize(model.tree_info.size());
dh::safe_cuda(cudaMemcpyAsync(tree_group_.data().get(), model.tree_info.data(),
sizeof(int) * model.tree_info.size(),
cudaMemcpyHostToDevice));
this->tree_begin_ = tree_begin;
this->tree_end_ = tree_end;
this->num_group_ = model.param.num_output_group;
}
void PredictInternal
(const SparsePage& batch, size_t num_features,
HostDeviceVector<bst_float>* predictions) {
if (predictions->DeviceSize(device_) == 0) { return; }
dh::safe_cuda(cudaSetDevice(device_));
const int BLOCK_THREADS = 128;
size_t num_rows = batch.offset.DeviceSize(device_) - 1;
const int GRID_SIZE = static_cast<int>(common::DivRoundUp(num_rows, BLOCK_THREADS));
int shared_memory_bytes = static_cast<int>
(sizeof(float) * num_features * BLOCK_THREADS);
bool use_shared = true;
if (shared_memory_bytes > max_shared_memory_bytes_) {
shared_memory_bytes = 0;
use_shared = false;
}
const auto& data_distr = batch.data.Distribution();
size_t entry_start = data_distr.ShardStart(batch.data.Size(),
data_distr.Devices().Index(device_));
PredictKernel<BLOCK_THREADS><<<GRID_SIZE, BLOCK_THREADS, shared_memory_bytes>>>
(dh::ToSpan(nodes_), predictions->DeviceSpan(device_), dh::ToSpan(tree_segments_),
dh::ToSpan(tree_group_), batch.offset.DeviceSpan(device_),
batch.data.DeviceSpan(device_), this->tree_begin_, this->tree_end_, num_features,
num_rows, entry_start, use_shared, this->num_group_);
}
private:
int device_;
dh::device_vector<DevicePredictionNode> nodes_;
dh::device_vector<size_t> tree_segments_;
dh::device_vector<int> tree_group_;
size_t max_shared_memory_bytes_;
size_t tree_begin_;
size_t tree_end_;
int num_group_;
};
void InitModel(const gbm::GBTreeModel& model, size_t tree_begin, size_t tree_end) {
CHECK_EQ(model.param.size_leaf_vector, 0);
// Copy decision trees to device
thrust::host_vector<size_t> h_tree_segments;
h_tree_segments.reserve((tree_end - tree_begin) + 1);
size_t sum = 0;
h_tree_segments.push_back(sum);
for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
sum += model.trees.at(tree_idx)->GetNodes().size();
h_tree_segments.push_back(sum);
}
thrust::host_vector<DevicePredictionNode> h_nodes(h_tree_segments.back());
for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
auto& src_nodes = model.trees.at(tree_idx)->GetNodes();
std::copy(src_nodes.begin(), src_nodes.end(),
h_nodes.begin() + h_tree_segments[tree_idx - tree_begin]);
}
dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard &shard) {
shard.InitModel(model, h_tree_segments, h_nodes, tree_begin, tree_end);
});
}
void DevicePredictInternal(DMatrix* dmat,
HostDeviceVector<bst_float>* out_preds,
const gbm::GBTreeModel& model, size_t tree_begin,
size_t tree_end) {
if (tree_end - tree_begin == 0) { return; }
monitor_.StartCuda("DevicePredictInternal");
InitModel(model, tree_begin, tree_end);
size_t batch_offset = 0;
for (auto &batch : dmat->GetRowBatches()) {
bool is_external_memory = batch.Size() < dmat->Info().num_row_;
if (is_external_memory) {
std::vector<size_t> out_preds_offsets;
PredictionDeviceOffsets(out_preds->Size(), batch_offset, batch.Size(),
model.param.num_output_group, &out_preds_offsets);
out_preds->Reshard(GPUDistribution::Explicit(devices_, out_preds_offsets));
}
batch.offset.Shard(GPUDistribution::Overlap(devices_, 1));
std::vector<size_t> device_offsets;
DeviceOffsets(batch.offset, batch.data.Size(), &device_offsets);
batch.data.Reshard(GPUDistribution::Explicit(devices_, device_offsets));
dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) {
shard.PredictInternal(batch, model.param.num_feature, out_preds);
});
batch_offset += batch.Size() * model.param.num_output_group;
}
out_preds->Reshard(GPUDistribution::Granular(devices_, model.param.num_output_group));
monitor_.StopCuda("DevicePredictInternal");
}
public:
GPUPredictor() // NOLINT
: cpu_predictor_(Predictor::Create("cpu_predictor", learner_param_)) {}
void PredictBatch(DMatrix* dmat, HostDeviceVector<bst_float>* out_preds,
const gbm::GBTreeModel& model, int tree_begin,
unsigned ntree_limit = 0) override {
GPUSet devices = GPUSet::All(learner_param_->gpu_id, learner_param_->n_gpus,
dmat->Info().num_row_);
CHECK_NE(devices.Size(), 0);
ConfigureShards(devices);
if (this->PredictFromCache(dmat, out_preds, model, ntree_limit)) {
return;
}
this->InitOutPredictions(dmat->Info(), out_preds, model);
int tree_end = ntree_limit * model.param.num_output_group;
if (ntree_limit == 0 || ntree_limit > model.trees.size()) {
tree_end = static_cast<unsigned>(model.trees.size());
}
DevicePredictInternal(dmat, out_preds, model, tree_begin, tree_end);
}
protected:
void InitOutPredictions(const MetaInfo& info,
HostDeviceVector<bst_float>* out_preds,
const gbm::GBTreeModel& model) const {
size_t n_classes = model.param.num_output_group;
size_t n = n_classes * info.num_row_;
const HostDeviceVector<bst_float>& base_margin = info.base_margin_;
out_preds->Shard(GPUDistribution::Granular(devices_, n_classes));
out_preds->Resize(n);
if (base_margin.Size() != 0) {
CHECK_EQ(out_preds->Size(), n);
out_preds->Copy(base_margin);
} else {
out_preds->Fill(model.base_margin);
}
}
bool PredictFromCache(DMatrix* dmat, HostDeviceVector<bst_float>* out_preds,
const gbm::GBTreeModel& model, unsigned ntree_limit) {
if (ntree_limit == 0 ||
ntree_limit * model.param.num_output_group >= model.trees.size()) {
auto it = cache_.find(dmat);
if (it != cache_.end()) {
const HostDeviceVector<bst_float>& y = it->second.predictions;
if (y.Size() != 0) {
monitor_.StartCuda("PredictFromCache");
out_preds->Shard(y.Distribution());
out_preds->Resize(y.Size());
out_preds->Copy(y);
monitor_.StopCuda("PredictFromCache");
return true;
}
}
}
return false;
}
void UpdatePredictionCache(
const gbm::GBTreeModel& model,
std::vector<std::unique_ptr<TreeUpdater>>* updaters,
int num_new_trees) override {
auto old_ntree = model.trees.size() - num_new_trees;
// update cache entry
for (auto& kv : cache_) {
PredictionCacheEntry& e = kv.second;
DMatrix* dmat = kv.first;
HostDeviceVector<bst_float>& predictions = e.predictions;
if (predictions.Size() == 0) {
this->InitOutPredictions(dmat->Info(), &predictions, model);
}
if (model.param.num_output_group == 1 && updaters->size() > 0 &&
num_new_trees == 1 &&
updaters->back()->UpdatePredictionCache(e.data.get(), &predictions)) {
// do nothing
} else {
DevicePredictInternal(dmat, &predictions, model, old_ntree, model.trees.size());
}
}
}
void PredictInstance(const SparsePage::Inst& inst,
std::vector<bst_float>* out_preds,
const gbm::GBTreeModel& model, unsigned ntree_limit,
unsigned root_index) override {
cpu_predictor_->PredictInstance(inst, out_preds, model, root_index);
}
void PredictLeaf(DMatrix* p_fmat, std::vector<bst_float>* out_preds,
const gbm::GBTreeModel& model,
unsigned ntree_limit) override {
cpu_predictor_->PredictLeaf(p_fmat, out_preds, model, ntree_limit);
}
void PredictContribution(DMatrix* p_fmat,
std::vector<bst_float>* out_contribs,
const gbm::GBTreeModel& model, unsigned ntree_limit,
bool approximate, int condition,
unsigned condition_feature) override {
cpu_predictor_->PredictContribution(p_fmat, out_contribs, model, ntree_limit,
approximate, condition,
condition_feature);
}
void PredictInteractionContributions(DMatrix* p_fmat,
std::vector<bst_float>* out_contribs,
const gbm::GBTreeModel& model,
unsigned ntree_limit,
bool approximate) override {
cpu_predictor_->PredictInteractionContributions(p_fmat, out_contribs, model,
ntree_limit, approximate);
}
void Init(const std::vector<std::pair<std::string, std::string>>& cfg,
const std::vector<std::shared_ptr<DMatrix>>& cache) override {
Predictor::Init(cfg, cache);
cpu_predictor_->Init(cfg, cache);
GPUSet devices = GPUSet::All(learner_param_->gpu_id, learner_param_->n_gpus);
ConfigureShards(devices);
}
private:
/*! \brief Re configure shards when GPUSet is changed. */
void ConfigureShards(GPUSet devices) {
if (devices_ == devices) return;
devices_ = devices;
shards_.clear();
shards_.resize(devices_.Size());
dh::ExecuteIndexShards(&shards_, [=](size_t i, DeviceShard& shard){
shard.Init(devices_.DeviceId(i));
});
}
std::unique_ptr<Predictor> cpu_predictor_;
std::vector<DeviceShard> shards_;
GPUSet devices_;
common::Monitor monitor_;
};
XGBOOST_REGISTER_PREDICTOR(GPUPredictor, "gpu_predictor")
.describe("Make predictions using GPU.")
.set_body([]() { return new GPUPredictor(); });
} // namespace predictor
} // namespace xgboost
|
8262aa7a2beb9dae7b9e629d4b400a37f2863534.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@generated from sparse/blas/zmergecgs.cu, normal z -> c, Mon Jun 25 18:24:25 2018
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
#define PRECISION_c
// These routines merge multiple kernels from ccgs into one.
/* -------------------------------------------------------------------------- */
__global__ void
magma_ccgs_1_kernel(
int num_rows,
int num_cols,
magmaFloatComplex beta,
magmaFloatComplex *r,
magmaFloatComplex *q,
magmaFloatComplex *u,
magmaFloatComplex *p )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
magmaFloatComplex tmp;
tmp = r[ i+j*num_rows ] + beta * q[ i+j*num_rows ];
p[ i+j*num_rows ] = tmp + beta * q[ i+j*num_rows ]
+ beta * beta * p[ i+j*num_rows ];
u[ i+j*num_rows ] = tmp;
}
}
}
/**
Purpose
-------
Mergels multiple operations into one kernel:
u = r + beta q
p = u + beta*(q + beta*p)
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
beta magmaFloatComplex
scalar
@param[in]
r magmaFloatComplex_ptr
vector
@param[in]
q magmaFloatComplex_ptr
vector
@param[in,out]
u magmaFloatComplex_ptr
vector
@param[in,out]
p magmaFloatComplex_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_cgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_ccgs_1(
magma_int_t num_rows,
magma_int_t num_cols,
magmaFloatComplex beta,
magmaFloatComplex_ptr r,
magmaFloatComplex_ptr q,
magmaFloatComplex_ptr u,
magmaFloatComplex_ptr p,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
hipLaunchKernelGGL(( magma_ccgs_1_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, beta, r, q, u, p );
return MAGMA_SUCCESS;
}
__global__ void
magma_ccgs_2_kernel(
int num_rows,
int num_cols,
magmaFloatComplex *r,
magmaFloatComplex *u,
magmaFloatComplex *p )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
magmaFloatComplex tmp;
tmp = r[ i+j*num_rows ];
u[ i+j*num_rows ] = tmp;
p[ i+j*num_rows ] = tmp;
}
}
}
/**
Purpose
-------
Mergels multiple operations into one kernel:
u = r
p = r
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
r magmaFloatComplex_ptr
vector
@param[in,out]
u magmaFloatComplex_ptr
vector
@param[in,out]
p magmaFloatComplex_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_cgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_ccgs_2(
magma_int_t num_rows,
magma_int_t num_cols,
magmaFloatComplex_ptr r,
magmaFloatComplex_ptr u,
magmaFloatComplex_ptr p,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
hipLaunchKernelGGL(( magma_ccgs_2_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, r, u, p);
return MAGMA_SUCCESS;
}
__global__ void
magma_ccgs_3_kernel(
int num_rows,
int num_cols,
magmaFloatComplex alpha,
magmaFloatComplex *v_hat,
magmaFloatComplex *u,
magmaFloatComplex *q,
magmaFloatComplex *t )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
magmaFloatComplex uloc, tmp;
uloc = u[ i+j*num_rows ];
tmp = uloc - alpha * v_hat[ i+j*num_rows ];
t[ i+j*num_rows ] = tmp + uloc;
q[ i+j*num_rows ] = tmp;
}
}
}
/**
Purpose
-------
Mergels multiple operations into one kernel:
q = u - alpha v_hat
t = u + q
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
alpha magmaFloatComplex
scalar
@param[in]
v_hat magmaFloatComplex_ptr
vector
@param[in]
u magmaFloatComplex_ptr
vector
@param[in,out]
q magmaFloatComplex_ptr
vector
@param[in,out]
t magmaFloatComplex_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_cgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_ccgs_3(
magma_int_t num_rows,
magma_int_t num_cols,
magmaFloatComplex alpha,
magmaFloatComplex_ptr v_hat,
magmaFloatComplex_ptr u,
magmaFloatComplex_ptr q,
magmaFloatComplex_ptr t,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
hipLaunchKernelGGL(( magma_ccgs_3_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, alpha, v_hat, u, q, t );
return MAGMA_SUCCESS;
}
__global__ void
magma_ccgs_4_kernel(
int num_rows,
int num_cols,
magmaFloatComplex alpha,
magmaFloatComplex *u_hat,
magmaFloatComplex *t,
magmaFloatComplex *x,
magmaFloatComplex *r )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
x[ i+j*num_rows ] = x[ i+j*num_rows ]
+ alpha * u_hat[ i+j*num_rows ];
r[ i+j*num_rows ] = r[ i+j*num_rows ]
- alpha * t[ i+j*num_rows ];
}
}
}
/**
Purpose
-------
Mergels multiple operations into one kernel:
x = x + alpha u_hat
r = r -alpha*A u_hat = r -alpha*t
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
alpha magmaFloatComplex
scalar
@param[in]
u_hat magmaFloatComplex_ptr
vector
@param[in]
t magmaFloatComplex_ptr
vector
@param[in,out]
x magmaFloatComplex_ptr
vector
@param[in,out]
r magmaFloatComplex_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_cgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_ccgs_4(
magma_int_t num_rows,
magma_int_t num_cols,
magmaFloatComplex alpha,
magmaFloatComplex_ptr u_hat,
magmaFloatComplex_ptr t,
magmaFloatComplex_ptr x,
magmaFloatComplex_ptr r,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
hipLaunchKernelGGL(( magma_ccgs_4_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, alpha, u_hat, t, x, r );
return MAGMA_SUCCESS;
}
| 8262aa7a2beb9dae7b9e629d4b400a37f2863534.cu | /*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@generated from sparse/blas/zmergecgs.cu, normal z -> c, Mon Jun 25 18:24:25 2018
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
#define PRECISION_c
// These routines merge multiple kernels from ccgs into one.
/* -------------------------------------------------------------------------- */
__global__ void
magma_ccgs_1_kernel(
int num_rows,
int num_cols,
magmaFloatComplex beta,
magmaFloatComplex *r,
magmaFloatComplex *q,
magmaFloatComplex *u,
magmaFloatComplex *p )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
magmaFloatComplex tmp;
tmp = r[ i+j*num_rows ] + beta * q[ i+j*num_rows ];
p[ i+j*num_rows ] = tmp + beta * q[ i+j*num_rows ]
+ beta * beta * p[ i+j*num_rows ];
u[ i+j*num_rows ] = tmp;
}
}
}
/**
Purpose
-------
Mergels multiple operations into one kernel:
u = r + beta q
p = u + beta*(q + beta*p)
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
beta magmaFloatComplex
scalar
@param[in]
r magmaFloatComplex_ptr
vector
@param[in]
q magmaFloatComplex_ptr
vector
@param[in,out]
u magmaFloatComplex_ptr
vector
@param[in,out]
p magmaFloatComplex_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_cgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_ccgs_1(
magma_int_t num_rows,
magma_int_t num_cols,
magmaFloatComplex beta,
magmaFloatComplex_ptr r,
magmaFloatComplex_ptr q,
magmaFloatComplex_ptr u,
magmaFloatComplex_ptr p,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
magma_ccgs_1_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, beta, r, q, u, p );
return MAGMA_SUCCESS;
}
__global__ void
magma_ccgs_2_kernel(
int num_rows,
int num_cols,
magmaFloatComplex *r,
magmaFloatComplex *u,
magmaFloatComplex *p )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
magmaFloatComplex tmp;
tmp = r[ i+j*num_rows ];
u[ i+j*num_rows ] = tmp;
p[ i+j*num_rows ] = tmp;
}
}
}
/**
Purpose
-------
Mergels multiple operations into one kernel:
u = r
p = r
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
r magmaFloatComplex_ptr
vector
@param[in,out]
u magmaFloatComplex_ptr
vector
@param[in,out]
p magmaFloatComplex_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_cgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_ccgs_2(
magma_int_t num_rows,
magma_int_t num_cols,
magmaFloatComplex_ptr r,
magmaFloatComplex_ptr u,
magmaFloatComplex_ptr p,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
magma_ccgs_2_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, r, u, p);
return MAGMA_SUCCESS;
}
__global__ void
magma_ccgs_3_kernel(
int num_rows,
int num_cols,
magmaFloatComplex alpha,
magmaFloatComplex *v_hat,
magmaFloatComplex *u,
magmaFloatComplex *q,
magmaFloatComplex *t )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
magmaFloatComplex uloc, tmp;
uloc = u[ i+j*num_rows ];
tmp = uloc - alpha * v_hat[ i+j*num_rows ];
t[ i+j*num_rows ] = tmp + uloc;
q[ i+j*num_rows ] = tmp;
}
}
}
/**
Purpose
-------
Mergels multiple operations into one kernel:
q = u - alpha v_hat
t = u + q
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
alpha magmaFloatComplex
scalar
@param[in]
v_hat magmaFloatComplex_ptr
vector
@param[in]
u magmaFloatComplex_ptr
vector
@param[in,out]
q magmaFloatComplex_ptr
vector
@param[in,out]
t magmaFloatComplex_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_cgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_ccgs_3(
magma_int_t num_rows,
magma_int_t num_cols,
magmaFloatComplex alpha,
magmaFloatComplex_ptr v_hat,
magmaFloatComplex_ptr u,
magmaFloatComplex_ptr q,
magmaFloatComplex_ptr t,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
magma_ccgs_3_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, alpha, v_hat, u, q, t );
return MAGMA_SUCCESS;
}
__global__ void
magma_ccgs_4_kernel(
int num_rows,
int num_cols,
magmaFloatComplex alpha,
magmaFloatComplex *u_hat,
magmaFloatComplex *t,
magmaFloatComplex *x,
magmaFloatComplex *r )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
x[ i+j*num_rows ] = x[ i+j*num_rows ]
+ alpha * u_hat[ i+j*num_rows ];
r[ i+j*num_rows ] = r[ i+j*num_rows ]
- alpha * t[ i+j*num_rows ];
}
}
}
/**
Purpose
-------
Mergels multiple operations into one kernel:
x = x + alpha u_hat
r = r -alpha*A u_hat = r -alpha*t
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
alpha magmaFloatComplex
scalar
@param[in]
u_hat magmaFloatComplex_ptr
vector
@param[in]
t magmaFloatComplex_ptr
vector
@param[in,out]
x magmaFloatComplex_ptr
vector
@param[in,out]
r magmaFloatComplex_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_cgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_ccgs_4(
magma_int_t num_rows,
magma_int_t num_cols,
magmaFloatComplex alpha,
magmaFloatComplex_ptr u_hat,
magmaFloatComplex_ptr t,
magmaFloatComplex_ptr x,
magmaFloatComplex_ptr r,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
magma_ccgs_4_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, alpha, u_hat, t, x, r );
return MAGMA_SUCCESS;
}
|
6d26c208f2202ff41dfa1a544b94081ffdfdf89e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "box2d4r-256-1-128_kernel.hu"
__device__ double __sbref_wrap(double *sb, size_t index) { return sb[index]; }
__global__ void kernel0_1(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 4 - 4);
const AN5D_TYPE __c1Pad = (4);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 4 - 4);
const AN5D_TYPE __c2Pad = (4);
#define __c2 c2
const AN5D_TYPE __halo1 = 4;
const AN5D_TYPE __halo2 = 4;
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_1_5;
double __reg_1_6;
double __reg_1_7;
double __reg_1_8;
__shared__ double __a_sb_double[__blockSize * 2];
double *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __storeValid = __writeValid1;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((0.00930f * (__SBREF(__a_sb, -4))) + (0.00931f * (__SBREF(__a_sb, -3)))) + (0.00932f * (__SBREF(__a_sb, -2)))) + (0.00933f * (__SBREF(__a_sb, -1)))) + (0.00934f * (__REGREF(__a, 0)))) + (0.00935f * (__SBREF(__a_sb, 1)))) + (0.00936f * (__SBREF(__a_sb, 2)))) + (0.00937f * (__SBREF(__a_sb, 3)))) + (0.00938f * (__SBREF(__a_sb, 4)))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((0.00939f * (__SBREF(__a_sb, -4)))) + (0.00940f * (__SBREF(__a_sb, -3)))) + (0.00941f * (__SBREF(__a_sb, -2)))) + (0.00942f * (__SBREF(__a_sb, -1)))) + (0.00943f * (__REGREF(__a, 0)))) + (0.00944f * (__SBREF(__a_sb, 1)))) + (0.00945f * (__SBREF(__a_sb, 2)))) + (0.00946f * (__SBREF(__a_sb, 3)))) + (0.00947f * (__SBREF(__a_sb, 4))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((0.00948f * (__SBREF(__a_sb, -4)))) + (0.00949f * (__SBREF(__a_sb, -3)))) + (0.00950f * (__SBREF(__a_sb, -2)))) + (0.00951f * (__SBREF(__a_sb, -1)))) + (0.00952f * (__REGREF(__a, 0)))) + (0.00953f * (__SBREF(__a_sb, 1)))) + (0.00954f * (__SBREF(__a_sb, 2)))) + (0.00955f * (__SBREF(__a_sb, 3)))) + (0.00956f * (__SBREF(__a_sb, 4)))))))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((((((((((((((((((((((((((((((((0.00957f * (__SBREF(__a_sb, -4)))) + (0.00958f * (__SBREF(__a_sb, -3)))) + (0.00959f * (__SBREF(__a_sb, -2)))) + (0.00960f * (__SBREF(__a_sb, -1)))) + (0.00961f * (__REGREF(__a, 0)))) + (0.00962f * (__SBREF(__a_sb, 1)))) + (0.00963f * (__SBREF(__a_sb, 2)))) + (0.00964f * (__SBREF(__a_sb, 3)))) + (0.00965f * (__SBREF(__a_sb, 4))))))))))))))))))))))))))))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_3(out, a) do { double etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((((((((((((((((((((0.00966f * (__SBREF(__a_sb, -4)))) + (0.00967f * (__SBREF(__a_sb, -3)))) + (0.00968f * (__SBREF(__a_sb, -2)))) + (0.00969f * (__SBREF(__a_sb, -1)))) + (0.22400f * (__REGREF(__a, 0)))) + (0.00971f * (__SBREF(__a_sb, 1)))) + (0.00972f * (__SBREF(__a_sb, 2)))) + (0.00973f * (__SBREF(__a_sb, 3)))) + (0.00974f * (__SBREF(__a_sb, 4)))))))))))))))))))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_4(out, a) do { double etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_5_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((((((((((((((0.00975f * (__SBREF(__a_sb, -4)))) + (0.00976f * (__SBREF(__a_sb, -3)))) + (0.00977f * (__SBREF(__a_sb, -2)))) + (0.00978f * (__SBREF(__a_sb, -1)))) + (0.00979f * (__REGREF(__a, 0)))) + (0.00980f * (__SBREF(__a_sb, 1)))) + (0.00981f * (__SBREF(__a_sb, 2)))) + (0.00982f * (__SBREF(__a_sb, 3)))) + (0.00983f * (__SBREF(__a_sb, 4))))))))))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_5(out, a) do { double etmp; __CALCEXPR_5_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_6_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((0.00984f * (__SBREF(__a_sb, -4)))) + (0.00985f * (__SBREF(__a_sb, -3)))) + (0.00986f * (__SBREF(__a_sb, -2)))) + (0.00987f * (__SBREF(__a_sb, -1)))) + (0.00988f * (__REGREF(__a, 0)))) + (0.00989f * (__SBREF(__a_sb, 1)))) + (0.00990f * (__SBREF(__a_sb, 2)))) + (0.00991f * (__SBREF(__a_sb, 3)))) + (0.00992f * (__SBREF(__a_sb, 4)))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_6(out, a) do { double etmp; __CALCEXPR_6_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_7_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((0.00993f * (__SBREF(__a_sb, -4)))) + (0.00994f * (__SBREF(__a_sb, -3)))) + (0.00995f * (__SBREF(__a_sb, -2)))) + (0.00996f * (__SBREF(__a_sb, -1)))) + (0.00997f * (__REGREF(__a, 0)))) + (0.00998f * (__SBREF(__a_sb, 1)))) + (0.00999f * (__SBREF(__a_sb, 2)))) + (0.01000f * (__SBREF(__a_sb, 3)))) + (0.01001f * (__SBREF(__a_sb, 4))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_7(out, a) do { double etmp; __CALCEXPR_7_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_8_wrap(__rn0, __a) do { __rn0 = ((((((((((0.01002f * (__SBREF(__a_sb, -4)))) + (0.01003f * (__SBREF(__a_sb, -3)))) + (0.01004f * (__SBREF(__a_sb, -2)))) + (0.01005f * (__SBREF(__a_sb, -1)))) + (0.01006f * (__REGREF(__a, 0)))) + (0.01007f * (__SBREF(__a_sb, 1)))) + (0.01008f * (__SBREF(__a_sb, 2)))) + (0.01009f * (__SBREF(__a_sb, 3)))) + (0.01010f * (__SBREF(__a_sb, 4)))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_8(out, a) do { double etmp; __CALCEXPR_8_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, out3, out4, out5, out6, out7, out8, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); __CALCEXPR_5(out5, reg); __CALCEXPR_6(out6, reg); __CALCEXPR_7(out7, reg); __CALCEXPR_8(out8, reg); } while (0);
#define __CALC1(out0, out1, out2, out3, out4, out5, out6, out7, out8, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, out5, out6, out7, out8, reg); } else out4 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__STORE(4, __reg_1_4);
}
else
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__STORE(4, __reg_1_4);
}
__a_sb = __a_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 9; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 13;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 4, __reg_1_5);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__STORE(__h - 4, __reg_1_6);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__STORE(__h - 4, __reg_1_7);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__STORE(__h - 4, __reg_1_8);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h - 4, __reg_1_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h - 4, __reg_1_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h - 4, __reg_1_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 4, __reg_1_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__STORE(__h - 4, __reg_1_4);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 4, __reg_1_5);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__STORE(__h - 3, __reg_1_6);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_8, __reg_1_7, __reg_0);
__STORE(__h - 2, __reg_1_7);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_8, __reg_0);
__STORE(__h - 1, __reg_1_8);
}
else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 4, __reg_1_5);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__STORE(__h - 3, __reg_1_6);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__STORE(__h - 2, __reg_1_7);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_0, __reg_1_8, __reg_0);
__STORE(__h - 1, __reg_1_8);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
}
else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 4, __reg_1_5);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__STORE(__h - 3, __reg_1_6);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__STORE(__h - 2, __reg_1_7);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__STORE(__h - 1, __reg_1_8);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__STORE(__h + 1, __reg_1_1);
}
else if (__h + 7 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 4, __reg_1_5);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__STORE(__h - 3, __reg_1_6);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__STORE(__h - 2, __reg_1_7);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__STORE(__h - 1, __reg_1_8);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h + 1, __reg_1_1);
__LOAD(__reg_0, __h + 6);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__STORE(__h + 2, __reg_1_2);
}
else if (__h + 8 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 4, __reg_1_5);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__STORE(__h - 3, __reg_1_6);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__STORE(__h - 2, __reg_1_7);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__STORE(__h - 1, __reg_1_8);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h + 1, __reg_1_1);
__LOAD(__reg_0, __h + 6);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h + 2, __reg_1_2);
__LOAD(__reg_0, __h + 7);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0);
__STORE(__h + 3, __reg_1_3);
}
else if (__h + 9 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 4, __reg_1_5);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__STORE(__h - 3, __reg_1_6);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__STORE(__h - 2, __reg_1_7);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__STORE(__h - 1, __reg_1_8);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h + 1, __reg_1_1);
__LOAD(__reg_0, __h + 6);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h + 2, __reg_1_2);
__LOAD(__reg_0, __h + 7);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h + 3, __reg_1_3);
__LOAD(__reg_0, __h + 8);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0);
__STORE(__h + 4, __reg_1_4);
}
else if (__h + 10 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 4, __reg_1_5);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__STORE(__h - 3, __reg_1_6);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__STORE(__h - 2, __reg_1_7);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__STORE(__h - 1, __reg_1_8);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h + 1, __reg_1_1);
__LOAD(__reg_0, __h + 6);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h + 2, __reg_1_2);
__LOAD(__reg_0, __h + 7);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h + 3, __reg_1_3);
__LOAD(__reg_0, __h + 8);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_5, __reg_1_4, __reg_0);
__STORE(__h + 4, __reg_1_4);
__LOAD(__reg_0, __h + 9);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_5, __reg_0);
__STORE(__h + 5, __reg_1_5);
}
else if (__h + 11 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 4, __reg_1_5);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__STORE(__h - 3, __reg_1_6);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__STORE(__h - 2, __reg_1_7);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__STORE(__h - 1, __reg_1_8);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h + 1, __reg_1_1);
__LOAD(__reg_0, __h + 6);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h + 2, __reg_1_2);
__LOAD(__reg_0, __h + 7);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h + 3, __reg_1_3);
__LOAD(__reg_0, __h + 8);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__STORE(__h + 4, __reg_1_4);
__LOAD(__reg_0, __h + 9);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h + 5, __reg_1_5);
__LOAD(__reg_0, __h + 10);
__CALC1(__reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_6, __reg_0);
__STORE(__h + 6, __reg_1_6);
}
else if (__h + 12 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 4, __reg_1_5);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__STORE(__h - 3, __reg_1_6);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__STORE(__h - 2, __reg_1_7);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__STORE(__h - 1, __reg_1_8);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h + 1, __reg_1_1);
__LOAD(__reg_0, __h + 6);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h + 2, __reg_1_2);
__LOAD(__reg_0, __h + 7);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h + 3, __reg_1_3);
__LOAD(__reg_0, __h + 8);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__STORE(__h + 4, __reg_1_4);
__LOAD(__reg_0, __h + 9);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h + 5, __reg_1_5);
__LOAD(__reg_0, __h + 10);
__CALC1(__reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_7, __reg_1_6, __reg_0);
__STORE(__h + 6, __reg_1_6);
__LOAD(__reg_0, __h + 11);
__CALC1(__reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_7, __reg_0);
__STORE(__h + 7, __reg_1_7);
}
}
else
{
for (__h = 9; __h <= __side1LenOl - 9;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 4, __reg_1_5);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__STORE(__h - 4, __reg_1_6);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__STORE(__h - 4, __reg_1_7);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__STORE(__h - 4, __reg_1_8);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h - 4, __reg_1_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h - 4, __reg_1_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h - 4, __reg_1_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 4, __reg_1_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__STORE(__h - 4, __reg_1_4);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 4, __reg_1_5);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__STORE(__h - 4, __reg_1_6);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__STORE(__h - 4, __reg_1_7);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__STORE(__h - 4, __reg_1_8);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h - 4, __reg_1_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h - 4, __reg_1_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h - 4, __reg_1_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 4, __reg_1_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__STORE(__h - 4, __reg_1_4);
__h++;
}
}
| 6d26c208f2202ff41dfa1a544b94081ffdfdf89e.cu | #include "box2d4r-256-1-128_kernel.hu"
__device__ double __sbref_wrap(double *sb, size_t index) { return sb[index]; }
__global__ void kernel0_1(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 4 - 4);
const AN5D_TYPE __c1Pad = (4);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 4 - 4);
const AN5D_TYPE __c2Pad = (4);
#define __c2 c2
const AN5D_TYPE __halo1 = 4;
const AN5D_TYPE __halo2 = 4;
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_1_5;
double __reg_1_6;
double __reg_1_7;
double __reg_1_8;
__shared__ double __a_sb_double[__blockSize * 2];
double *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __storeValid = __writeValid1;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((0.00930f * (__SBREF(__a_sb, -4))) + (0.00931f * (__SBREF(__a_sb, -3)))) + (0.00932f * (__SBREF(__a_sb, -2)))) + (0.00933f * (__SBREF(__a_sb, -1)))) + (0.00934f * (__REGREF(__a, 0)))) + (0.00935f * (__SBREF(__a_sb, 1)))) + (0.00936f * (__SBREF(__a_sb, 2)))) + (0.00937f * (__SBREF(__a_sb, 3)))) + (0.00938f * (__SBREF(__a_sb, 4)))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((0.00939f * (__SBREF(__a_sb, -4)))) + (0.00940f * (__SBREF(__a_sb, -3)))) + (0.00941f * (__SBREF(__a_sb, -2)))) + (0.00942f * (__SBREF(__a_sb, -1)))) + (0.00943f * (__REGREF(__a, 0)))) + (0.00944f * (__SBREF(__a_sb, 1)))) + (0.00945f * (__SBREF(__a_sb, 2)))) + (0.00946f * (__SBREF(__a_sb, 3)))) + (0.00947f * (__SBREF(__a_sb, 4))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((0.00948f * (__SBREF(__a_sb, -4)))) + (0.00949f * (__SBREF(__a_sb, -3)))) + (0.00950f * (__SBREF(__a_sb, -2)))) + (0.00951f * (__SBREF(__a_sb, -1)))) + (0.00952f * (__REGREF(__a, 0)))) + (0.00953f * (__SBREF(__a_sb, 1)))) + (0.00954f * (__SBREF(__a_sb, 2)))) + (0.00955f * (__SBREF(__a_sb, 3)))) + (0.00956f * (__SBREF(__a_sb, 4)))))))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((((((((((((((((((((((((((((((((0.00957f * (__SBREF(__a_sb, -4)))) + (0.00958f * (__SBREF(__a_sb, -3)))) + (0.00959f * (__SBREF(__a_sb, -2)))) + (0.00960f * (__SBREF(__a_sb, -1)))) + (0.00961f * (__REGREF(__a, 0)))) + (0.00962f * (__SBREF(__a_sb, 1)))) + (0.00963f * (__SBREF(__a_sb, 2)))) + (0.00964f * (__SBREF(__a_sb, 3)))) + (0.00965f * (__SBREF(__a_sb, 4))))))))))))))))))))))))))))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_3(out, a) do { double etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((((((((((((((((((((0.00966f * (__SBREF(__a_sb, -4)))) + (0.00967f * (__SBREF(__a_sb, -3)))) + (0.00968f * (__SBREF(__a_sb, -2)))) + (0.00969f * (__SBREF(__a_sb, -1)))) + (0.22400f * (__REGREF(__a, 0)))) + (0.00971f * (__SBREF(__a_sb, 1)))) + (0.00972f * (__SBREF(__a_sb, 2)))) + (0.00973f * (__SBREF(__a_sb, 3)))) + (0.00974f * (__SBREF(__a_sb, 4)))))))))))))))))))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_4(out, a) do { double etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_5_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((((((((((((((0.00975f * (__SBREF(__a_sb, -4)))) + (0.00976f * (__SBREF(__a_sb, -3)))) + (0.00977f * (__SBREF(__a_sb, -2)))) + (0.00978f * (__SBREF(__a_sb, -1)))) + (0.00979f * (__REGREF(__a, 0)))) + (0.00980f * (__SBREF(__a_sb, 1)))) + (0.00981f * (__SBREF(__a_sb, 2)))) + (0.00982f * (__SBREF(__a_sb, 3)))) + (0.00983f * (__SBREF(__a_sb, 4))))))))))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_5(out, a) do { double etmp; __CALCEXPR_5_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_6_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((0.00984f * (__SBREF(__a_sb, -4)))) + (0.00985f * (__SBREF(__a_sb, -3)))) + (0.00986f * (__SBREF(__a_sb, -2)))) + (0.00987f * (__SBREF(__a_sb, -1)))) + (0.00988f * (__REGREF(__a, 0)))) + (0.00989f * (__SBREF(__a_sb, 1)))) + (0.00990f * (__SBREF(__a_sb, 2)))) + (0.00991f * (__SBREF(__a_sb, 3)))) + (0.00992f * (__SBREF(__a_sb, 4)))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_6(out, a) do { double etmp; __CALCEXPR_6_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_7_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((0.00993f * (__SBREF(__a_sb, -4)))) + (0.00994f * (__SBREF(__a_sb, -3)))) + (0.00995f * (__SBREF(__a_sb, -2)))) + (0.00996f * (__SBREF(__a_sb, -1)))) + (0.00997f * (__REGREF(__a, 0)))) + (0.00998f * (__SBREF(__a_sb, 1)))) + (0.00999f * (__SBREF(__a_sb, 2)))) + (0.01000f * (__SBREF(__a_sb, 3)))) + (0.01001f * (__SBREF(__a_sb, 4))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_7(out, a) do { double etmp; __CALCEXPR_7_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_8_wrap(__rn0, __a) do { __rn0 = ((((((((((0.01002f * (__SBREF(__a_sb, -4)))) + (0.01003f * (__SBREF(__a_sb, -3)))) + (0.01004f * (__SBREF(__a_sb, -2)))) + (0.01005f * (__SBREF(__a_sb, -1)))) + (0.01006f * (__REGREF(__a, 0)))) + (0.01007f * (__SBREF(__a_sb, 1)))) + (0.01008f * (__SBREF(__a_sb, 2)))) + (0.01009f * (__SBREF(__a_sb, 3)))) + (0.01010f * (__SBREF(__a_sb, 4)))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_8(out, a) do { double etmp; __CALCEXPR_8_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, out3, out4, out5, out6, out7, out8, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); __CALCEXPR_5(out5, reg); __CALCEXPR_6(out6, reg); __CALCEXPR_7(out7, reg); __CALCEXPR_8(out8, reg); } while (0);
#define __CALC1(out0, out1, out2, out3, out4, out5, out6, out7, out8, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, out5, out6, out7, out8, reg); } else out4 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__STORE(4, __reg_1_4);
}
else
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__STORE(4, __reg_1_4);
}
__a_sb = __a_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 9; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 13;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 4, __reg_1_5);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__STORE(__h - 4, __reg_1_6);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__STORE(__h - 4, __reg_1_7);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__STORE(__h - 4, __reg_1_8);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h - 4, __reg_1_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h - 4, __reg_1_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h - 4, __reg_1_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 4, __reg_1_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__STORE(__h - 4, __reg_1_4);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 4, __reg_1_5);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__STORE(__h - 3, __reg_1_6);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_8, __reg_1_7, __reg_0);
__STORE(__h - 2, __reg_1_7);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_8, __reg_0);
__STORE(__h - 1, __reg_1_8);
}
else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 4, __reg_1_5);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__STORE(__h - 3, __reg_1_6);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__STORE(__h - 2, __reg_1_7);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_0, __reg_1_8, __reg_0);
__STORE(__h - 1, __reg_1_8);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
}
else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 4, __reg_1_5);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__STORE(__h - 3, __reg_1_6);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__STORE(__h - 2, __reg_1_7);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__STORE(__h - 1, __reg_1_8);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__STORE(__h + 1, __reg_1_1);
}
else if (__h + 7 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 4, __reg_1_5);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__STORE(__h - 3, __reg_1_6);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__STORE(__h - 2, __reg_1_7);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__STORE(__h - 1, __reg_1_8);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h + 1, __reg_1_1);
__LOAD(__reg_0, __h + 6);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__STORE(__h + 2, __reg_1_2);
}
else if (__h + 8 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 4, __reg_1_5);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__STORE(__h - 3, __reg_1_6);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__STORE(__h - 2, __reg_1_7);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__STORE(__h - 1, __reg_1_8);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h + 1, __reg_1_1);
__LOAD(__reg_0, __h + 6);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h + 2, __reg_1_2);
__LOAD(__reg_0, __h + 7);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0);
__STORE(__h + 3, __reg_1_3);
}
else if (__h + 9 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 4, __reg_1_5);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__STORE(__h - 3, __reg_1_6);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__STORE(__h - 2, __reg_1_7);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__STORE(__h - 1, __reg_1_8);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h + 1, __reg_1_1);
__LOAD(__reg_0, __h + 6);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h + 2, __reg_1_2);
__LOAD(__reg_0, __h + 7);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h + 3, __reg_1_3);
__LOAD(__reg_0, __h + 8);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0);
__STORE(__h + 4, __reg_1_4);
}
else if (__h + 10 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 4, __reg_1_5);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__STORE(__h - 3, __reg_1_6);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__STORE(__h - 2, __reg_1_7);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__STORE(__h - 1, __reg_1_8);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h + 1, __reg_1_1);
__LOAD(__reg_0, __h + 6);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h + 2, __reg_1_2);
__LOAD(__reg_0, __h + 7);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h + 3, __reg_1_3);
__LOAD(__reg_0, __h + 8);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_5, __reg_1_4, __reg_0);
__STORE(__h + 4, __reg_1_4);
__LOAD(__reg_0, __h + 9);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_5, __reg_0);
__STORE(__h + 5, __reg_1_5);
}
else if (__h + 11 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 4, __reg_1_5);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__STORE(__h - 3, __reg_1_6);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__STORE(__h - 2, __reg_1_7);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__STORE(__h - 1, __reg_1_8);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h + 1, __reg_1_1);
__LOAD(__reg_0, __h + 6);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h + 2, __reg_1_2);
__LOAD(__reg_0, __h + 7);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h + 3, __reg_1_3);
__LOAD(__reg_0, __h + 8);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__STORE(__h + 4, __reg_1_4);
__LOAD(__reg_0, __h + 9);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h + 5, __reg_1_5);
__LOAD(__reg_0, __h + 10);
__CALC1(__reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_6, __reg_0);
__STORE(__h + 6, __reg_1_6);
}
else if (__h + 12 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 4, __reg_1_5);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__STORE(__h - 3, __reg_1_6);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__STORE(__h - 2, __reg_1_7);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__STORE(__h - 1, __reg_1_8);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h + 1, __reg_1_1);
__LOAD(__reg_0, __h + 6);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h + 2, __reg_1_2);
__LOAD(__reg_0, __h + 7);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h + 3, __reg_1_3);
__LOAD(__reg_0, __h + 8);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__STORE(__h + 4, __reg_1_4);
__LOAD(__reg_0, __h + 9);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h + 5, __reg_1_5);
__LOAD(__reg_0, __h + 10);
__CALC1(__reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_7, __reg_1_6, __reg_0);
__STORE(__h + 6, __reg_1_6);
__LOAD(__reg_0, __h + 11);
__CALC1(__reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_7, __reg_0);
__STORE(__h + 7, __reg_1_7);
}
}
else
{
for (__h = 9; __h <= __side1LenOl - 9;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 4, __reg_1_5);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__STORE(__h - 4, __reg_1_6);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__STORE(__h - 4, __reg_1_7);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__STORE(__h - 4, __reg_1_8);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h - 4, __reg_1_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h - 4, __reg_1_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h - 4, __reg_1_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 4, __reg_1_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__STORE(__h - 4, __reg_1_4);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 4, __reg_1_5);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__STORE(__h - 4, __reg_1_6);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__STORE(__h - 4, __reg_1_7);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__STORE(__h - 4, __reg_1_8);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h - 4, __reg_1_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h - 4, __reg_1_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h - 4, __reg_1_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 4, __reg_1_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__STORE(__h - 4, __reg_1_4);
__h++;
}
}
|
935a7b4d62c7ee7813a5aa917aa92ed171159681.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/TensorUtils.h>
#include <c10/util/Exception.h>
#include <ATen/hip/HIPContext.h>
#include <THH/THHDeviceUtils.cuh>
#include <THH/THHTensorMathReduce.cuh>
namespace at {
namespace native {
namespace {
// Block size for weight_norm_*_first_dim_kernel.
// Currently, kernels are non-persistent.
// Dialing up the block size to, say 1024, can improve performance by
// increase the amount of cache available per block, which can improve cache hit rate.
// However, this is less efficient for short rows. 256 is pretty versatile.
// May be worth implementing heuristics later.
#define BLOCK 256
// Block size for weight_norm_*_last_dim_kernel.
// This is tricker than the first_dim case because we must make blocks
// at least 16 fast elements wide to ensure fully-coalesced half-precision accesses.
// Since output-element parallelism is along the fast dimension, this reduces the number of
// blocks we can launch by 16X.
#define TILE_W 16
// Somewhat versatile strategy: max out intra-block parallelism by extending
// blocks across the slow dimension up to the hardware-max block size of 1024.
#define TILE_H 64
// Block-wide tree reduction: combines one value per thread into the first
// `lanes` slots of the shared-memory scratch buffer x.
//   x        - shared-memory scratch; needs at least blockDim.x*blockDim.y
//              elements (plus readable x[tid+32] when blockSize >= 64).
//   val      - this thread's contribution.
//   lanes    - number of surviving partial results (intended <= 32); on exit
//              x[0..lanes-1] holds the reduced values, visible to all threads.
//   reduceOp - binary associative combiner (e.g. ReduceAdd<T>).
// Must be reached by every thread of the block: it contains __syncthreads().
template<typename T, typename ReduceOp>
__device__ __forceinline__ void reduce_block_into_lanes
(T *x,
T val,
int lanes, // lanes is intended to be <= 32.
ReduceOp reduceOp)
{
// Flattened thread id within the (possibly 2-D) block.
int tid = threadIdx.x + threadIdx.y*blockDim.x;
int blockSize = blockDim.x*blockDim.y; // blockSize is intended to be a multiple of 32.
// Stage each thread's value in shared memory only when more than one warp
// participates; a single warp can reduce entirely in registers below.
if(blockSize >= 64)
{
x[tid] = val;
__syncthreads();
}
#ifndef __HIP_PLATFORM_HCC__
#pragma unroll
#endif
// Pairwise shared-memory tree reduction, halving the active range each
// step until 64 elements remain (the last 64 -> 32 fold happens below).
for(int i = (blockSize >> 1); i >= 64; i >>= 1)
{
if(tid < i)
x[tid] = reduceOp(x[tid], x[tid+i]);
__syncthreads();
}
// Final warp: fold 64 -> 32 from shared memory (or take the register value
// when the block is a single warp), then reduce inside the warp via shuffles.
if(tid < 32)
{
T final;
if(blockSize >= 64)
final = reduceOp(x[tid], x[tid+32]);
else
final = val;
// __SYNCWARP();
#ifndef __HIP_PLATFORM_HCC__
#pragma unroll
#endif
// Shuffle-down reduction stops once `lanes` partial results survive.
for(int i = 16; i >= lanes; i >>= 1)
final = reduceOp(final, WARP_SHFL_DOWN(final, i));
if(tid < lanes)
x[tid] = final; // EpilogueOp
}
// Make sure the smem result is visible to all warps.
__syncthreads();
}
// Weight-norm forward over the first (slowest) dimension:
//   w[row, :] = g[row] * v[row, :] / ||v[row, :]||_2
// and norms[row] receives the per-row L2 norm.
// Launch: one block per row; blockDim.x threads stride the row; dynamic
// shared memory must hold at least blockDim.x accscalar_t for the reduction.
template
<typename scalar_t,
typename accscalar_t>
__global__ void weight_norm_fwd_first_dim_kernel
(scalar_t* __restrict__ w,
accscalar_t* __restrict__ norms,
const scalar_t* __restrict__ v,
const scalar_t* __restrict__ g,
const int rowSize)
{
  // We are norming each slowest-dim row of the tensor separately.
  // For now, assign one block to each row.
  const int tid = threadIdx.x;
  const int row = blockIdx.x;
  const int stride = blockDim.x;

  // Logical index offset for this flattened row
  const int rowStart = row*rowSize;

  // Hack to get around nvcc complaining when an smem array is declared with the same name
  // but different types in different kernels (in this case different instantiations)
  // extern __shared__ accscalar_t s[]; // error: declaration is incompatible with previous "s"
  extern __shared__ char buf[];
  accscalar_t* s = (accscalar_t*)buf;

  // Each thread accumulates the sum of squares of its strided slice of the row.
  accscalar_t thread_sum = 0.f;
  for(int i = tid; i < rowSize; i += stride )
  {
    accscalar_t val_f = scalar_cast<accscalar_t>(v[i+rowStart]);
    thread_sum += val_f*val_f; // AccumOp, could do Kahan here
  }

  reduce_block_into_lanes(s, thread_sum, 1, ReduceAdd<accscalar_t>());
  accscalar_t result = s[0];

  // Use the overloaded sqrt rather than sqrtf so the norm keeps full
  // precision when accscalar_t is double (sqrtf silently truncated to float).
  result = sqrt(result);

  if(tid == 0)
    norms[row] = result;

  // Broadcast load, could use shared memory instead.
  accscalar_t g_this_row = scalar_cast<accscalar_t>(g[row]);

  accscalar_t rnorm = 1.f/result; // for consistency with backward kernel

  // Write data to output
  for(int i = tid; i < rowSize; i += stride )
  {
    accscalar_t val_f = scalar_cast<accscalar_t>(v[i+rowStart]);
    w[i+rowStart] = scalar_cast<scalar_t>(g_this_row*val_f*rnorm);
  }
}
// Weight-norm forward over the last (fastest) dimension:
//   w[:, col] = g[col] * v[:, col] / ||v[:, col]||_2
// and norms[col] receives the per-column L2 norm.
// Launch: 2-D blocks; blockIdx.x tiles the fast dimension. Dynamic shared
// memory must hold blockDim.x*blockDim.y accscalar_t for the reduction plus
// blockDim.x accscalar_t for the per-column reciprocal norms.
template
<typename scalar_t,
typename accscalar_t>
__global__ void weight_norm_fwd_last_dim_kernel
(
scalar_t* __restrict__ w,
accscalar_t* __restrict__ norms,
const scalar_t* __restrict__ v,
const scalar_t* __restrict__ g,
const int fast_dim_size,
const int slower_dims_size
)
{
  const int fast_dim_location = threadIdx.x + blockIdx.x*blockDim.x;

  // Shared scratch: reduction workspace followed by per-column 1/norm values.
  extern __shared__ char buf[];
  accscalar_t* alloc = (accscalar_t*)buf;
  accscalar_t* s = &alloc[0];
  accscalar_t* rnorms_this_block = &alloc[blockDim.x*blockDim.y];

  // Each thread accumulates squares down its column, striding by blockDim.y.
  accscalar_t thread_sum = 0.f;

  int slower_dims_location = threadIdx.y;
  int currentIdx = fast_dim_location + fast_dim_size*slower_dims_location;
  if(fast_dim_location < fast_dim_size)
    while(slower_dims_location < slower_dims_size)
    {
      accscalar_t val_f = scalar_cast<accscalar_t>(v[currentIdx]);
      thread_sum += val_f*val_f; // AccumOp, could do Kahan here
      currentIdx += blockDim.y*fast_dim_size;
      slower_dims_location += blockDim.y;
    }

  reduce_block_into_lanes(s, thread_sum, blockDim.x, ReduceAdd<accscalar_t>());

  // Better to pass an EpilogueOp to reduce_block_into_lanes?
  if(threadIdx.y == 0)
  {
    accscalar_t result = s[threadIdx.x];
    // sqrt (not sqrtf) keeps full precision when accscalar_t is double.
    accscalar_t norm_this_col = sqrt(result);
    // Guard: the last block along x may extend past fast_dim_size, so only
    // in-bounds columns may touch global memory. (The previous unconditional
    // norms[fast_dim_location] store was out of bounds for those threads.)
    if(fast_dim_location < fast_dim_size)
      norms[fast_dim_location] = norm_this_col;
    rnorms_this_block[threadIdx.x] = 1.f/norm_this_col;
  }

  __syncthreads();

  // Guard the global read of g for the same reason as the norms store above.
  // Out-of-bounds columns never use g_this_col because the write loop below
  // carries the same fast_dim_location < fast_dim_size guard.
  accscalar_t g_this_col = 0.f;
  if(fast_dim_location < fast_dim_size)
    g_this_col = scalar_cast<accscalar_t>(g[fast_dim_location]);
  accscalar_t rnorm = rnorms_this_block[threadIdx.x];

  slower_dims_location = threadIdx.y;
  currentIdx = fast_dim_location + fast_dim_size*slower_dims_location;
  if(fast_dim_location < fast_dim_size)
    while(slower_dims_location < slower_dims_size)
    {
      accscalar_t val_f = scalar_cast<accscalar_t>(v[currentIdx]);
      w[currentIdx] = scalar_cast<scalar_t>(g_this_col*val_f*rnorm);
      currentIdx += blockDim.y*fast_dim_size;
      slower_dims_location += blockDim.y;
    }
}
template
<typename scalar_t,
typename accscalar_t>
__global__ void weight_norm_bwd_first_dim_kernel
(scalar_t* __restrict__ grad_v,
scalar_t* __restrict__ grad_g,
const scalar_t* __restrict__ grad_w,
const scalar_t* __restrict__ saved_v,
const scalar_t* __restrict__ saved_g,
const accscalar_t* __restrict__ saved_norms,
const int rowSize)
{
// For now, assign one block to each row.
const int tid = threadIdx.x;
const int row = blockIdx.x;
const int stride = blockDim.x;
// Logical index offset for this flattened row
const int rowStart = row*rowSize;
// Hack to get around nvcc complaining when an smem array is declared with the same name
// but different types in different kernels (in this case different instantiations)
// extern __shared__ accscalar_t s[]; // error: declaration is incompatible with previous "s"
extern __shared__ char buf[];
accscalar_t* s = (accscalar_t*)buf;
accscalar_t thread_sum = 0.f;
for(int i = tid; i < rowSize; i += stride )
{
accscalar_t grad_wi = scalar_cast<accscalar_t>(grad_w[i+rowStart]);
accscalar_t saved_vi = scalar_cast<accscalar_t>(saved_v[i+rowStart]);
thread_sum += grad_wi*saved_vi; // AccumOp, could do Kahan here
}
reduce_block_into_lanes(s, thread_sum, 1, ReduceAdd<accscalar_t>());
accscalar_t result = s[0];
// Could choose to save reciprocal of norm instead I suppose, but norms is probably
// more handy to keep around.
// Broadcast load; could use shared memory instead.
accscalar_t rnorm = 1.f/saved_norms[row];
accscalar_t rnorm3 = rnorm*rnorm*rnorm;
// Write g gradients.
if(tid == 0)
grad_g[row] = scalar_cast<scalar_t>(result*rnorm);
// Broadcast load, could use shared memory instead.
accscalar_t g_this_row = scalar_cast<accscalar_t>(saved_g[row]);
// Write v gradients. We are reusing values that were loaded earlier, so there
// is an optimization opportunity here (store values persistently).
for(int j = tid; j < rowSize; j += stride )
{
accscalar_t grad_wj = scalar_cast<accscalar_t>(grad_w[j+rowStart]);
accscalar_t saved_vj = scalar_cast<accscalar_t>(saved_v[j+rowStart]);
accscalar_t grad_vj = g_this_row*(rnorm*grad_wj - rnorm3*saved_vj*result);
grad_v[j+rowStart] = scalar_cast<scalar_t>(grad_vj);
}
}
template
<typename scalar_t,
typename accscalar_t>
__global__ void weight_norm_bwd_last_dim_kernel
(scalar_t* __restrict__ grad_v,
scalar_t* __restrict__ grad_g,
const scalar_t* __restrict__ grad_w,
const scalar_t* __restrict__ saved_v,
const scalar_t* __restrict__ saved_g,
const accscalar_t* __restrict__ saved_norms,
const int fast_dim_size,
const int slower_dims_size)
{
const int fast_dim_location = threadIdx.x + blockIdx.x*blockDim.x;
extern __shared__ char buf[];
accscalar_t* s = (accscalar_t*)buf;
accscalar_t thread_sum = 0.f;
int slower_dims_location = threadIdx.y;
int currentIdx = fast_dim_location + fast_dim_size*slower_dims_location;
if(fast_dim_location < fast_dim_size)
while(slower_dims_location < slower_dims_size)
{
accscalar_t grad_wi = scalar_cast<accscalar_t>(grad_w[currentIdx]);
accscalar_t saved_vi = scalar_cast<accscalar_t>(saved_v[currentIdx]);
thread_sum += grad_wi*saved_vi; // AccumOp, could do Kahan here
currentIdx += blockDim.y*fast_dim_size;
slower_dims_location += blockDim.y;
}
reduce_block_into_lanes(s, thread_sum, blockDim.x, ReduceAdd<accscalar_t>());
accscalar_t result = s[threadIdx.x];
// Broadcast load; could use shared memory instead.
accscalar_t rnorm = 1.f/saved_norms[fast_dim_location];
accscalar_t rnorm3 = rnorm*rnorm*rnorm;
// Write g gradients.
if(threadIdx.y == 0)
grad_g[fast_dim_location] = scalar_cast<scalar_t>(result*rnorm);
// Entire block pulls these values, could use shared memory instead.
accscalar_t g_this_col = scalar_cast<accscalar_t>(saved_g[fast_dim_location]);
// Write v gradients.
slower_dims_location = threadIdx.y;
currentIdx = fast_dim_location + fast_dim_size*slower_dims_location;
if(fast_dim_location < fast_dim_size)
while(slower_dims_location < slower_dims_size)
{
accscalar_t grad_wj = scalar_cast<accscalar_t>(grad_w[currentIdx]);
accscalar_t saved_vj = scalar_cast<accscalar_t>(saved_v[currentIdx]);
accscalar_t grad_vj = g_this_col*(rnorm*grad_wj - rnorm3*saved_vj*result);
grad_v[currentIdx] = scalar_cast<scalar_t>(grad_vj);
currentIdx += blockDim.y*fast_dim_size;
slower_dims_location += blockDim.y;
}
}
} // anonymous namespace
std::tuple<Tensor,Tensor> weight_norm_cuda
(const Tensor & v,
const Tensor & g,
int64_t dim)
{
auto w = at::empty_like(v, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
// weight_norm_fused does have a derivative defined in derivatives.yaml, therefore, VariableType.cpp
// sends the unpacked g.data() as the argument. In other words, we expect "g" is a bare Tensor here.
// norms is only needed to stash for backward.
// g.scalar_type() may be at::ScalarType::Double, Float, or Half.
// If Half, stash norms as float.
at::ScalarType AccType = g.scalar_type() == at::ScalarType::Half ?
at::ScalarType::Float : g.scalar_type();
// Will this create norms on the same device as g, regardless of what the thread's default
// current device is? I believe so, because Type::* functions are DeviceGuard()ed.
auto norms = at::empty_strided(g.sizes(), g.strides(), g.options().dtype(AccType));
const int ndims = v.dim();
if(dim == 0)
{
// Find logical size of each flattened slowest-dim row
int rowSize = 1;
for(int i = ndims - 1; i > 0; i--)
rowSize *= v.size(i);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
AT_DISPATCH_FLOATING_TYPES_AND_HALF
(v.scalar_type(),
"weight_norm_fwd_first_dim_kernel",
[&]
{
using accscalar_t = acc_type<scalar_t, true>;
hipLaunchKernelGGL(( weight_norm_fwd_first_dim_kernel<scalar_t, accscalar_t>)
, dim3(v.size(0)),
dim3(BLOCK),
BLOCK*sizeof(accscalar_t),
stream,
w.data_ptr<scalar_t>(),
norms.data_ptr<accscalar_t>(),
v.data_ptr<scalar_t>(),
g.data_ptr<scalar_t>(),
rowSize);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
}
else if(dim == ndims - 1)
{
// Precompute slower_dims_size and fast_dim_size
int slower_dims_size = 1;
for(int i = 0; i < ndims - 1; i++)
slower_dims_size *= v.size(i);
int fast_dim_size = v.size(ndims-1);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
AT_DISPATCH_FLOATING_TYPES_AND_HALF
(v.scalar_type(),
"weight_norm_fwd_last_dim_kernel",
[&]
{
using accscalar_t = acc_type<scalar_t, true>;
hipLaunchKernelGGL(( weight_norm_fwd_last_dim_kernel<scalar_t, accscalar_t>)
, dim3((fast_dim_size+TILE_W-1)/TILE_W),
dim3(dim3(TILE_W,TILE_H)),
(TILE_W*TILE_H + TILE_W)*sizeof(accscalar_t),
stream,
w.data_ptr<scalar_t>(),
norms.data_ptr<accscalar_t>(),
v.data_ptr<scalar_t>(),
g.data_ptr<scalar_t>(),
fast_dim_size,
slower_dims_size);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
}
// The kernel execution is asynchronous, so this will only catch errors on the kernel launch,
// not the kernel's execution. Errors in kernel execution aren't guaranteed to be caught
// until a later error check on a synchronizing CUDA call. Unfortunately, without manually
// synchronizing here, the foregoing is the best we can do.
return std::tuple<Tensor, Tensor>{w, norms};
}
std::tuple<Tensor, Tensor> weight_norm_cuda_backward
(const Tensor & grad_w,
const Tensor & saved_v,
const Tensor & saved_g,
const Tensor & saved_norms,
int64_t dim)
{
// These checks should always succeed, because weight_norm_fused_backward should only
// ever be recorded in the autograd graph via weight_norm, which passes contiguous v and g.
TORCH_CHECK(saved_v.is_contiguous(), "saved_v must be contiguous");
TORCH_CHECK(saved_g.is_contiguous(), "saved_g must be contiguous");
TORCH_CHECK(saved_norms.is_contiguous(), "saved_norms must be contiguous");
TORCH_CHECK(dim == 0 || dim == saved_v.dim() - 1, "fused kernels can only be applied for first or last dim")
auto grad_v = at::empty_like(saved_v, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
auto grad_g = at::empty_like(saved_g, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
const int ndims = saved_v.dim();
if(dim == 0)
{
// Find logical size of each flattened slowest-dim row
int rowSize = 1;
for(int i = ndims - 1; i > 0; i--)
rowSize *= saved_v.size(i);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
AT_DISPATCH_FLOATING_TYPES_AND_HALF
(saved_v.scalar_type(),
"weight_norm_bwd_first_dim_kernel",
[&]
{
using accscalar_t = acc_type<scalar_t, true>;
hipLaunchKernelGGL(( weight_norm_bwd_first_dim_kernel<scalar_t, accscalar_t>)
, dim3(grad_w.size(0)),
dim3(BLOCK),
BLOCK*sizeof(accscalar_t),
stream,
grad_v.data_ptr<scalar_t>(),
grad_g.data_ptr<scalar_t>(),
grad_w.data_ptr<scalar_t>(),
saved_v.data_ptr<scalar_t>(),
saved_g.data_ptr<scalar_t>(),
saved_norms.data_ptr<accscalar_t>(),
rowSize);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
}
else if(dim == ndims - 1)
{
// Precompute slower_dims_size and fast_dim_size because they involve dynamically indexing an array.
int slower_dims_size = 1;
for(int i = 0; i < ndims - 1; i++)
slower_dims_size *= saved_v.size(i);
int fast_dim_size = saved_v.size(ndims-1);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
AT_DISPATCH_FLOATING_TYPES_AND_HALF
(saved_v.scalar_type(),
"weight_norm_bwd_last_dim_kernel",
[&]
{
using accscalar_t = acc_type<scalar_t, true>;
hipLaunchKernelGGL(( weight_norm_bwd_last_dim_kernel<scalar_t, accscalar_t>)
, dim3((fast_dim_size+TILE_W-1)/TILE_W),
dim3(dim3(TILE_W,TILE_H)),
(TILE_W*TILE_H + TILE_W)*sizeof(accscalar_t),
stream,
grad_v.data_ptr<scalar_t>(),
grad_g.data_ptr<scalar_t>(),
grad_w.data_ptr<scalar_t>(),
saved_v.data_ptr<scalar_t>(),
saved_g.data_ptr<scalar_t>(),
saved_norms.data_ptr<accscalar_t>(),
fast_dim_size,
slower_dims_size);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
}
// The kernel execution is asynchronous, so this will only catch errors on the kernel launch,
// not the kernel's execution. Errors in kernel execution aren't guaranteed to be caught
// until a later error check on a synchronizing CUDA call. Unfortunately, without manually
// synchronizing here, the foregoing is the best we can do.
return std::tuple<Tensor, Tensor>{grad_v, grad_g};
}
#undef BLOCK
#undef TILE_W
#undef TILE_H
} // namespace native
} // namespace at
| 935a7b4d62c7ee7813a5aa917aa92ed171159681.cu | #include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/TensorUtils.h>
#include <c10/util/Exception.h>
#include <ATen/cuda/CUDAContext.h>
#include <THC/THCDeviceUtils.cuh>
#include <THC/THCTensorMathReduce.cuh>
namespace at {
namespace native {
namespace {
// Block size for weight_norm_*_first_dim_kernel.
// Currently, kernels are non-persistent.
// Dialing up the block size to, say 1024, can improve performance by
// increase the amount of cache available per block, which can improve cache hit rate.
// However, this is less efficient for short rows. 256 is pretty versatile.
// May be worth implementing heuristics later.
#define BLOCK 256
// Block size for weight_norm_*_last_dim_kernel.
// This is tricker than the first_dim case because we must make blocks
// at least 16 fast elements wide to ensure fully-coalesced half-precision accesses.
// Since output-element parallelism is along the fast dimension, this reduces the number of
// blocks we can launch by 16X.
#define TILE_W 16
// Somewhat versatile strategy: max out intra-block parallelism by extending
// blocks across the slow dimension up to the hardware-max block size of 1024.
#define TILE_H 64
template<typename T, typename ReduceOp>
__device__ __forceinline__ void reduce_block_into_lanes
(T *x,
T val,
int lanes, // lanes is intended to be <= 32.
ReduceOp reduceOp)
{
int tid = threadIdx.x + threadIdx.y*blockDim.x;
int blockSize = blockDim.x*blockDim.y; // blockSize is intended to be a multiple of 32.
if(blockSize >= 64)
{
x[tid] = val;
__syncthreads();
}
#ifndef __HIP_PLATFORM_HCC__
#pragma unroll
#endif
for(int i = (blockSize >> 1); i >= 64; i >>= 1)
{
if(tid < i)
x[tid] = reduceOp(x[tid], x[tid+i]);
__syncthreads();
}
if(tid < 32)
{
T final;
if(blockSize >= 64)
final = reduceOp(x[tid], x[tid+32]);
else
final = val;
// __SYNCWARP();
#ifndef __HIP_PLATFORM_HCC__
#pragma unroll
#endif
for(int i = 16; i >= lanes; i >>= 1)
final = reduceOp(final, WARP_SHFL_DOWN(final, i));
if(tid < lanes)
x[tid] = final; // EpilogueOp
}
// Make sure the smem result is visible to all warps.
__syncthreads();
}
template
<typename scalar_t,
typename accscalar_t>
__global__ void weight_norm_fwd_first_dim_kernel
(scalar_t* __restrict__ w,
accscalar_t* __restrict__ norms,
const scalar_t* __restrict__ v,
const scalar_t* __restrict__ g,
const int rowSize)
{
// We are norming each slowest-dim row of the tensor separately.
// For now, assign one block to each row.
const int tid = threadIdx.x;
const int row = blockIdx.x;
const int stride = blockDim.x;
// Logical index offset for this flattened row
const int rowStart = row*rowSize;
// Hack to get around nvcc complaining when an smem array is declared with the same name
// but different types in different kernels (in this case different instantiations)
// extern __shared__ accscalar_t s[]; // error: declaration is incompatible with previous "s"
extern __shared__ char buf[];
accscalar_t* s = (accscalar_t*)buf;
accscalar_t thread_sum = 0.f;
for(int i = tid; i < rowSize; i += stride )
{
accscalar_t val_f = scalar_cast<accscalar_t>(v[i+rowStart]);
thread_sum += val_f*val_f; // AccumOp, could do Kahan here
}
reduce_block_into_lanes(s, thread_sum, 1, ReduceAdd<accscalar_t>());
accscalar_t result = s[0];
result = sqrtf(result);
if(tid == 0)
norms[row] = result;
// Broadcast load, could use shared memory instead.
accscalar_t g_this_row = scalar_cast<accscalar_t>(g[row]);
accscalar_t rnorm = 1.f/result; // for consistency with backward kernel
// Write data to output
for(int i = tid; i < rowSize; i += stride )
{
accscalar_t val_f = scalar_cast<accscalar_t>(v[i+rowStart]);
w[i+rowStart] = scalar_cast<scalar_t>(g_this_row*val_f*rnorm);
}
}
template
<typename scalar_t,
typename accscalar_t>
__global__ void weight_norm_fwd_last_dim_kernel
(
scalar_t* __restrict__ w,
accscalar_t* __restrict__ norms,
const scalar_t* __restrict__ v,
const scalar_t* __restrict__ g,
const int fast_dim_size,
const int slower_dims_size
)
{
const int fast_dim_location = threadIdx.x + blockIdx.x*blockDim.x;
extern __shared__ char buf[];
accscalar_t* alloc = (accscalar_t*)buf;
accscalar_t* s = &alloc[0];
accscalar_t* rnorms_this_block = &alloc[blockDim.x*blockDim.y];
accscalar_t thread_sum = 0.f;
int slower_dims_location = threadIdx.y;
int currentIdx = fast_dim_location + fast_dim_size*slower_dims_location;
if(fast_dim_location < fast_dim_size)
while(slower_dims_location < slower_dims_size)
{
accscalar_t val_f = scalar_cast<accscalar_t>(v[currentIdx]);
thread_sum += val_f*val_f; // AccumOp, could do Kahan here
currentIdx += blockDim.y*fast_dim_size;
slower_dims_location += blockDim.y;
}
reduce_block_into_lanes(s, thread_sum, blockDim.x, ReduceAdd<accscalar_t>());
// Better to pass an EpilogueOp to reduce_block_into_lanes?
if(threadIdx.y == 0)
{
accscalar_t result = s[threadIdx.x];
accscalar_t norm_this_col = sqrtf(result);
norms[fast_dim_location] = norm_this_col;
rnorms_this_block[threadIdx.x] = 1.f/norm_this_col;
}
__syncthreads();
accscalar_t g_this_col = scalar_cast<accscalar_t>(g[fast_dim_location]);
accscalar_t rnorm = rnorms_this_block[threadIdx.x];
slower_dims_location = threadIdx.y;
currentIdx = fast_dim_location + fast_dim_size*slower_dims_location;
if(fast_dim_location < fast_dim_size)
while(slower_dims_location < slower_dims_size)
{
accscalar_t val_f = scalar_cast<accscalar_t>(v[currentIdx]);
w[currentIdx] = scalar_cast<scalar_t>(g_this_col*val_f*rnorm);
currentIdx += blockDim.y*fast_dim_size;
slower_dims_location += blockDim.y;
}
}
template
<typename scalar_t,
typename accscalar_t>
__global__ void weight_norm_bwd_first_dim_kernel
(scalar_t* __restrict__ grad_v,
scalar_t* __restrict__ grad_g,
const scalar_t* __restrict__ grad_w,
const scalar_t* __restrict__ saved_v,
const scalar_t* __restrict__ saved_g,
const accscalar_t* __restrict__ saved_norms,
const int rowSize)
{
// For now, assign one block to each row.
const int tid = threadIdx.x;
const int row = blockIdx.x;
const int stride = blockDim.x;
// Logical index offset for this flattened row
const int rowStart = row*rowSize;
// Hack to get around nvcc complaining when an smem array is declared with the same name
// but different types in different kernels (in this case different instantiations)
// extern __shared__ accscalar_t s[]; // error: declaration is incompatible with previous "s"
extern __shared__ char buf[];
accscalar_t* s = (accscalar_t*)buf;
accscalar_t thread_sum = 0.f;
for(int i = tid; i < rowSize; i += stride )
{
accscalar_t grad_wi = scalar_cast<accscalar_t>(grad_w[i+rowStart]);
accscalar_t saved_vi = scalar_cast<accscalar_t>(saved_v[i+rowStart]);
thread_sum += grad_wi*saved_vi; // AccumOp, could do Kahan here
}
reduce_block_into_lanes(s, thread_sum, 1, ReduceAdd<accscalar_t>());
accscalar_t result = s[0];
// Could choose to save reciprocal of norm instead I suppose, but norms is probably
// more handy to keep around.
// Broadcast load; could use shared memory instead.
accscalar_t rnorm = 1.f/saved_norms[row];
accscalar_t rnorm3 = rnorm*rnorm*rnorm;
// Write g gradients.
if(tid == 0)
grad_g[row] = scalar_cast<scalar_t>(result*rnorm);
// Broadcast load, could use shared memory instead.
accscalar_t g_this_row = scalar_cast<accscalar_t>(saved_g[row]);
// Write v gradients. We are reusing values that were loaded earlier, so there
// is an optimization opportunity here (store values persistently).
for(int j = tid; j < rowSize; j += stride )
{
accscalar_t grad_wj = scalar_cast<accscalar_t>(grad_w[j+rowStart]);
accscalar_t saved_vj = scalar_cast<accscalar_t>(saved_v[j+rowStart]);
accscalar_t grad_vj = g_this_row*(rnorm*grad_wj - rnorm3*saved_vj*result);
grad_v[j+rowStart] = scalar_cast<scalar_t>(grad_vj);
}
}
template
<typename scalar_t,
typename accscalar_t>
__global__ void weight_norm_bwd_last_dim_kernel
(scalar_t* __restrict__ grad_v,
scalar_t* __restrict__ grad_g,
const scalar_t* __restrict__ grad_w,
const scalar_t* __restrict__ saved_v,
const scalar_t* __restrict__ saved_g,
const accscalar_t* __restrict__ saved_norms,
const int fast_dim_size,
const int slower_dims_size)
{
const int fast_dim_location = threadIdx.x + blockIdx.x*blockDim.x;
extern __shared__ char buf[];
accscalar_t* s = (accscalar_t*)buf;
accscalar_t thread_sum = 0.f;
int slower_dims_location = threadIdx.y;
int currentIdx = fast_dim_location + fast_dim_size*slower_dims_location;
if(fast_dim_location < fast_dim_size)
while(slower_dims_location < slower_dims_size)
{
accscalar_t grad_wi = scalar_cast<accscalar_t>(grad_w[currentIdx]);
accscalar_t saved_vi = scalar_cast<accscalar_t>(saved_v[currentIdx]);
thread_sum += grad_wi*saved_vi; // AccumOp, could do Kahan here
currentIdx += blockDim.y*fast_dim_size;
slower_dims_location += blockDim.y;
}
reduce_block_into_lanes(s, thread_sum, blockDim.x, ReduceAdd<accscalar_t>());
accscalar_t result = s[threadIdx.x];
// Broadcast load; could use shared memory instead.
accscalar_t rnorm = 1.f/saved_norms[fast_dim_location];
accscalar_t rnorm3 = rnorm*rnorm*rnorm;
// Write g gradients.
if(threadIdx.y == 0)
grad_g[fast_dim_location] = scalar_cast<scalar_t>(result*rnorm);
// Entire block pulls these values, could use shared memory instead.
accscalar_t g_this_col = scalar_cast<accscalar_t>(saved_g[fast_dim_location]);
// Write v gradients.
slower_dims_location = threadIdx.y;
currentIdx = fast_dim_location + fast_dim_size*slower_dims_location;
if(fast_dim_location < fast_dim_size)
while(slower_dims_location < slower_dims_size)
{
accscalar_t grad_wj = scalar_cast<accscalar_t>(grad_w[currentIdx]);
accscalar_t saved_vj = scalar_cast<accscalar_t>(saved_v[currentIdx]);
accscalar_t grad_vj = g_this_col*(rnorm*grad_wj - rnorm3*saved_vj*result);
grad_v[currentIdx] = scalar_cast<scalar_t>(grad_vj);
currentIdx += blockDim.y*fast_dim_size;
slower_dims_location += blockDim.y;
}
}
} // anonymous namespace
std::tuple<Tensor,Tensor> weight_norm_cuda
(const Tensor & v,
const Tensor & g,
int64_t dim)
{
auto w = at::empty_like(v, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
// weight_norm_fused does have a derivative defined in derivatives.yaml, therefore, VariableType.cpp
// sends the unpacked g.data() as the argument. In other words, we expect "g" is a bare Tensor here.
// norms is only needed to stash for backward.
// g.scalar_type() may be at::ScalarType::Double, Float, or Half.
// If Half, stash norms as float.
at::ScalarType AccType = g.scalar_type() == at::ScalarType::Half ?
at::ScalarType::Float : g.scalar_type();
// Will this create norms on the same device as g, regardless of what the thread's default
// current device is? I believe so, because Type::* functions are DeviceGuard()ed.
auto norms = at::empty_strided(g.sizes(), g.strides(), g.options().dtype(AccType));
const int ndims = v.dim();
if(dim == 0)
{
// Find logical size of each flattened slowest-dim row
int rowSize = 1;
for(int i = ndims - 1; i > 0; i--)
rowSize *= v.size(i);
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
AT_DISPATCH_FLOATING_TYPES_AND_HALF
(v.scalar_type(),
"weight_norm_fwd_first_dim_kernel",
[&]
{
using accscalar_t = acc_type<scalar_t, true>;
weight_norm_fwd_first_dim_kernel<scalar_t, accscalar_t>
<<<v.size(0),
BLOCK,
BLOCK*sizeof(accscalar_t),
stream>>>
(w.data_ptr<scalar_t>(),
norms.data_ptr<accscalar_t>(),
v.data_ptr<scalar_t>(),
g.data_ptr<scalar_t>(),
rowSize);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
}
else if(dim == ndims - 1)
{
// Precompute slower_dims_size and fast_dim_size
int slower_dims_size = 1;
for(int i = 0; i < ndims - 1; i++)
slower_dims_size *= v.size(i);
int fast_dim_size = v.size(ndims-1);
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
AT_DISPATCH_FLOATING_TYPES_AND_HALF
(v.scalar_type(),
"weight_norm_fwd_last_dim_kernel",
[&]
{
using accscalar_t = acc_type<scalar_t, true>;
weight_norm_fwd_last_dim_kernel<scalar_t, accscalar_t>
<<<(fast_dim_size+TILE_W-1)/TILE_W,
dim3(TILE_W,TILE_H),
(TILE_W*TILE_H + TILE_W)*sizeof(accscalar_t),
stream>>>
(w.data_ptr<scalar_t>(),
norms.data_ptr<accscalar_t>(),
v.data_ptr<scalar_t>(),
g.data_ptr<scalar_t>(),
fast_dim_size,
slower_dims_size);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
}
// The kernel execution is asynchronous, so this will only catch errors on the kernel launch,
// not the kernel's execution. Errors in kernel execution aren't guaranteed to be caught
// until a later error check on a synchronizing CUDA call. Unfortunately, without manually
// synchronizing here, the foregoing is the best we can do.
return std::tuple<Tensor, Tensor>{w, norms};
}
std::tuple<Tensor, Tensor> weight_norm_cuda_backward
(const Tensor & grad_w,
const Tensor & saved_v,
const Tensor & saved_g,
const Tensor & saved_norms,
int64_t dim)
{
// These checks should always succeed, because weight_norm_fused_backward should only
// ever be recorded in the autograd graph via weight_norm, which passes contiguous v and g.
TORCH_CHECK(saved_v.is_contiguous(), "saved_v must be contiguous");
TORCH_CHECK(saved_g.is_contiguous(), "saved_g must be contiguous");
TORCH_CHECK(saved_norms.is_contiguous(), "saved_norms must be contiguous");
TORCH_CHECK(dim == 0 || dim == saved_v.dim() - 1, "fused kernels can only be applied for first or last dim")
auto grad_v = at::empty_like(saved_v, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
auto grad_g = at::empty_like(saved_g, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
const int ndims = saved_v.dim();
if(dim == 0)
{
// Find logical size of each flattened slowest-dim row
int rowSize = 1;
for(int i = ndims - 1; i > 0; i--)
rowSize *= saved_v.size(i);
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
AT_DISPATCH_FLOATING_TYPES_AND_HALF
(saved_v.scalar_type(),
"weight_norm_bwd_first_dim_kernel",
[&]
{
using accscalar_t = acc_type<scalar_t, true>;
weight_norm_bwd_first_dim_kernel<scalar_t, accscalar_t>
<<<grad_w.size(0),
BLOCK,
BLOCK*sizeof(accscalar_t),
stream>>>
(grad_v.data_ptr<scalar_t>(),
grad_g.data_ptr<scalar_t>(),
grad_w.data_ptr<scalar_t>(),
saved_v.data_ptr<scalar_t>(),
saved_g.data_ptr<scalar_t>(),
saved_norms.data_ptr<accscalar_t>(),
rowSize);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
}
else if(dim == ndims - 1)
{
// Precompute slower_dims_size and fast_dim_size because they involve dynamically indexing an array.
int slower_dims_size = 1;
for(int i = 0; i < ndims - 1; i++)
slower_dims_size *= saved_v.size(i);
int fast_dim_size = saved_v.size(ndims-1);
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
AT_DISPATCH_FLOATING_TYPES_AND_HALF
(saved_v.scalar_type(),
"weight_norm_bwd_last_dim_kernel",
[&]
{
using accscalar_t = acc_type<scalar_t, true>;
weight_norm_bwd_last_dim_kernel<scalar_t, accscalar_t>
<<<(fast_dim_size+TILE_W-1)/TILE_W,
dim3(TILE_W,TILE_H),
(TILE_W*TILE_H + TILE_W)*sizeof(accscalar_t),
stream>>>
(grad_v.data_ptr<scalar_t>(),
grad_g.data_ptr<scalar_t>(),
grad_w.data_ptr<scalar_t>(),
saved_v.data_ptr<scalar_t>(),
saved_g.data_ptr<scalar_t>(),
saved_norms.data_ptr<accscalar_t>(),
fast_dim_size,
slower_dims_size);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
}
// The kernel execution is asynchronous, so this will only catch errors on the kernel launch,
// not the kernel's execution. Errors in kernel execution aren't guaranteed to be caught
// until a later error check on a synchronizing CUDA call. Unfortunately, without manually
// synchronizing here, the foregoing is the best we can do.
return std::tuple<Tensor, Tensor>{grad_v, grad_g};
}
#undef BLOCK
#undef TILE_W
#undef TILE_H
} // namespace native
} // namespace at
|
cb1c6da1024885e1edce42a607eda134b425a688.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
using namespace std;
int main() {
hipEvent_t start, stop, done_offload;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventCreate(&done_offload);
void *p, *q;
long size = 1024l * 1024 * 200;
hipMalloc(&p, size);
hipHostMalloc(&q, size);
cout << "without split by event\n";
int N = 100;
hipEventRecord(start);
for (int i = 0; i < N; i++) {
hipMemcpyAsync(q, p, size, hipMemcpyDeviceToHost, NULL);
}
hipEventRecord(stop);
hipEventSynchronize(stop);
float milli;
hipEventElapsedTime(&milli, start, stop);
cout << "Time(ms): " << milli << endl;
cout << "with split by event\n";
hipEventRecord(start);
for (int i = 0; i < N; i++) {
hipMemcpyAsync(q, p, size, hipMemcpyDeviceToHost, NULL);
hipEventRecord(done_offload, NULL);
hipEventSynchronize(done_offload);
}
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&milli, start, stop);
cout << "Time(ms): " << milli << endl;
} | cb1c6da1024885e1edce42a607eda134b425a688.cu | #include <iostream>
using namespace std;
int main() {
cudaEvent_t start, stop, done_offload;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventCreate(&done_offload);
void *p, *q;
long size = 1024l * 1024 * 200;
cudaMalloc(&p, size);
cudaMallocHost(&q, size);
cout << "without split by event\n";
int N = 100;
cudaEventRecord(start);
for (int i = 0; i < N; i++) {
cudaMemcpyAsync(q, p, size, cudaMemcpyDeviceToHost, NULL);
}
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milli;
cudaEventElapsedTime(&milli, start, stop);
cout << "Time(ms): " << milli << endl;
cout << "with split by event\n";
cudaEventRecord(start);
for (int i = 0; i < N; i++) {
cudaMemcpyAsync(q, p, size, cudaMemcpyDeviceToHost, NULL);
cudaEventRecord(done_offload, NULL);
cudaEventSynchronize(done_offload);
}
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milli, start, stop);
cout << "Time(ms): " << milli << endl;
} |
9e7fdadad1a4b164a71422d64d019935a09a5d25.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define __CUDA
#include <cstdio>
// #include <iostream>
//#include <cutil.h>
#include <helper_cuda.h>
/*
#include <helper_cuda_drvapi.h>
#include <helper_cuda_gl.h>
#include <helper_cuda.h>
#include <helper_functions.h>
#include <helper_image.h>
#include <helper_math.h>
#include <helper_string.h>
#include <helper_timer.h>
*/
#include "hermite4-gpu.h"
#define NTHREADS 128
//#define NJBLOCKS 16 // GF 8800 GTS 512
//#define NJBLOCKS_ 16
//#define NJBLOCKS 30 // GF 280 GTX
//#define NJBLOCKS_ 32
#define NJBLOCKS 30 // GF 480 GTX
#define NJBLOCKS_ 32
#define NREDUCE (NTHREADS/NJBLOCKS_)
#define NIBLOCKS 32
#define NIMAX (NTHREADS * NIBLOCKS) // 2048
#define GPU_REDUCE
struct Force_dev{
float2 acc[3];
// float2 pot;
float jrk[3];
// float pad0;
float pad[3];
__device__ Force_dev(){
// acc[0] = acc[1] = acc[2] = pot = make_float2(0.f, 0.f);
acc[0] = acc[1] = acc[2] = make_float2(0.f, 0.f);
jrk[0] = jrk[1] = jrk[2] = 0.f;
}
};
__device__ void force_reduce(Force_dev &fl, Force_dev &fr){
#pragma unroll
for(int k=0; k<3; k++){
fl.acc[k] = float2_accum(fl.acc[k], fr.acc[k].x);
fl.acc[k] = float2_accum(fl.acc[k], fr.acc[k].y);
fl.jrk[k] += fr.jrk[k];
}
// fl.pot = float2_accum(fl.pot, fr.pot.x);
// fl.pot = float2_accum(fl.pot, fr.pot.y);
}
__device__ void h4_kernel(
const Predictor &ip,
const Predictor &jp,
Force_dev &fo,
float eps2){
#if 0
float dx = float2_sub(jp.pos[0], ip.pos[0]);
float dy = float2_sub(jp.pos[1], ip.pos[1]);
float dz = float2_sub(jp.pos[2], ip.pos[2]);
float dvx = jp.vel[0] - ip.vel[0];
float dvy = jp.vel[1] - ip.vel[1];
float dvz = jp.vel[2] - ip.vel[2];
float dax = jp.acc[0] - ip.acc[0];
float day = jp.acc[1] - ip.acc[1];
float daz = jp.acc[2] - ip.acc[2];
#else
float dx = (jp.posH.x - ip.posH.x) + (jp.posL.x - ip.posL.x);
float dy = (jp.posH.y - ip.posH.y) + (jp.posL.y - ip.posL.y);
float dz = (jp.posH.z - ip.posH.z) + (jp.posL.z - ip.posL.z);
float dvx = jp.vel.x - ip.vel.x;
float dvy = jp.vel.y - ip.vel.y;
float dvz = jp.vel.z - ip.vel.z;
#endif
float r2 = eps2 + dx*dx + dy*dy + dz*dz;
float drdv = dx*dvx + dy*dvy + dz*dvz;
float rinv1 = rsqrtf(r2);
float rinv2 = rinv1 * rinv1;
float alpha = (drdv)*rinv2;
// rinv1 *= jp.mass;
rinv1 *= jp.posH.w;
float rinv3 = rinv1 * rinv2;
// float pot = rinv1;
float ax = rinv3*dx;
float ay = rinv3*dy;
float az = rinv3*dz;
float jx = rinv3*dvx + (-3.f*alpha)*ax;
float jy = rinv3*dvy + (-3.f*alpha)*ay;
float jz = rinv3*dvz + (-3.f*alpha)*az;
#if 0
if(r2 != eps2){
fo.pot = float2_accum(fo.pot, pot);
}
#endif
fo.acc[0] = float2_accum(fo.acc[0], ax);
fo.acc[1] = float2_accum(fo.acc[1], ay);
fo.acc[2] = float2_accum(fo.acc[2], az);
fo.jrk[0] += jx;
fo.jrk[1] += jy;
fo.jrk[2] += jz;
}
/**
 * Partial-force kernel. gridDim.x tiles the i-particles (NTHREADS per
 * block), gridDim.y = NJBLOCKS partitions the j-range; each (i,j)-block
 * writes its partial result to force[i][jbid] for later reduction.
 *
 * NOTE(review): the float4 staging loop below always reads NTHREADS whole
 * Predictors per tile, so jpred[] must be padded past nj. calc_force
 * allocates nj + NTHREADS elements — verify any other caller does the same.
 */
__global__ void h4_gravity(
int ni,
int nj,
Predictor ipred[],
Predictor jpred[],
Force_dev force[][NJBLOCKS_],
float eps2){
int ibid = blockIdx.x;
int jbid = blockIdx.y;
int tid = threadIdx.x;
int iaddr = tid + NTHREADS * ibid;
int jstart = (nj * (jbid )) / NJBLOCKS;
int jend = (nj * (jbid+1)) / NJBLOCKS;
// small kernel opt
// When fewer than NTHREADS i-particles remain in this block, fold the
// spare threads onto the same i-particles ('nskip' copies each), every
// copy handling a different j-offset; their partials are merged in the
// shared-memory reduction tree below.
int nskip = 1;
int niloc = ni - NTHREADS * ibid;
if(niloc <= NTHREADS/2) nskip = 2;
if(niloc <= NTHREADS/4) nskip = 4;
if(niloc <= NTHREADS/8) nskip = 8;
if(niloc <= NTHREADS/16) nskip = 16;
if(niloc <= NTHREADS/32) nskip = 32;
int joff = tid / (NTHREADS/nskip);
__shared__ Predictor jpshare[NTHREADS];
Force_dev fo;
Predictor ip = ipred[tid % (NTHREADS/nskip) + NTHREADS * ibid];
for(int j=jstart; j<jend; j+=NTHREADS){
__syncthreads();
#if 0
jpshare[tid] = jpred[j+tid];
#else
// coalesced float4-wise copy of NTHREADS Predictors into shared memory
float4 *src = (float4 *)&jpred[j];
float4 *dst = (float4 *)jpshare;
for(int it=0; it<sizeof(Predictor)/sizeof(float4); it++){
dst[tid] = src[tid];
dst += NTHREADS;
src += NTHREADS;
}
#endif
__syncthreads();
if(jend-j < NTHREADS){
// tail tile: guard against running past jend
for(int jj=0; jj<jend-j; jj+=nskip){
Predictor &jp = jpshare[jj+joff];
if(jj+joff < jend-j) h4_kernel(ip, jp, fo, eps2);
}
}else{
#if 0
#pragma unroll
for(int jj=0; jj<NTHREADS; jj+=nskip){
Predictor &jp = jpshare[jj+joff];
h6_kernel(ip, jp, fo, eps2);
}
#else
// full tile: 4-way manual unroll
for(int jj=0; jj<NTHREADS; jj+=4*nskip){
Predictor &jp0 = jpshare[0*nskip+jj+joff];
Predictor &jp1 = jpshare[1*nskip+jj+joff];
Predictor &jp2 = jpshare[2*nskip+jj+joff];
Predictor &jp3 = jpshare[3*nskip+jj+joff];
h4_kernel(ip, jp0, fo, eps2);
h4_kernel(ip, jp1, fo, eps2);
h4_kernel(ip, jp2, fo, eps2);
h4_kernel(ip, jp3, fo, eps2);
}
#endif
}
}
// horizontal reduce
// Reuse the Predictor tile as Force_dev storage; each halving step merges
// the partials of the thread copies created by the nskip trick.
// __shared__ Force_dev foshare[NTHREADS];
Force_dev *foshare = (Force_dev *)jpshare;
__syncthreads();
foshare[tid] = fo;
__syncthreads();
if(nskip > 1){
if(tid < NTHREADS/2){
force_reduce(foshare[tid], foshare[tid + NTHREADS/2]);
}
__syncthreads();
}
if(nskip > 2){
if(tid < NTHREADS/4){
force_reduce(foshare[tid], foshare[tid + NTHREADS/4]);
}
__syncthreads();
}
if(nskip > 4){
if(tid < NTHREADS/8){
force_reduce(foshare[tid], foshare[tid + NTHREADS/8]);
}
__syncthreads();
}
if(nskip > 8){
if(tid < NTHREADS/16){
force_reduce(foshare[tid], foshare[tid + NTHREADS/16]);
}
__syncthreads();
}
if(nskip > 16){
if(tid < NTHREADS/32){
force_reduce(foshare[tid], foshare[tid + NTHREADS/32]);
}
__syncthreads();
}
// store one partial Force_dev per valid i-particle
if(tid < niloc){
fo = foshare[tid];
force[iaddr][jbid] = fo;
}
}
#ifdef GPU_REDUCE
/**
 * Reduces the NJBLOCKS_ per-j-block partial forces of each i-particle into
 * one Force_dev. Each block handles NREDUCE i-particles (NTHREADS =
 * NREDUCE * NJBLOCKS_ threads, one thread per partial).
 *
 * Fix: the original reduction loop had no barrier between steps and relied
 * on implicit warp-synchronous execution (NJBLOCKS_ groups aligned with
 * warps). That assumption is unsafe on Volta+ with independent thread
 * scheduling, so a block-wide barrier is placed after every halving step
 * (the loop condition is uniform across the block, so all threads reach it).
 */
__global__ void reduce_kernel(
    Force_dev fo_dev[][NJBLOCKS_],
    Force_dev fo_reduce[])
{
    int bid  = blockIdx.x;
    int tid  = threadIdx.x;
    int ioff = bid * NREDUCE;   // first i-particle handled by this block

    // Reuse a Predictor-sized shared buffer as Force_dev storage
    // (assumes sizeof(Predictor) >= sizeof(Force_dev) — TODO confirm).
    __shared__ Predictor jpshare[NTHREADS];
    Force_dev *fo_share = (Force_dev *)jpshare;

    // Coalesced float4-wise load of NREDUCE rows of partial forces.
    float4 *src = (float4 *)fo_dev[ioff];
    float4 *dst = (float4 *)fo_share;
    for(int it=0; it<sizeof(Force_dev)/sizeof(float4); it++){
        dst[tid] = src[tid];
        dst += NTHREADS;
        src += NTHREADS;
    }
    __syncthreads();

    // Tree reduction within each NJBLOCKS_-wide segment.
    int n = NJBLOCKS_;
    while(n > 1){
        n /= 2;
        if(tid % NJBLOCKS_ < n){
            force_reduce(fo_share[tid], fo_share[tid + n]);
        }
        __syncthreads();   // publish this step before the next one reads it
    }

    // Compact the NREDUCE reduced results to the front of shared memory.
    if(tid % NJBLOCKS_ == 0){
        fo_share[tid / NJBLOCKS_] = fo_share[tid];
    }
    __syncthreads();

    // Coalesced float-wise store of the NREDUCE results.
    if(tid < NREDUCE * sizeof(Force_dev) / sizeof(float)){
        float *fdst = (float *)&fo_reduce[ioff];
        float *fsrc = (float *)fo_share;
        fdst[tid] = fsrc[tid];
    }
}
#endif
extern double wtime();
/**
 * Host driver: computes forces on 'nitot' i-particles from 'nj' j-particles
 * on the GPU, in chunks of at most NIMAX i-particles.
 *
 * t1 receives the wall time after the j-particle upload; t_send/t_recv
 * accumulate host->device / device->host transfer times.
 * Device and pinned-host buffers are allocated once on first call and
 * reused for the lifetime of the process (never freed).
 */
void calc_force(
int nitot,
int nj,
float eps2,
Predictor ipred[],
Predictor jpred[],
Force force[],
double &t1,
double &t_send,
double &t_recv){
static Predictor *jp_dev = NULL;
static Predictor *ip_dev = NULL;
static Force_dev (*fo_dev)[NJBLOCKS_] = NULL;
#ifdef GPU_REDUCE
static Force_dev (*fo_reduce) = NULL;
static Force_dev (*fo_host) = NULL;
#else
static Force_dev (*fo_host)[NJBLOCKS_] = NULL;
#endif
if(jp_dev == NULL){ // first call
/*
const int dev = 0;
// CUDA_SAFE_CALL(hipSetDevice(dev));
checkCudaErrors(hipSetDevice(dev));
hipDeviceProp_t deviceProp;
checkCudaErrors(hipGetDeviceProperties(&deviceProp, dev));
printf("GPU: %s\n", deviceProp.name);
*/
// padded by NTHREADS so h4_gravity may stage whole tiles past nj
hipMalloc((void **)&jp_dev, (nj + NTHREADS) * sizeof(Predictor));
hipMalloc((void **)&ip_dev, NIMAX * sizeof(Predictor));
hipMalloc((void **)&fo_dev, NIMAX * sizeof(*fo_dev));
hipMemset(fo_dev, 0, NIMAX * sizeof(*fo_dev));
#ifdef GPU_REDUCE
hipMalloc((void **)&fo_reduce, NIMAX * sizeof(*fo_reduce));
#endif
// pinned host buffer for the result download
hipHostMalloc((void **)&fo_host, NIMAX * sizeof(*fo_host));
}
hipMemcpy(jp_dev, jpred, nj * sizeof(Predictor), hipMemcpyHostToDevice);
t1 = wtime();
int nimax = NIMAX;
for(int ioff=0; ioff<nitot; ioff+=nimax){
int ni = ::min(nimax, nitot-ioff);
double t2 = wtime();
hipMemcpy(ip_dev, ipred+ioff, ni * sizeof(Predictor), hipMemcpyHostToDevice);
double t3 = wtime();
t_send += t3 - t2;
// kernel call: grid.x tiles i-particles, grid.y splits the j-range
int niblocks = 1 + (ni-1) / NTHREADS;
dim3 grid(niblocks, NJBLOCKS, 1);
dim3 threads(NTHREADS, 1, 1);
// std::cerr << "call h6_gravity " << niblocks << std::endl;
//int sharedMemSize = NTHREADS * sizeof(Predictor);
// h6_gravity <<< grid, threads, sharedMemSize >>>
// (ni, nj, ip_dev, jp_dev, fo_dev, eps2);
hipLaunchKernelGGL(( h4_gravity) , dim3(grid), dim3(threads) , 0, 0,
ni, nj, ip_dev, jp_dev, fo_dev, eps2);
#ifdef GPU_REDUCE
// reduce NJBLOCKS_ partials per i-particle on the device
dim3 grid_reduce(1 + (ni-1)/NREDUCE, 1, 1);
hipLaunchKernelGGL(( reduce_kernel) , dim3(grid_reduce), dim3(threads) , 0, 0, fo_dev, fo_reduce);
hipDeviceSynchronize();
double t4 = wtime();
hipMemcpy(fo_host, fo_reduce, ni * sizeof(*fo_reduce), hipMemcpyDeviceToHost);
double t5 = wtime();
t_recv += t5 - t4;
// unpack two-float accumulators into the host Force structures
for(int i=0; i<ni; i++){
Force f; // zeroed by its constructor
Force_dev &fo = fo_host[i];
f.acc.x = float2_reduce(fo.acc[0]);
f.acc.y = float2_reduce(fo.acc[1]);
f.acc.z = float2_reduce(fo.acc[2]);
// f.pot = float2_reduce(fo.pot);
f.jrk.x = fo.jrk[0];
f.jrk.y = fo.jrk[1];
f.jrk.z = fo.jrk[2];
force[ioff + i] = f;
}
#else
// host-side reduction over the NJBLOCKS partials
// NOTE(review): this branch references fo.pot/fo.snp, which are commented
// out of Force_dev above — it will not compile if GPU_REDUCE is undefined.
hipMemcpy(fo_host, fo_dev, ni * sizeof(*fo_dev), hipMemcpyDeviceToHost);
// std::cerr << "done" << std::endl;
for(int i=0; i<ni; i++){
Force f; // zeroed by its constructor
for(int jb=0; jb<NJBLOCKS; jb++){
Force_dev &fo = fo_host[i][jb];
f.acc.x += float2_reduce(fo.acc[0]);
f.acc.y += float2_reduce(fo.acc[1]);
f.acc.z += float2_reduce(fo.acc[2]);
f.pot -= float2_reduce(fo.pot);
f.jrk.x += fo.jrk[0];
f.jrk.y += fo.jrk[1];
f.jrk.z += fo.jrk[2];
f.snp.x += fo.snp[0];
f.snp.y += fo.snp[1];
f.snp.z += fo.snp[2];
}
force[ioff + i] = f;
}
#endif
}
}
/**
 * Direct-sum potential kernel: each thread accumulates the softened
 * potential at particle 'iaddr' from j-particles in [js, je), staging
 * NTHREADS Posm records in shared memory per tile.
 *
 * NOTE(review): posm[] is indexed at iaddr and j+tid without bounds checks.
 * calc_pot pads both posm and pot allocations by NTHREADS so these reads
 * and the final store stay in-range; padded entries are uninitialized, but
 * only the first 'ni' outputs are consumed by the host.
 */
__global__ void pot_kernel(
int js,
int je,
float eps2,
Posm posm[],
float2 pot[]){
int bid = blockIdx.x;
int tid = threadIdx.x;
int iaddr = tid + NTHREADS * bid;
Posm ip = posm[iaddr];
float2 poti = make_float2(0.f, 0.f);   // two-float accumulator
for(int j=js; j<je; j+=NTHREADS){
__shared__ Posm posmshare[NTHREADS];
__syncthreads();
posmshare[tid] = posm[j + tid];
__syncthreads();
int njj = NTHREADS < je-j ? NTHREADS : je-j;   // tail-tile guard
for(int jj=0; jj< njj; jj++){
Posm &jp = posmshare[jj];
float dx = float2_sub(jp.pos[0], ip.pos[0]);
float dy = float2_sub(jp.pos[1], ip.pos[1]);
float dz = float2_sub(jp.pos[2], ip.pos[2]);
float r2 = eps2 + dx*dx + dy*dy + dz*dz;
float mrinv = jp.mass * rsqrtf(r2);
// r2 == eps2 only for zero separation: skips the self-interaction
if(r2 > eps2) poti = float2_accum(poti, mrinv);
}
}
pot[iaddr] = poti;
}
/**
 * Host driver for pot_kernel: computes the (negative) softened potential of
 * the first 'ni' particles due to j-particles [js, je) and stores it in
 * dpot[]. Allocations are padded by NTHREADS so the unguarded kernel
 * accesses stay in bounds.
 */
void calc_pot(
int ni,
int js,
int je,
float eps2,
Posm posm[],
double dpot[]){
Posm *posm_dev;
float2 *pot, *pot_dev;
hipMalloc((void **)&posm_dev, (ni+NTHREADS) * sizeof(Posm));
hipMalloc((void **)&pot_dev, (ni+NTHREADS) * sizeof(float2));
hipHostMalloc((void **)&pot, (ni+NTHREADS) * sizeof(float2));
hipMemcpy(posm_dev, posm, ni * sizeof(Posm), hipMemcpyHostToDevice);
int nblocks = 1 + (ni-1) / NTHREADS;
dim3 grid(nblocks, 1, 1);
dim3 threads(NTHREADS, 1, 1);
// NOTE(review): pot_kernel declares its tile statically (__shared__ Posm
// posmshare[NTHREADS]), so this dynamic shared-memory size is redundant.
int sharedMemSize = NTHREADS * sizeof(Posm);
hipLaunchKernelGGL(( pot_kernel) , dim3(grid), dim3(threads), sharedMemSize , 0,
js, je, eps2, posm_dev, pot_dev);
// blocking copy also synchronizes with the kernel above
hipMemcpy(pot, pot_dev, ni * sizeof(float2), hipMemcpyDeviceToHost);
for(int i=0; i<ni; i++){
dpot[i] = -float2_reduce(pot[i]);
}
hipFree(posm_dev);
hipFree(pot_dev);
hipHostFree(pot);
}
/**
 * Per-MPI-rank GPU setup: assigns one device per rank round-robin over the
 * visible devices, prints the binding, and sets the prefer-shared cache
 * configuration for all compute kernels.
 */
void CUDA_MPI_Init(int myRank){
int numGPU;
checkCudaErrors(hipGetDeviceCount(&numGPU));
// int dev; // golowood using all 8800
// if(myRank < 8) dev = 0; else dev = 1;
// const int dev = 0; // golowood using all 280
// if(myRank < 8) dev = 0; else dev = 1;
const int dev = myRank % numGPU;   // round-robin device assignment
checkCudaErrors(hipSetDevice(dev));
// checkCudaErrors(hipGetDevice(&dev));
// printf("Active Cuda Dev. : %d ; Total Number of Cuda Dev. : %d \n", dev, numGPU);
hipDeviceProp_t deviceProp;
checkCudaErrors(hipGetDeviceProperties(&deviceProp, dev));
printf("Rank %04d : GPU %d : %s \n", myRank, dev, deviceProp.name);
// all kernels favour shared memory over L1
hipFuncSetCacheConfig(h4_gravity, hipFuncCachePreferShared);
hipFuncSetCacheConfig(reduce_kernel, hipFuncCachePreferShared);
hipFuncSetCacheConfig(pot_kernel, hipFuncCachePreferShared);
}
/*
void CUDA_MPI_Init(int myRank){
int numGPU;
checkCudaErrors(hipGetDeviceCount(&numGPU));
const int dev = myRank % numGPU;
checkCudaErrors(hipSetDevice(dev));
hipDeviceProp_t deviceProp;
checkCudaErrors(hipGetDeviceProperties(&deviceProp, dev));
printf("Rank %02d : GPU %d : %s\n", myRank, dev, deviceProp.name);
hipFuncSetCacheConfig(h6_gravity, hipFuncCachePreferShared);
hipFuncSetCacheConfig(reduce_kernel, hipFuncCachePreferShared);
hipFuncSetCacheConfig(pot_kernel, hipFuncCachePreferShared);
}
*/ | 9e7fdadad1a4b164a71422d64d019935a09a5d25.cu | #define __CUDA
#include <cstdio>
// #include <iostream>
//#include <cutil.h>
#include <helper_cuda.h>
/*
#include <helper_cuda_drvapi.h>
#include <helper_cuda_gl.h>
#include <helper_cuda.h>
#include <helper_functions.h>
#include <helper_image.h>
#include <helper_math.h>
#include <helper_string.h>
#include <helper_timer.h>
*/
#include "hermite4-gpu.h"
#define NTHREADS 128
//#define NJBLOCKS 16 // GF 8800 GTS 512
//#define NJBLOCKS_ 16
//#define NJBLOCKS 30 // GF 280 GTX
//#define NJBLOCKS_ 32
#define NJBLOCKS 30 // GF 480 GTX
#define NJBLOCKS_ 32
#define NREDUCE (NTHREADS/NJBLOCKS_)
#define NIBLOCKS 32
#define NIMAX (NTHREADS * NIBLOCKS) // 2048
#define GPU_REDUCE
// Per-(i-particle, j-block) partial force accumulator used on the device.
// Acceleration uses two-float (float2) accumulation; jerk is plain float.
// pad[] brings the struct to 48 bytes, a multiple of sizeof(float4), so the
// float4-wise staging copies in h4_gravity/reduce_kernel divide evenly.
struct Force_dev{
float2 acc[3];
// float2 pot;
float jrk[3];
// float pad0;
float pad[3];
// Zero all accumulators; kernels rely on this default state.
__device__ Force_dev(){
// acc[0] = acc[1] = acc[2] = pot = make_float2(0.f, 0.f);
acc[0] = acc[1] = acc[2] = make_float2(0.f, 0.f);
jrk[0] = jrk[1] = jrk[2] = 0.f;
}
};
// Merge the partial force 'fr' into 'fl': each float2 acceleration
// component folds in both words via float2_accum; jerk is a plain sum.
__device__ void force_reduce(Force_dev &fl, Force_dev &fr){
#pragma unroll
for(int k=0; k<3; k++){
fl.acc[k] = float2_accum(fl.acc[k], fr.acc[k].x);
fl.acc[k] = float2_accum(fl.acc[k], fr.acc[k].y);
fl.jrk[k] += fr.jrk[k];
}
// fl.pot = float2_accum(fl.pot, fr.pot.x);
// fl.pot = float2_accum(fl.pot, fr.pot.y);
}
/**
 * Adds j-particle 'jp's 4th-order-Hermite contribution (acceleration and
 * jerk) to the running partial force 'fo' of i-particle 'ip'.
 *
 * The separation is formed from high + low float pairs (posH/posL); the
 * j-particle mass travels in jp.posH.w. 'eps2' is the squared softening.
 */
__device__ void h4_kernel(
    const Predictor &ip,
    const Predictor &jp,
    Force_dev &fo,
    float eps2)
{
    // two-float separation and plain relative velocity
    float dx  = (jp.posH.x - ip.posH.x) + (jp.posL.x - ip.posL.x);
    float dy  = (jp.posH.y - ip.posH.y) + (jp.posL.y - ip.posL.y);
    float dz  = (jp.posH.z - ip.posH.z) + (jp.posL.z - ip.posL.z);
    float dvx = jp.vel.x - ip.vel.x;
    float dvy = jp.vel.y - ip.vel.y;
    float dvz = jp.vel.z - ip.vel.z;

    float r2   = eps2 + dx*dx + dy*dy + dz*dz;  // softened |dr|^2
    float drdv = dx*dvx + dy*dvy + dz*dvz;      // dr . dv

    float rinv  = rsqrtf(r2);
    float rinv2 = rinv * rinv;
    float alpha = (drdv)*rinv2;                 // (dr.dv)/r^2
    rinv *= jp.posH.w;                          // mass in posH.w -> m_j/r
    float mrinv3 = rinv * rinv2;                // m_j / r^3

    float ax = mrinv3*dx;
    float ay = mrinv3*dy;
    float az = mrinv3*dz;
    // jerk: m_j*dv/r^3 - 3*(dr.dv)/r^2 * acc
    float jx = mrinv3*dvx + (-3.f*alpha)*ax;
    float jy = mrinv3*dvy + (-3.f*alpha)*ay;
    float jz = mrinv3*dvz + (-3.f*alpha)*az;

    fo.acc[0] = float2_accum(fo.acc[0], ax);
    fo.acc[1] = float2_accum(fo.acc[1], ay);
    fo.acc[2] = float2_accum(fo.acc[2], az);
    fo.jrk[0] += jx;
    fo.jrk[1] += jy;
    fo.jrk[2] += jz;
}
/**
 * Partial-force kernel. gridDim.x tiles the i-particles (NTHREADS per
 * block), gridDim.y = NJBLOCKS partitions the j-range; each (i,j)-block
 * writes its partial result to force[i][jbid] for later reduction.
 *
 * NOTE(review): the float4 staging loop below always reads NTHREADS whole
 * Predictors per tile, so jpred[] must be padded past nj. calc_force
 * allocates nj + NTHREADS elements — verify any other caller does the same.
 */
__global__ void h4_gravity(
int ni,
int nj,
Predictor ipred[],
Predictor jpred[],
Force_dev force[][NJBLOCKS_],
float eps2){
int ibid = blockIdx.x;
int jbid = blockIdx.y;
int tid = threadIdx.x;
int iaddr = tid + NTHREADS * ibid;
int jstart = (nj * (jbid )) / NJBLOCKS;
int jend = (nj * (jbid+1)) / NJBLOCKS;
// small kernel opt
// When fewer than NTHREADS i-particles remain in this block, fold the
// spare threads onto the same i-particles ('nskip' copies each), every
// copy handling a different j-offset; their partials are merged in the
// shared-memory reduction tree below.
int nskip = 1;
int niloc = ni - NTHREADS * ibid;
if(niloc <= NTHREADS/2) nskip = 2;
if(niloc <= NTHREADS/4) nskip = 4;
if(niloc <= NTHREADS/8) nskip = 8;
if(niloc <= NTHREADS/16) nskip = 16;
if(niloc <= NTHREADS/32) nskip = 32;
int joff = tid / (NTHREADS/nskip);
__shared__ Predictor jpshare[NTHREADS];
Force_dev fo;
Predictor ip = ipred[tid % (NTHREADS/nskip) + NTHREADS * ibid];
for(int j=jstart; j<jend; j+=NTHREADS){
__syncthreads();
#if 0
jpshare[tid] = jpred[j+tid];
#else
// coalesced float4-wise copy of NTHREADS Predictors into shared memory
float4 *src = (float4 *)&jpred[j];
float4 *dst = (float4 *)jpshare;
for(int it=0; it<sizeof(Predictor)/sizeof(float4); it++){
dst[tid] = src[tid];
dst += NTHREADS;
src += NTHREADS;
}
#endif
__syncthreads();
if(jend-j < NTHREADS){
// tail tile: guard against running past jend
for(int jj=0; jj<jend-j; jj+=nskip){
Predictor &jp = jpshare[jj+joff];
if(jj+joff < jend-j) h4_kernel(ip, jp, fo, eps2);
}
}else{
#if 0
#pragma unroll
for(int jj=0; jj<NTHREADS; jj+=nskip){
Predictor &jp = jpshare[jj+joff];
h6_kernel(ip, jp, fo, eps2);
}
#else
// full tile: 4-way manual unroll
for(int jj=0; jj<NTHREADS; jj+=4*nskip){
Predictor &jp0 = jpshare[0*nskip+jj+joff];
Predictor &jp1 = jpshare[1*nskip+jj+joff];
Predictor &jp2 = jpshare[2*nskip+jj+joff];
Predictor &jp3 = jpshare[3*nskip+jj+joff];
h4_kernel(ip, jp0, fo, eps2);
h4_kernel(ip, jp1, fo, eps2);
h4_kernel(ip, jp2, fo, eps2);
h4_kernel(ip, jp3, fo, eps2);
}
#endif
}
}
// horizontal reduce
// Reuse the Predictor tile as Force_dev storage; each halving step merges
// the partials of the thread copies created by the nskip trick.
// __shared__ Force_dev foshare[NTHREADS];
Force_dev *foshare = (Force_dev *)jpshare;
__syncthreads();
foshare[tid] = fo;
__syncthreads();
if(nskip > 1){
if(tid < NTHREADS/2){
force_reduce(foshare[tid], foshare[tid + NTHREADS/2]);
}
__syncthreads();
}
if(nskip > 2){
if(tid < NTHREADS/4){
force_reduce(foshare[tid], foshare[tid + NTHREADS/4]);
}
__syncthreads();
}
if(nskip > 4){
if(tid < NTHREADS/8){
force_reduce(foshare[tid], foshare[tid + NTHREADS/8]);
}
__syncthreads();
}
if(nskip > 8){
if(tid < NTHREADS/16){
force_reduce(foshare[tid], foshare[tid + NTHREADS/16]);
}
__syncthreads();
}
if(nskip > 16){
if(tid < NTHREADS/32){
force_reduce(foshare[tid], foshare[tid + NTHREADS/32]);
}
__syncthreads();
}
// store one partial Force_dev per valid i-particle
if(tid < niloc){
fo = foshare[tid];
force[iaddr][jbid] = fo;
}
}
#ifdef GPU_REDUCE
/**
 * Reduces the NJBLOCKS_ per-j-block partial forces of each i-particle into
 * one Force_dev. Each block handles NREDUCE i-particles (NTHREADS =
 * NREDUCE * NJBLOCKS_ threads, one thread per partial).
 *
 * Fix: the original reduction loop had no barrier between steps and relied
 * on implicit warp-synchronous execution (NJBLOCKS_ groups aligned with
 * warps). That assumption is unsafe on Volta+ with independent thread
 * scheduling, so a block-wide barrier is placed after every halving step
 * (the loop condition is uniform across the block, so all threads reach it).
 */
__global__ void reduce_kernel(
    Force_dev fo_dev[][NJBLOCKS_],
    Force_dev fo_reduce[])
{
    int bid  = blockIdx.x;
    int tid  = threadIdx.x;
    int ioff = bid * NREDUCE;   // first i-particle handled by this block

    // Reuse a Predictor-sized shared buffer as Force_dev storage
    // (assumes sizeof(Predictor) >= sizeof(Force_dev) — TODO confirm).
    __shared__ Predictor jpshare[NTHREADS];
    Force_dev *fo_share = (Force_dev *)jpshare;

    // Coalesced float4-wise load of NREDUCE rows of partial forces.
    float4 *src = (float4 *)fo_dev[ioff];
    float4 *dst = (float4 *)fo_share;
    for(int it=0; it<sizeof(Force_dev)/sizeof(float4); it++){
        dst[tid] = src[tid];
        dst += NTHREADS;
        src += NTHREADS;
    }
    __syncthreads();

    // Tree reduction within each NJBLOCKS_-wide segment.
    int n = NJBLOCKS_;
    while(n > 1){
        n /= 2;
        if(tid % NJBLOCKS_ < n){
            force_reduce(fo_share[tid], fo_share[tid + n]);
        }
        __syncthreads();   // publish this step before the next one reads it
    }

    // Compact the NREDUCE reduced results to the front of shared memory.
    if(tid % NJBLOCKS_ == 0){
        fo_share[tid / NJBLOCKS_] = fo_share[tid];
    }
    __syncthreads();

    // Coalesced float-wise store of the NREDUCE results.
    if(tid < NREDUCE * sizeof(Force_dev) / sizeof(float)){
        float *fdst = (float *)&fo_reduce[ioff];
        float *fsrc = (float *)fo_share;
        fdst[tid] = fsrc[tid];
    }
}
#endif
extern double wtime();
/**
 * Host driver: computes forces on 'nitot' i-particles from 'nj' j-particles
 * on the GPU, in chunks of at most NIMAX i-particles.
 *
 * t1 receives the wall time after the j-particle upload; t_send/t_recv
 * accumulate host->device / device->host transfer times.
 * Device and pinned-host buffers are allocated once on first call and
 * reused for the lifetime of the process (never freed).
 *
 * Fixes: the deprecated cudaThreadSynchronize() is replaced by its
 * supported equivalent cudaDeviceSynchronize(); API return codes are now
 * checked via checkCudaErrors, consistent with the rest of this file.
 */
void calc_force(
    int nitot,
    int nj,
    float eps2,
    Predictor ipred[],
    Predictor jpred[],
    Force force[],
    double &t1,
    double &t_send,
    double &t_recv)
{
    static Predictor *jp_dev = NULL;
    static Predictor *ip_dev = NULL;
    static Force_dev (*fo_dev)[NJBLOCKS_] = NULL;
#ifdef GPU_REDUCE
    static Force_dev (*fo_reduce) = NULL;
    static Force_dev (*fo_host) = NULL;
#else
    static Force_dev (*fo_host)[NJBLOCKS_] = NULL;
#endif
    if(jp_dev == NULL){ // first call: allocate persistent buffers
        // padded by NTHREADS so h4_gravity may stage whole tiles past nj
        checkCudaErrors(cudaMalloc((void **)&jp_dev, (nj + NTHREADS) * sizeof(Predictor)));
        checkCudaErrors(cudaMalloc((void **)&ip_dev, NIMAX * sizeof(Predictor)));
        checkCudaErrors(cudaMalloc((void **)&fo_dev, NIMAX * sizeof(*fo_dev)));
        checkCudaErrors(cudaMemset(fo_dev, 0, NIMAX * sizeof(*fo_dev)));
#ifdef GPU_REDUCE
        checkCudaErrors(cudaMalloc((void **)&fo_reduce, NIMAX * sizeof(*fo_reduce)));
#endif
        // pinned host buffer for the result download
        checkCudaErrors(cudaMallocHost((void **)&fo_host, NIMAX * sizeof(*fo_host)));
    }
    checkCudaErrors(cudaMemcpy(jp_dev, jpred, nj * sizeof(Predictor), cudaMemcpyHostToDevice));
    t1 = wtime();
    int nimax = NIMAX;
    for(int ioff=0; ioff<nitot; ioff+=nimax){
        int ni = std::min(nimax, nitot-ioff);
        double t2 = wtime();
        checkCudaErrors(cudaMemcpy(ip_dev, ipred+ioff, ni * sizeof(Predictor), cudaMemcpyHostToDevice));
        double t3 = wtime();
        t_send += t3 - t2;
        // kernel call: grid.x tiles i-particles, grid.y splits the j-range
        int niblocks = 1 + (ni-1) / NTHREADS;
        dim3 grid(niblocks, NJBLOCKS, 1);
        dim3 threads(NTHREADS, 1, 1);
        h4_gravity <<< grid, threads >>>
            (ni, nj, ip_dev, jp_dev, fo_dev, eps2);
#ifdef GPU_REDUCE
        // reduce NJBLOCKS_ partials per i-particle on the device
        dim3 grid_reduce(1 + (ni-1)/NREDUCE, 1, 1);
        reduce_kernel <<< grid_reduce, threads >>> (fo_dev, fo_reduce);
        checkCudaErrors(cudaGetLastError());
        // cudaThreadSynchronize() is deprecated; use cudaDeviceSynchronize()
        checkCudaErrors(cudaDeviceSynchronize());
        double t4 = wtime();
        checkCudaErrors(cudaMemcpy(fo_host, fo_reduce, ni * sizeof(*fo_reduce), cudaMemcpyDeviceToHost));
        double t5 = wtime();
        t_recv += t5 - t4;
        // unpack two-float accumulators into the host Force structures
        for(int i=0; i<ni; i++){
            Force f; // zeroed by its constructor
            Force_dev &fo = fo_host[i];
            f.acc.x = float2_reduce(fo.acc[0]);
            f.acc.y = float2_reduce(fo.acc[1]);
            f.acc.z = float2_reduce(fo.acc[2]);
            f.jrk.x = fo.jrk[0];
            f.jrk.y = fo.jrk[1];
            f.jrk.z = fo.jrk[2];
            force[ioff + i] = f;
        }
#else
        // host-side reduction over the NJBLOCKS partials
        // NOTE(review): this branch references fo.pot/fo.snp, which are
        // commented out of Force_dev — it will not compile if GPU_REDUCE
        // is undefined.
        checkCudaErrors(cudaMemcpy(fo_host, fo_dev, ni * sizeof(*fo_dev), cudaMemcpyDeviceToHost));
        for(int i=0; i<ni; i++){
            Force f; // zeroed by its constructor
            for(int jb=0; jb<NJBLOCKS; jb++){
                Force_dev &fo = fo_host[i][jb];
                f.acc.x += float2_reduce(fo.acc[0]);
                f.acc.y += float2_reduce(fo.acc[1]);
                f.acc.z += float2_reduce(fo.acc[2]);
                f.pot -= float2_reduce(fo.pot);
                f.jrk.x += fo.jrk[0];
                f.jrk.y += fo.jrk[1];
                f.jrk.z += fo.jrk[2];
                f.snp.x += fo.snp[0];
                f.snp.y += fo.snp[1];
                f.snp.z += fo.snp[2];
            }
            force[ioff + i] = f;
        }
#endif
    }
}
/**
 * Direct-sum potential kernel: each thread accumulates the softened
 * potential at particle 'iaddr' from j-particles in [js, je), staging
 * NTHREADS Posm records in shared memory per tile.
 *
 * NOTE(review): posm[] is indexed at iaddr and j+tid without bounds checks.
 * calc_pot pads both posm and pot allocations by NTHREADS so these reads
 * and the final store stay in-range; padded entries are uninitialized, but
 * only the first 'ni' outputs are consumed by the host.
 */
__global__ void pot_kernel(
int js,
int je,
float eps2,
Posm posm[],
float2 pot[]){
int bid = blockIdx.x;
int tid = threadIdx.x;
int iaddr = tid + NTHREADS * bid;
Posm ip = posm[iaddr];
float2 poti = make_float2(0.f, 0.f);   // two-float accumulator
for(int j=js; j<je; j+=NTHREADS){
__shared__ Posm posmshare[NTHREADS];
__syncthreads();
posmshare[tid] = posm[j + tid];
__syncthreads();
int njj = NTHREADS < je-j ? NTHREADS : je-j;   // tail-tile guard
for(int jj=0; jj< njj; jj++){
Posm &jp = posmshare[jj];
float dx = float2_sub(jp.pos[0], ip.pos[0]);
float dy = float2_sub(jp.pos[1], ip.pos[1]);
float dz = float2_sub(jp.pos[2], ip.pos[2]);
float r2 = eps2 + dx*dx + dy*dy + dz*dz;
float mrinv = jp.mass * rsqrtf(r2);
// r2 == eps2 only for zero separation: skips the self-interaction
if(r2 > eps2) poti = float2_accum(poti, mrinv);
}
}
pot[iaddr] = poti;
}
/**
 * Host driver for pot_kernel: computes the (negative) softened potential of
 * the first 'ni' particles due to j-particles [js, je) and stores it in
 * dpot[]. All buffers are padded by NTHREADS so the kernel's unguarded
 * accesses stay in bounds.
 */
void calc_pot(
    int ni,
    int js,
    int je,
    float eps2,
    Posm posm[],
    double dpot[])
{
    const int    numBlocks = 1 + (ni - 1) / NTHREADS;
    const size_t nElem     = (size_t)ni + NTHREADS;   // padded element count

    Posm   *d_posm = NULL;
    float2 *d_pot  = NULL;
    float2 *h_pot  = NULL;
    cudaMalloc((void **)&d_posm, nElem * sizeof(Posm));
    cudaMalloc((void **)&d_pot,  nElem * sizeof(float2));
    cudaMallocHost((void **)&h_pot, nElem * sizeof(float2));

    cudaMemcpy(d_posm, posm, ni * sizeof(Posm), cudaMemcpyHostToDevice);

    dim3 grid(numBlocks, 1, 1);
    dim3 threads(NTHREADS, 1, 1);
    int sharedMemSize = NTHREADS * sizeof(Posm);
    pot_kernel <<< grid, threads, sharedMemSize >>>
        (js, je, eps2, d_posm, d_pot);

    // blocking copy also synchronizes with the kernel above
    cudaMemcpy(h_pot, d_pot, ni * sizeof(float2), cudaMemcpyDeviceToHost);
    for(int i = 0; i < ni; i++){
        dpot[i] = -float2_reduce(h_pot[i]);
    }

    cudaFree(d_posm);
    cudaFree(d_pot);
    cudaFreeHost(h_pot);
}
/**
 * Per-MPI-rank GPU setup: binds one device per rank (round-robin over the
 * visible devices), prints the binding, and sets the prefer-shared cache
 * configuration for all compute kernels.
 */
void CUDA_MPI_Init(int myRank){
    int deviceCount;
    checkCudaErrors(cudaGetDeviceCount(&deviceCount));

    const int myDev = myRank % deviceCount;   // round-robin assignment
    checkCudaErrors(cudaSetDevice(myDev));

    cudaDeviceProp prop;
    checkCudaErrors(cudaGetDeviceProperties(&prop, myDev));
    printf("Rank %04d : GPU %d : %s \n", myRank, myDev, prop.name);

    // all kernels favour shared memory over L1
    cudaFuncSetCacheConfig(h4_gravity, cudaFuncCachePreferShared);
    cudaFuncSetCacheConfig(reduce_kernel, cudaFuncCachePreferShared);
    cudaFuncSetCacheConfig(pot_kernel, cudaFuncCachePreferShared);
}
/*
void CUDA_MPI_Init(int myRank){
int numGPU;
checkCudaErrors(cudaGetDeviceCount(&numGPU));
const int dev = myRank % numGPU;
checkCudaErrors(cudaSetDevice(dev));
cudaDeviceProp deviceProp;
checkCudaErrors(cudaGetDeviceProperties(&deviceProp, dev));
printf("Rank %02d : GPU %d : %s\n", myRank, dev, deviceProp.name);
cudaFuncSetCacheConfig(h6_gravity, cudaFuncCachePreferShared);
cudaFuncSetCacheConfig(reduce_kernel, cudaFuncCachePreferShared);
cudaFuncSetCacheConfig(pot_kernel, cudaFuncCachePreferShared);
}
*/ |
7fda1303c019f69d8fb8c504f021100ddbad44b8.hip | // !!! This is a file automatically generated by hipify!!!
/**
* @file test_spatial_histogram.cu
* @author Adam Rogowiec
*
* This file is an integral part of the master thesis entitled:
* "Elaboration and implementation in CUDA technology parallel version of
* estimation of multidimensional random variable density function ridge
* detection algorithm."
 * , which is conducted under the supervision of prof. dr hab. inż. Marek
 * Nałęcz.
*
* Institute of Control and Computation Engineering Faculty of Electronics and
* Information Technology Warsaw University of Technology 2016
*/
// Ensure printing of CUDA runtime errors to console
#define CUB_STDERR
#define BLOCK_TILE_LOAD_V4 1
#include <iostream>
#include <sstream>
#include <typeinfo>
#include <stdexcept>
#include <string>
#include <vector>
#include <limits>
#include <cmath>
#include <functional>
#include <algorithm>
#ifdef RD_USE_OPENMP
#include <omp.h>
#endif
#include <helper_cuda.h>
#include "rd/gpu/device/samples_generator.cuh"
#include "rd/gpu/device/device_spatial_histogram.cuh"
#include "rd/gpu/util/dev_samples_set.cuh"
#include "rd/utils/bounding_box.hpp"
#include "rd/utils/histogram.hpp"
#include "rd/utils/graph_drawer.hpp"
#include "rd/utils/cmd_line_parser.hpp"
#include "rd/utils/utilities.hpp"
#include "rd/utils/memory.h"
#include "rd/utils/name_traits.hpp"
#include "rd/utils/rd_params.hpp"
#include "cub/test_util.h"
template <int DIM, typename T>
void test(rd::RDParams<T> &rdp,
rd::RDSpiralParams<T> &rds);
/**
 * Spatial-histogram test driver.
 *
 * Parses command-line options, initializes the requested device and runs
 * the histogram test for dimensions 2..6, in both float and double
 * precision. All options are mirrored into BOTH parameter sets.
 */
int main(int argc, char const **argv)
{
    rd::RDParams<double> dParams;
    rd::RDSpiralParams<double> dSParams;
    rd::RDParams<float> fParams;
    rd::RDSpiralParams<float> fSParams;

    //-----------------------------------------------------------------
    // Initialize command line
    rd::CommandLineArgs args(argc, argv);
    if (args.CheckCmdLineFlag("help"))
    {
        printf("%s \n"
            "\t\t[--np=<P size>]\n"
            "\t\t[--a=<spiral param>]\n"
            "\t\t[--b=<spiral param>]\n"
            "\t\t[--s=<spiral noise sigma>]\n"
            "\t\t[--d=<device id>]\n"
            "\t\t[--v <verbose>]\n"
            "\t\t[--f=<file name to load>]\n"
            "\n", argv[0]);
        exit(0);
    }
    if (args.CheckCmdLineFlag("f"))
    {
        // Fix: propagate the input file to the double-precision params too;
        // previously only fSParams was set, so the double-precision tests
        // silently ignored --f and generated synthetic data instead.
        args.GetCmdLineArgument("f", fSParams.file);
        args.GetCmdLineArgument("f", dSParams.file);
        fSParams.loadFromFile = true;
        dSParams.loadFromFile = true;
    }
    else
    {
        args.GetCmdLineArgument("np", dParams.np);
        args.GetCmdLineArgument("np", fParams.np);
        if (args.CheckCmdLineFlag("a"))
        {
            args.GetCmdLineArgument("a", fSParams.a);
            args.GetCmdLineArgument("a", dSParams.a);
        }
        if (args.CheckCmdLineFlag("b"))
        {
            args.GetCmdLineArgument("b", fSParams.b);
            args.GetCmdLineArgument("b", dSParams.b);
        }
        if (args.CheckCmdLineFlag("s"))
        {
            args.GetCmdLineArgument("s", fSParams.sigma);
            args.GetCmdLineArgument("s", dSParams.sigma);
        }
    }
    if (args.CheckCmdLineFlag("d"))
    {
        args.GetCmdLineArgument("d", fParams.devId);
        args.GetCmdLineArgument("d", dParams.devId);
    }
    if (args.CheckCmdLineFlag("v"))
    {
        fParams.verbose = true;
        dParams.verbose = true;
    }

    checkCudaErrors(deviceInit(fParams.devId));

    // single precision, 2D..6D
    std::cout << rd::HLINE << std::endl;
    std::cout << "FLOAT 2D: " << std::endl;
    test<2>(fParams, fSParams);
    std::cout << rd::HLINE << std::endl;
    std::cout << "FLOAT 3D: " << std::endl;
    test<3>(fParams, fSParams);
    std::cout << rd::HLINE << std::endl;
    std::cout << "FLOAT 4D: " << std::endl;
    test<4>(fParams, fSParams);
    std::cout << rd::HLINE << std::endl;
    std::cout << "FLOAT 5D: " << std::endl;
    test<5>(fParams, fSParams);
    std::cout << rd::HLINE << std::endl;
    std::cout << "FLOAT 6D: " << std::endl;
    test<6>(fParams, fSParams);

    // double precision, 2D..6D
    std::cout << rd::HLINE << std::endl;
    std::cout << "DOUBLE 2D: " << std::endl;
    test<2>(dParams, dSParams);
    std::cout << rd::HLINE << std::endl;
    std::cout << "DOUBLE 3D: " << std::endl;
    test<3>(dParams, dSParams);
    std::cout << rd::HLINE << std::endl;
    std::cout << "DOUBLE 4D: " << std::endl;
    test<4>(dParams, dSParams);
    std::cout << rd::HLINE << std::endl;
    std::cout << "DOUBLE 5D: " << std::endl;
    test<5>(dParams, dSParams);
    std::cout << rd::HLINE << std::endl;
    std::cout << "DOUBLE 6D: " << std::endl;
    test<6>(dParams, dSParams);
    std::cout << rd::HLINE << std::endl;

    checkCudaErrors(deviceReset());
    std::cout << "END!" << std::endl;
    return 0;
}
/**
 * Reference (CPU) functor mapping a bb.dim-dimensional sample to its
 * linearized histogram bin index (dimension 0 varies fastest).
 */
template <typename T>
struct HistogramMapFuncGold
{
    rd::BoundingBox<T> const &bb;   // bounding box of the whole sample set

    HistogramMapFuncGold(rd::BoundingBox<T> const &bb)
    :
        bb(bb)
    {}

    /**
     * @param sample   pointer to one bb.dim-dimensional sample
     * @param binsCnt  number of bins in each dimension
     * @return linearized bin index of the sample
     */
    size_t operator()(
        T const * sample,
        std::vector<size_t> const & binsCnt)
    {
        std::vector<size_t> binIdx(bb.dim, 0);
        // get sample's bin [x,y,z...n] idx
        for (size_t i = 0; i < bb.dim; ++i)
        {
            /*
             * Translate each coordinate to the common origin (subtract the
             * minimum), then divide by this dimension's bin width; the floor
             * of that ratio (counting from zero) is the bin index.
             */
            if (bb.dist[i] < std::numeric_limits<T>::epsilon())
            {
                // degenerate (zero-extent) dimension: everything in bin 0
                binIdx[i] = 0;
                sample++;
                continue;
            }
            T normCord = std::abs(*sample - bb.min(i));
            T step = bb.dist[i] / binsCnt[i];
            if (std::abs(normCord - bb.dist[i]) <= std::numeric_limits<T>::epsilon())
            {
                // samples on the upper boundary are clamped into the last bin
                binIdx[i] = binsCnt[i]-1;
            }
            else
            {
                binIdx[i] = ::floor(normCord / step);
            }
            sample++;
        }
        /*
         * Linearize: idx = sum_i binIdx[i] * prod_{j<i} binsCnt[j].
         * Running-product form is O(dim), replacing the original O(dim^2)
         * nested-loop recomputation of the prefix products.
         */
        size_t idx = 0;
        size_t mul = 1;
        for (size_t i = 0; i < bb.dim; ++i)
        {
            idx += binIdx[i] * mul;
            mul *= binsCnt[i];
        }
        return idx;
    }
};
/**
 * Computes the reference (CPU) spatial histogram of the rdp.np samples in P,
 * using the gold sample-to-bin mapping over the given bounding box.
 */
template <typename T>
void histogramGold(
    rd::RDParams<T> &rdp,
    T const *P,
    std::vector<size_t>const &binsCnt,
    rd::BoundingBox<T>const & bbox,
    rd::Histogram<T> &hist)
{
    HistogramMapFuncGold<T> sampleToBinIdx(bbox);
    hist.setBinCnt(binsCnt);
    hist.getHist(P, rdp.np, sampleToBinIdx);
}
/**
 * Runs DeviceHistogram::spatialHistogram on d_P (with the given memory
 * layout and stride) and compares the device result against the CPU
 * reference histogram. Uses the standard two-phase CUB-style call:
 * first query the temp-storage size, then run with allocated storage.
 *
 * Improvement: the host-side result buffers use std::vector instead of raw
 * new[]/delete[], so they are released even if the comparison throws.
 */
template <
    int DIM,
    rd::DataMemoryLayout INPUT_MEM_LAYOUT,
    typename T>
void testDeviceSpatialHistogram(
    rd::RDParams<T> const &rdp,
    T const *d_P,
    int *d_hist,
    int stride,
    std::vector<size_t>const &binsCnt,
    size_t numBins,
    rd::BoundingBox<T> const &bboxGold,
    rd::Histogram<T> const &histGold)
{
    std::cout << rd::HLINE << "\n";
    std::cout << "testDeviceSpatialHistogram:" << "\n";
    std::cout << "DataMemoryLayout: " <<
        rd::DataMemoryLayoutNameTraits<INPUT_MEM_LAYOUT>::name << "\n";

    void *d_tempStorage = NULL;
    unsigned long long int tempStorageBytes = 0;

    typedef int AliasedBinCnt[DIM];
    AliasedBinCnt aliasedBinCnt;
    rd::gpu::BoundingBox<DIM, T> d_bbox;

    // copy the host bounding box / bin counts into POD kernel arguments
    for (int d = 0; d < DIM; ++d)
    {
        d_bbox.bbox[d * 2]     = bboxGold.min(d);
        d_bbox.bbox[d * 2 + 1] = bboxGold.max(d);
        d_bbox.dist[d]         = bboxGold.dist[d];
        aliasedBinCnt[d]       = binsCnt[d];
    }

    checkCudaErrors(hipMemset(d_hist, 0, numBins * sizeof(int)));
    checkCudaErrors(hipDeviceSynchronize());

    // phase 1: query required temporary storage size
    hipError_t error = rd::gpu::DeviceHistogram::spatialHistogram<DIM, INPUT_MEM_LAYOUT>(
        d_tempStorage,
        tempStorageBytes,
        d_P,
        d_hist,
        rdp.np,
        aliasedBinCnt,
        d_bbox,
        stride,
        0,
        true);
    checkCudaErrors(error);
    checkCudaErrors(hipDeviceSynchronize());

    checkCudaErrors(hipMalloc((void**)&d_tempStorage, tempStorageBytes));

    // phase 2: run the histogram
    error = rd::gpu::DeviceHistogram::spatialHistogram<DIM, INPUT_MEM_LAYOUT>(
        d_tempStorage,
        tempStorageBytes,
        d_P,
        d_hist,
        rdp.np,
        aliasedBinCnt,
        d_bbox,
        stride,
        0,
        true);
    checkCudaErrors(error);
    checkCudaErrors(hipDeviceSynchronize());

    std::vector<int> h_hist(numBins);
    std::vector<int> histGoldValues;
    histGoldValues.reserve(histGold.hist.size());
    for (size_t v : histGold.hist)
    {
        histGoldValues.push_back((int)v);
    }

    checkCudaErrors(hipMemcpy(h_hist.data(), d_hist, numBins * sizeof(int), hipMemcpyDeviceToHost));
    checkCudaErrors(hipDeviceSynchronize());

    bool result = rd::checkResult(histGoldValues.data(), h_hist.data(), numBins);
    if (result)
    {
        std::cout << ">>>> SUCCESS!\n";
    }

    checkCudaErrors(hipFree(d_tempStorage));
}
/**
 * End-to-end test for one (DIM, T) combination: loads or generates a sample
 * set, builds the CPU reference histogram, then checks the device histogram
 * in both row-major and column-major input layouts.
 * Uses 2^(d+1) bins in dimension d.
 */
template <int DIM, typename T>
void test(rd::RDParams<T> &rdp,
rd::RDSpiralParams<T> &sp)
{
std::vector<std::string> samplesDir{"../../examples/data/nd_segments/",
"../../examples/data/spirals/"};
rd::gpu::Samples<T> d_samplesSet(rdp, sp, samplesDir, DIM);
std::cout << "Samples: " << std::endl;
std::cout << "\t dimension: " << DIM << std::endl;
std::cout << "\t n_samples: " << rdp.np << std::endl;
std::cout << "Spiral params: " << std::endl;
std::cout << "\t a: " << sp.a << std::endl;
std::cout << "\t b: " << sp.b << std::endl;
std::cout << "\t sigma: " << sp.sigma << std::endl;
rd::GraphDrawer<T> gDrawer;
// std::vector<size_t> binsCnt{1,1,1};
// per-dimension bin counts: 2, 4, 8, ... (2^(d+1) in dimension d)
std::vector<size_t> binsCnt(DIM);
for (int d = 0; d < DIM; ++d)
{
binsCnt[d] = 2 << d;
}
size_t numBins = std::accumulate(binsCnt.begin(), binsCnt.end(),
1, std::multiplies<size_t>());
T *d_PRowMajor, *d_PColMajor;
T *h_P;
int *d_hist;
// allocate containers
checkCudaErrors(hipMalloc((void**)&d_PRowMajor, rdp.np * DIM * sizeof(T)));
checkCudaErrors(hipMalloc((void**)&d_PColMajor, rdp.np * DIM * sizeof(T)));
checkCudaErrors(hipMalloc((void**)&d_hist, numBins * sizeof(int)));
h_P = new T[rdp.np * DIM];
// initialize data: row-major copy on device, host copy, and a transposed
// (column-major) device copy built via a host-side transpose
checkCudaErrors(hipMemcpy(d_PRowMajor, d_samplesSet.samples_, rdp.np * DIM * sizeof(T),
hipMemcpyDeviceToDevice));
checkCudaErrors(hipMemcpy(h_P, d_samplesSet.samples_, rdp.np * DIM * sizeof(T),
hipMemcpyDeviceToHost));
T * tmp = new T[rdp.np * DIM];
rd::transposeTable(h_P, tmp, rdp.np, DIM);
checkCudaErrors(hipMemcpy(d_PColMajor, tmp, rdp.np * DIM * sizeof(T),
hipMemcpyHostToDevice));
checkCudaErrors(hipDeviceSynchronize());
delete[] tmp;
tmp = nullptr;
// draw test samples set if verbose
std::ostringstream os;
if (rdp.verbose && DIM <= 3)
{
os << typeid(T).name() << "_" << DIM;
os << "D_initial_samples_set_";
gDrawer.showPoints(os.str(), h_P, rdp.np, DIM);
os.clear();
os.str(std::string());
}
//---------------------------------------------------
// REFERENCE HISTOGRAM
//---------------------------------------------------
rd::Histogram<T> histGold;
// NOTE(review): uses rdp.dim rather than the DIM template parameter —
// presumably the Samples constructor sets rdp.dim = DIM; verify.
rd::BoundingBox<T> bboxGold(h_P, rdp.np, rdp.dim);
bboxGold.calcDistances();
histogramGold(rdp, h_P, binsCnt, bboxGold, histGold);
if (rdp.verbose)
{
bboxGold.print();
std::cout << "hist: [";
for (size_t h : histGold.hist)
{
std::cout << ", " << h;
}
std::cout << "]\n";
}
//---------------------------------------------------
// GPU HISTOGRAM
//---------------------------------------------------
// row-major: consecutive coordinates of one sample (stride 1);
// column-major: one coordinate across all samples (stride rdp.np)
testDeviceSpatialHistogram<DIM, rd::ROW_MAJOR>(rdp, d_PRowMajor, d_hist, 1, binsCnt, numBins,
bboxGold, histGold);
testDeviceSpatialHistogram<DIM, rd::COL_MAJOR>(rdp, d_PColMajor, d_hist, rdp.np, binsCnt,
numBins, bboxGold, histGold);
// clean-up
delete[] h_P;
checkCudaErrors(hipFree(d_PRowMajor));
checkCudaErrors(hipFree(d_PColMajor));
checkCudaErrors(hipFree(d_hist));
}
| 7fda1303c019f69d8fb8c504f021100ddbad44b8.cu | /**
* @file test_spatial_histogram.cu
* @author Adam Rogowiec
*
* This file is an integral part of the master thesis entitled:
* "Elaboration and implementation in CUDA technology parallel version of
* estimation of multidimensional random variable density function ridge
* detection algorithm."
* , which is conducted under the supervision of prof. dr hab. inż. Marek
* Nałęcz.
*
* Institute of Control and Computation Engineering Faculty of Electronics and
* Information Technology Warsaw University of Technology 2016
*/
// Ensure printing of CUDA runtime errors to console
#define CUB_STDERR
#define BLOCK_TILE_LOAD_V4 1
#include <iostream>
#include <sstream>
#include <typeinfo>
#include <stdexcept>
#include <string>
#include <vector>
#include <limits>
#include <cmath>
#include <functional>
#include <algorithm>
#ifdef RD_USE_OPENMP
#include <omp.h>
#endif
#include <helper_cuda.h>
#include "rd/gpu/device/samples_generator.cuh"
#include "rd/gpu/device/device_spatial_histogram.cuh"
#include "rd/gpu/util/dev_samples_set.cuh"
#include "rd/utils/bounding_box.hpp"
#include "rd/utils/histogram.hpp"
#include "rd/utils/graph_drawer.hpp"
#include "rd/utils/cmd_line_parser.hpp"
#include "rd/utils/utilities.hpp"
#include "rd/utils/memory.h"
#include "rd/utils/name_traits.hpp"
#include "rd/utils/rd_params.hpp"
#include "cub/test_util.h"
template <int DIM, typename T>
void test(rd::RDParams<T> &rdp,
rd::RDSpiralParams<T> &rds);
int main(int argc, char const **argv)
{
rd::RDParams<double> dParams;
rd::RDSpiralParams<double> dSParams;
rd::RDParams<float> fParams;
rd::RDSpiralParams<float> fSParams;
//-----------------------------------------------------------------
// Initialize command line
rd::CommandLineArgs args(argc, argv);
if (args.CheckCmdLineFlag("help"))
{
printf("%s \n"
"\t\t[--np=<P size>]\n"
"\t\t[--a=<spiral param>]\n"
"\t\t[--b=<spiral param>]\n"
"\t\t[--s=<spiral noise sigma>]\n"
"\t\t[--d=<device id>]\n"
"\t\t[--v <verbose>]\n"
"\t\t[--f=<file name to load>]\n"
"\n", argv[0]);
exit(0);
}
if (args.CheckCmdLineFlag("f"))
{
args.GetCmdLineArgument("f", fSParams.file);
fSParams.loadFromFile = true;
}
else
{
args.GetCmdLineArgument("np", dParams.np);
args.GetCmdLineArgument("np", fParams.np);
if (args.CheckCmdLineFlag("a"))
{
args.GetCmdLineArgument("a", fSParams.a);
args.GetCmdLineArgument("a", dSParams.a);
}
if (args.CheckCmdLineFlag("b"))
{
args.GetCmdLineArgument("b", fSParams.b);
args.GetCmdLineArgument("b", dSParams.b);
}
if (args.CheckCmdLineFlag("s"))
{
args.GetCmdLineArgument("s", fSParams.sigma);
args.GetCmdLineArgument("s", dSParams.sigma);
}
}
if (args.CheckCmdLineFlag("d"))
{
args.GetCmdLineArgument("d", fParams.devId);
args.GetCmdLineArgument("d", dParams.devId);
}
if (args.CheckCmdLineFlag("v"))
{
fParams.verbose = true;
dParams.verbose = true;
}
checkCudaErrors(deviceInit(fParams.devId));
std::cout << rd::HLINE << std::endl;
std::cout << "FLOAT 2D: " << std::endl;
test<2>(fParams, fSParams);
std::cout << rd::HLINE << std::endl;
std::cout << "FLOAT 3D: " << std::endl;
test<3>(fParams, fSParams);
std::cout << rd::HLINE << std::endl;
std::cout << "FLOAT 4D: " << std::endl;
test<4>(fParams, fSParams);
std::cout << rd::HLINE << std::endl;
std::cout << "FLOAT 5D: " << std::endl;
test<5>(fParams, fSParams);
std::cout << rd::HLINE << std::endl;
std::cout << "FLOAT 6D: " << std::endl;
test<6>(fParams, fSParams);
std::cout << rd::HLINE << std::endl;
std::cout << "DOUBLE 2D: " << std::endl;
test<2>(dParams, dSParams);
std::cout << rd::HLINE << std::endl;
std::cout << "DOUBLE 3D: " << std::endl;
test<3>(dParams, dSParams);
std::cout << rd::HLINE << std::endl;
std::cout << "DOUBLE 4D: " << std::endl;
test<4>(dParams, dSParams);
std::cout << rd::HLINE << std::endl;
std::cout << "DOUBLE 5D: " << std::endl;
test<5>(dParams, dSParams);
std::cout << rd::HLINE << std::endl;
std::cout << "DOUBLE 6D: " << std::endl;
test<6>(dParams, dSParams);
std::cout << rd::HLINE << std::endl;
checkCudaErrors(deviceReset());
std::cout << "END!" << std::endl;
return 0;
}
template <typename T>
struct HistogramMapFuncGold
{
rd::BoundingBox<T> const &bb;
HistogramMapFuncGold(rd::BoundingBox<T> const &bb)
:
bb(bb)
{}
size_t operator()(
T const * sample,
std::vector<size_t> const & binsCnt)
{
std::vector<size_t> binIdx(bb.dim, 0);
// get sample's bin [x,y,z...n] idx
for (size_t i = 0; i < bb.dim; ++i)
{
/*
* translate each sample coordinate to the common origin (by distracting minimum)
* then divide shifted coordinate by current dimension bin width and get the
* floor of this value (counting from zero!) which is our bin idx we search for.
*/
if (bb.dist[i] < std::numeric_limits<T>::epsilon())
{
binIdx[i] = 0;
sample++;
continue;
}
T normCord = std::abs(*sample - bb.min(i));
T step = bb.dist[i] / binsCnt[i];
if (std::abs(normCord - bb.dist[i]) <= std::numeric_limits<T>::epsilon())
{
binIdx[i] = binsCnt[i]-1;
}
else
{
binIdx[i] = std::floor(normCord / step);
}
sample++;
}
/*
* Calculate global idx value linearizing bin idx
* idx = k_0 + sum_{i=2}^{dim}{k_i mul_{j=i-1}^{1}bDim_j}
*/
size_t idx = binIdx[0];
size_t tmp;
for (size_t i = 1; i < bb.dim; ++i)
{
tmp = 1;
for (int j = (int)i - 1; j >= 0; --j)
{
tmp *= binsCnt[j];
}
idx += binIdx[i]*tmp;
}
return idx;
}
};
template <typename T>
void histogramGold(
rd::RDParams<T> &rdp,
T const *P,
std::vector<size_t>const &binsCnt,
rd::BoundingBox<T>const & bbox,
rd::Histogram<T> &hist)
{
HistogramMapFuncGold<T> mapFunc(bbox);
hist.setBinCnt(binsCnt);
hist.getHist(P, rdp.np, mapFunc);
}
template <
int DIM,
rd::DataMemoryLayout INPUT_MEM_LAYOUT,
typename T>
void testDeviceSpatialHistogram(
rd::RDParams<T> const &rdp,
T const *d_P,
int *d_hist,
int stride,
std::vector<size_t>const &binsCnt,
size_t numBins,
rd::BoundingBox<T> const &bboxGold,
rd::Histogram<T> const &histGold)
{
std::cout << rd::HLINE << "\n";
std::cout << "testDeviceSpatialHistogram:" << "\n";
std::cout << "DataMemoryLayout: " <<
rd::DataMemoryLayoutNameTraits<INPUT_MEM_LAYOUT>::name << "\n";
void *d_tempStorage = NULL;
unsigned long long int tempStorageBytes = 0;
typedef int AliasedBinCnt[DIM];
AliasedBinCnt aliasedBinCnt;
rd::gpu::BoundingBox<DIM, T> d_bbox;
// initialize data
for (int d = 0; d < DIM; ++d)
{
d_bbox.bbox[d * 2] = bboxGold.min(d);
d_bbox.bbox[d * 2 + 1] = bboxGold.max(d);
d_bbox.dist[d] = bboxGold.dist[d];
aliasedBinCnt[d] = binsCnt[d];
}
checkCudaErrors(cudaMemset(d_hist, 0, numBins * sizeof(int)));
checkCudaErrors(cudaDeviceSynchronize());
cudaError_t error = rd::gpu::DeviceHistogram::spatialHistogram<DIM, INPUT_MEM_LAYOUT>(
d_tempStorage,
tempStorageBytes,
d_P,
d_hist,
rdp.np,
aliasedBinCnt,
d_bbox,
stride,
0,
true);
checkCudaErrors(error);
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaMalloc((void**)&d_tempStorage, tempStorageBytes));
error = rd::gpu::DeviceHistogram::spatialHistogram<DIM, INPUT_MEM_LAYOUT>(
d_tempStorage,
tempStorageBytes,
d_P,
d_hist,
rdp.np,
aliasedBinCnt,
d_bbox,
stride,
0,
true);
checkCudaErrors(error);
checkCudaErrors(cudaDeviceSynchronize());
int *h_hist = new int[numBins];
std::vector<int> histGoldValues;
for (size_t v : histGold.hist)
{
histGoldValues.push_back((int)v);
}
checkCudaErrors(cudaMemcpy(h_hist, d_hist, numBins * sizeof(int), cudaMemcpyDeviceToHost));
checkCudaErrors(cudaDeviceSynchronize());
bool result = rd::checkResult(histGoldValues.data(), h_hist, numBins);
if (result)
{
std::cout << ">>>> SUCCESS!\n";
}
delete[] h_hist;
checkCudaErrors(cudaFree(d_tempStorage));
}
template <int DIM, typename T>
void test(rd::RDParams<T> &rdp,
rd::RDSpiralParams<T> &sp)
{
std::vector<std::string> samplesDir{"../../examples/data/nd_segments/",
"../../examples/data/spirals/"};
rd::gpu::Samples<T> d_samplesSet(rdp, sp, samplesDir, DIM);
std::cout << "Samples: " << std::endl;
std::cout << "\t dimension: " << DIM << std::endl;
std::cout << "\t n_samples: " << rdp.np << std::endl;
std::cout << "Spiral params: " << std::endl;
std::cout << "\t a: " << sp.a << std::endl;
std::cout << "\t b: " << sp.b << std::endl;
std::cout << "\t sigma: " << sp.sigma << std::endl;
rd::GraphDrawer<T> gDrawer;
// std::vector<size_t> binsCnt{1,1,1};
std::vector<size_t> binsCnt(DIM);
for (int d = 0; d < DIM; ++d)
{
binsCnt[d] = 2 << d;
}
size_t numBins = std::accumulate(binsCnt.begin(), binsCnt.end(),
1, std::multiplies<size_t>());
T *d_PRowMajor, *d_PColMajor;
T *h_P;
int *d_hist;
// allocate containers
checkCudaErrors(cudaMalloc((void**)&d_PRowMajor, rdp.np * DIM * sizeof(T)));
checkCudaErrors(cudaMalloc((void**)&d_PColMajor, rdp.np * DIM * sizeof(T)));
checkCudaErrors(cudaMalloc((void**)&d_hist, numBins * sizeof(int)));
h_P = new T[rdp.np * DIM];
// initialize data
checkCudaErrors(cudaMemcpy(d_PRowMajor, d_samplesSet.samples_, rdp.np * DIM * sizeof(T),
cudaMemcpyDeviceToDevice));
checkCudaErrors(cudaMemcpy(h_P, d_samplesSet.samples_, rdp.np * DIM * sizeof(T),
cudaMemcpyDeviceToHost));
T * tmp = new T[rdp.np * DIM];
rd::transposeTable(h_P, tmp, rdp.np, DIM);
checkCudaErrors(cudaMemcpy(d_PColMajor, tmp, rdp.np * DIM * sizeof(T),
cudaMemcpyHostToDevice));
checkCudaErrors(cudaDeviceSynchronize());
delete[] tmp;
tmp = nullptr;
// draw test samples set if verbose
std::ostringstream os;
if (rdp.verbose && DIM <= 3)
{
os << typeid(T).name() << "_" << DIM;
os << "D_initial_samples_set_";
gDrawer.showPoints(os.str(), h_P, rdp.np, DIM);
os.clear();
os.str(std::string());
}
//---------------------------------------------------
// REFERENCE HISTOGRAM
//---------------------------------------------------
rd::Histogram<T> histGold;
rd::BoundingBox<T> bboxGold(h_P, rdp.np, rdp.dim);
bboxGold.calcDistances();
histogramGold(rdp, h_P, binsCnt, bboxGold, histGold);
if (rdp.verbose)
{
bboxGold.print();
std::cout << "hist: [";
for (size_t h : histGold.hist)
{
std::cout << ", " << h;
}
std::cout << "]\n";
}
//---------------------------------------------------
// GPU HISTOGRAM
//---------------------------------------------------
testDeviceSpatialHistogram<DIM, rd::ROW_MAJOR>(rdp, d_PRowMajor, d_hist, 1, binsCnt, numBins,
bboxGold, histGold);
testDeviceSpatialHistogram<DIM, rd::COL_MAJOR>(rdp, d_PColMajor, d_hist, rdp.np, binsCnt,
numBins, bboxGold, histGold);
// clean-up
delete[] h_P;
checkCudaErrors(cudaFree(d_PRowMajor));
checkCudaErrors(cudaFree(d_PColMajor));
checkCudaErrors(cudaFree(d_hist));
}
|
be0d0c66908013d18643b6aaa0d5f02d3aa09adf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <math.h>
#define N 16
__global__ void add(int* a, int* b, int* c) {
int localIdx = blockIdx.x*blockDim.x + threadIdx.x;
if( localIdx < N ) {
c[localIdx] = a[localIdx] + b[localIdx];
}
}
int main( int argc, char** argv ) {
int a[N], b[N], c[N];
int *dev_a, *dev_b, *dev_c;
// Initialize arrays a and b with data
for (int i=0; i < N; i++) {
a[i] = 2*i;
b[i] = -i;
}
// TODO: Allocate memory on the GPU for dev_a, dev_b, dev_c
// TODO: they all should be able store N elements of size int
// TODO: Copy the data from "a" to dev_a and from "b" to dev_b
// TODO: remember about the direction
// Compute the number of block necessary based on a constant number of threads per block
// Be careful - this can launch more threads than we need, we need to handle this in the kernel!
int threadsPerBlock = 1024;
int blocks = (int)ceil((float)N/threadsPerBlock);
// Launch the kernel
hipLaunchKernelGGL(( add), dim3(blocks),dim3(threadsPerBlock), 0, 0, dev_a, dev_b, dev_c);
// TODO: Move the result back from dev_c to "c"
for (int i=0; i < N; i++) {
printf( "%d + %d = %d\n", a[i], b[i], c[i] );
}
// TODO: remember to free all the memory you allocated.
return 0;
}
| be0d0c66908013d18643b6aaa0d5f02d3aa09adf.cu | #include <stdio.h>
#include <math.h>
#define N 16
__global__ void add(int* a, int* b, int* c) {
int localIdx = blockIdx.x*blockDim.x + threadIdx.x;
if( localIdx < N ) {
c[localIdx] = a[localIdx] + b[localIdx];
}
}
int main( int argc, char** argv ) {
int a[N], b[N], c[N];
int *dev_a, *dev_b, *dev_c;
// Initialize arrays a and b with data
for (int i=0; i < N; i++) {
a[i] = 2*i;
b[i] = -i;
}
// TODO: Allocate memory on the GPU for dev_a, dev_b, dev_c
// TODO: they all should be able store N elements of size int
// TODO: Copy the data from "a" to dev_a and from "b" to dev_b
// TODO: remember about the direction
// Compute the number of block necessary based on a constant number of threads per block
// Be careful - this can launch more threads than we need, we need to handle this in the kernel!
int threadsPerBlock = 1024;
int blocks = (int)ceil((float)N/threadsPerBlock);
// Launch the kernel
add<<<blocks,threadsPerBlock>>>(dev_a, dev_b, dev_c);
// TODO: Move the result back from dev_c to "c"
for (int i=0; i < N; i++) {
printf( "%d + %d = %d\n", a[i], b[i], c[i] );
}
// TODO: remember to free all the memory you allocated.
return 0;
}
|
6d542844d85b743e1d2be2157837d8b13306fc8b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
All modification made by Intel Corporation: 2016 Intel Corporation
All contributions by the University of California:
Copyright (c) 2014, 2015, The Regents of the University of California (Regents)
All rights reserved.
All other contributions:
Copyright (c) 2014, 2015, the respective contributors
All rights reserved.
For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <algorithm>
#include <cfloat>
#include <vector>
#include "thrust/device_vector.h"
#include "caffe/layers/softmax_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void kernel_channel_max(const int num, const int channels,
const int spatial_dim, const Dtype* data, Dtype* out) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Dtype maxval = -FLT_MAX;
for (int c = 0; c < channels; ++c) {
maxval = max(data[(n * channels + c) * spatial_dim + s], maxval);
}
out[index] = maxval;
}
}
template <typename Dtype>
__global__ void kernel_channel_subtract(const int count,
const int num, const int channels,
const int spatial_dim, const Dtype* channel_max, Dtype* data) {
CUDA_KERNEL_LOOP(index, count) {
int n = index / channels / spatial_dim;
int s = index % spatial_dim;
data[index] -= channel_max[n * spatial_dim + s];
}
}
template <typename Dtype>
__global__ void kernel_exp(const int count, const Dtype* data, Dtype* out) {
CUDA_KERNEL_LOOP(index, count) {
out[index] = exp(data[index]);
}
}
template <typename Dtype>
__global__ void kernel_channel_sum(const int num, const int channels,
const int spatial_dim, const Dtype* data, Dtype* channel_sum) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Dtype sum = 0;
for (int c = 0; c < channels; ++c) {
sum += data[(n * channels + c) * spatial_dim + s];
}
channel_sum[index] = sum;
}
}
template <typename Dtype>
__global__ void kernel_channel_div(const int count,
const int num, const int channels,
const int spatial_dim, const Dtype* channel_sum, Dtype* data) {
CUDA_KERNEL_LOOP(index, count) {
int n = index / channels / spatial_dim;
int s = index % spatial_dim;
data[index] /= channel_sum[n * spatial_dim + s];
}
}
template <typename Dtype>
__global__ void kernel_channel_dot(const int num, const int channels,
const int spatial_dim, const Dtype* data_1, const Dtype* data_2,
Dtype* channel_dot) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Dtype dot = 0;
for (int c = 0; c < channels; ++c) {
dot += (data_1[(n * channels + c) * spatial_dim + s]
* data_2[(n * channels + c) * spatial_dim + s]);
}
channel_dot[index] = dot;
}
}
template <typename Dtype>
void SoftmaxLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
Dtype* scale_data = scale_.mutable_gpu_data();
int count = bottom[0]->count();
int channels = top[0]->shape(softmax_axis_);
caffe_copy(count, bottom_data, top_data);
// We need to subtract the max to avoid numerical issues, compute the exp,
// and then normalize.
// compute max
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_channel_max<Dtype>), dim3(CAFFE_GET_BLOCKS(outer_num_ * inner_num_)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, outer_num_, channels, inner_num_, top_data,
scale_data);
// subtract
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_channel_subtract<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, outer_num_, channels, inner_num_,
scale_data, top_data);
// exponentiate
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_exp<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_data, top_data);
// sum after exp
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_channel_sum<Dtype>), dim3(CAFFE_GET_BLOCKS(outer_num_ * inner_num_)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, outer_num_, channels, inner_num_, top_data,
scale_data);
// divide
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_channel_div<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, outer_num_, channels, inner_num_,
scale_data, top_data);
}
template <typename Dtype>
void SoftmaxLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* top_diff = top[0]->gpu_diff();
const Dtype* top_data = top[0]->gpu_data();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
Dtype* scale_data = scale_.mutable_gpu_data();
int count = top[0]->count();
int channels = top[0]->shape(softmax_axis_);
caffe_copy(count, top_diff, bottom_diff);
// Compute inner1d(top_diff, top_data) and subtract them from the bottom diff.
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_channel_dot<Dtype>), dim3(CAFFE_GET_BLOCKS(outer_num_ * inner_num_)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, outer_num_, channels, inner_num_,
top_diff, top_data, scale_data);
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_channel_subtract<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, outer_num_, channels, inner_num_,
scale_data, bottom_diff);
// elementwise multiplication
caffe_gpu_mul<Dtype>(top[0]->count(), bottom_diff, top_data, bottom_diff);
}
INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxLayer);
} // namespace caffe
| 6d542844d85b743e1d2be2157837d8b13306fc8b.cu | /*
All modification made by Intel Corporation: © 2016 Intel Corporation
All contributions by the University of California:
Copyright (c) 2014, 2015, The Regents of the University of California (Regents)
All rights reserved.
All other contributions:
Copyright (c) 2014, 2015, the respective contributors
All rights reserved.
For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <algorithm>
#include <cfloat>
#include <vector>
#include "thrust/device_vector.h"
#include "caffe/layers/softmax_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void kernel_channel_max(const int num, const int channels,
const int spatial_dim, const Dtype* data, Dtype* out) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Dtype maxval = -FLT_MAX;
for (int c = 0; c < channels; ++c) {
maxval = max(data[(n * channels + c) * spatial_dim + s], maxval);
}
out[index] = maxval;
}
}
template <typename Dtype>
__global__ void kernel_channel_subtract(const int count,
const int num, const int channels,
const int spatial_dim, const Dtype* channel_max, Dtype* data) {
CUDA_KERNEL_LOOP(index, count) {
int n = index / channels / spatial_dim;
int s = index % spatial_dim;
data[index] -= channel_max[n * spatial_dim + s];
}
}
template <typename Dtype>
__global__ void kernel_exp(const int count, const Dtype* data, Dtype* out) {
CUDA_KERNEL_LOOP(index, count) {
out[index] = exp(data[index]);
}
}
template <typename Dtype>
__global__ void kernel_channel_sum(const int num, const int channels,
const int spatial_dim, const Dtype* data, Dtype* channel_sum) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Dtype sum = 0;
for (int c = 0; c < channels; ++c) {
sum += data[(n * channels + c) * spatial_dim + s];
}
channel_sum[index] = sum;
}
}
template <typename Dtype>
__global__ void kernel_channel_div(const int count,
const int num, const int channels,
const int spatial_dim, const Dtype* channel_sum, Dtype* data) {
CUDA_KERNEL_LOOP(index, count) {
int n = index / channels / spatial_dim;
int s = index % spatial_dim;
data[index] /= channel_sum[n * spatial_dim + s];
}
}
template <typename Dtype>
__global__ void kernel_channel_dot(const int num, const int channels,
const int spatial_dim, const Dtype* data_1, const Dtype* data_2,
Dtype* channel_dot) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Dtype dot = 0;
for (int c = 0; c < channels; ++c) {
dot += (data_1[(n * channels + c) * spatial_dim + s]
* data_2[(n * channels + c) * spatial_dim + s]);
}
channel_dot[index] = dot;
}
}
template <typename Dtype>
void SoftmaxLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
Dtype* scale_data = scale_.mutable_gpu_data();
int count = bottom[0]->count();
int channels = top[0]->shape(softmax_axis_);
caffe_copy(count, bottom_data, top_data);
// We need to subtract the max to avoid numerical issues, compute the exp,
// and then normalize.
// compute max
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_max<Dtype><<<CAFFE_GET_BLOCKS(outer_num_ * inner_num_),
CAFFE_CUDA_NUM_THREADS>>>(outer_num_, channels, inner_num_, top_data,
scale_data);
// subtract
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_subtract<Dtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS>>>(count, outer_num_, channels, inner_num_,
scale_data, top_data);
// exponentiate
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_exp<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_data, top_data);
// sum after exp
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_sum<Dtype><<<CAFFE_GET_BLOCKS(outer_num_ * inner_num_),
CAFFE_CUDA_NUM_THREADS>>>(outer_num_, channels, inner_num_, top_data,
scale_data);
// divide
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_div<Dtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS>>>(count, outer_num_, channels, inner_num_,
scale_data, top_data);
}
template <typename Dtype>
void SoftmaxLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* top_diff = top[0]->gpu_diff();
const Dtype* top_data = top[0]->gpu_data();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
Dtype* scale_data = scale_.mutable_gpu_data();
int count = top[0]->count();
int channels = top[0]->shape(softmax_axis_);
caffe_copy(count, top_diff, bottom_diff);
// Compute inner1d(top_diff, top_data) and subtract them from the bottom diff.
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_dot<Dtype><<<CAFFE_GET_BLOCKS(outer_num_ * inner_num_),
CAFFE_CUDA_NUM_THREADS>>>(outer_num_, channels, inner_num_,
top_diff, top_data, scale_data);
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_subtract<Dtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS>>>(count, outer_num_, channels, inner_num_,
scale_data, bottom_diff);
// elementwise multiplication
caffe_gpu_mul<Dtype>(top[0]->count(), bottom_diff, top_data, bottom_diff);
}
INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxLayer);
} // namespace caffe
|
87357369b3e537a70a92c4a0107cddcbe143b8f4.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include "unary_op_hip.cuh"
#include <gtest/gtest.h>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/linalg/multiply.cuh>
#include <raft/random/rng.cuh>
#include <raft/util/cudart_utils.hpp>
namespace raft {
namespace linalg {
template <typename T>
class MultiplyTest : public ::testing::TestWithParam<UnaryOpInputs<T>> {
public:
MultiplyTest()
: params(::testing::TestWithParam<UnaryOpInputs<T>>::GetParam()),
stream(resource::get_cuda_stream(handle)),
in(params.len, stream),
out_ref(params.len, stream),
out(params.len, stream)
{
}
protected:
void SetUp() override
{
params = ::testing::TestWithParam<UnaryOpInputs<T>>::GetParam();
raft::random::RngState r(params.seed);
int len = params.len;
uniform(handle, r, in.data(), len, T(-1.0), T(1.0));
naiveScale(out_ref.data(), in.data(), params.scalar, len, stream);
auto out_view = raft::make_device_vector_view(out.data(), len);
auto in_view = raft::make_device_vector_view<const T>(in.data(), len);
auto scalar_view = raft::make_host_scalar_view<const T>(¶ms.scalar);
multiply_scalar(handle, in_view, out_view, scalar_view);
resource::sync_stream(handle, stream);
}
protected:
raft::resources handle;
hipStream_t stream;
UnaryOpInputs<T> params;
rmm::device_uvector<T> in, out_ref, out;
};
const std::vector<UnaryOpInputs<float>> inputsf = {{0.000001f, 1024 * 1024, 2.f, 1234ULL}};
typedef MultiplyTest<float> MultiplyTestF;
TEST_P(MultiplyTestF, Result)
{
ASSERT_TRUE(devArrMatch(
out_ref.data(), out.data(), params.len, raft::CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_SUITE_P(MultiplyTests, MultiplyTestF, ::testing::ValuesIn(inputsf));
typedef MultiplyTest<double> MultiplyTestD;
const std::vector<UnaryOpInputs<double>> inputsd = {{0.000001f, 1024 * 1024, 2.f, 1234ULL}};
TEST_P(MultiplyTestD, Result)
{
ASSERT_TRUE(devArrMatch(
out_ref.data(), out.data(), params.len, raft::CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_SUITE_P(MultiplyTests, MultiplyTestD, ::testing::ValuesIn(inputsd));
} // end namespace linalg
} // end namespace raft
| 87357369b3e537a70a92c4a0107cddcbe143b8f4.cu | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include "unary_op.cuh"
#include <gtest/gtest.h>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/linalg/multiply.cuh>
#include <raft/random/rng.cuh>
#include <raft/util/cudart_utils.hpp>
namespace raft {
namespace linalg {
template <typename T>
class MultiplyTest : public ::testing::TestWithParam<UnaryOpInputs<T>> {
public:
MultiplyTest()
: params(::testing::TestWithParam<UnaryOpInputs<T>>::GetParam()),
stream(resource::get_cuda_stream(handle)),
in(params.len, stream),
out_ref(params.len, stream),
out(params.len, stream)
{
}
protected:
void SetUp() override
{
params = ::testing::TestWithParam<UnaryOpInputs<T>>::GetParam();
raft::random::RngState r(params.seed);
int len = params.len;
uniform(handle, r, in.data(), len, T(-1.0), T(1.0));
naiveScale(out_ref.data(), in.data(), params.scalar, len, stream);
auto out_view = raft::make_device_vector_view(out.data(), len);
auto in_view = raft::make_device_vector_view<const T>(in.data(), len);
auto scalar_view = raft::make_host_scalar_view<const T>(¶ms.scalar);
multiply_scalar(handle, in_view, out_view, scalar_view);
resource::sync_stream(handle, stream);
}
protected:
raft::resources handle;
cudaStream_t stream;
UnaryOpInputs<T> params;
rmm::device_uvector<T> in, out_ref, out;
};
const std::vector<UnaryOpInputs<float>> inputsf = {{0.000001f, 1024 * 1024, 2.f, 1234ULL}};
typedef MultiplyTest<float> MultiplyTestF;
TEST_P(MultiplyTestF, Result)
{
ASSERT_TRUE(devArrMatch(
out_ref.data(), out.data(), params.len, raft::CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_SUITE_P(MultiplyTests, MultiplyTestF, ::testing::ValuesIn(inputsf));
typedef MultiplyTest<double> MultiplyTestD;
const std::vector<UnaryOpInputs<double>> inputsd = {{0.000001f, 1024 * 1024, 2.f, 1234ULL}};
TEST_P(MultiplyTestD, Result)
{
ASSERT_TRUE(devArrMatch(
out_ref.data(), out.data(), params.len, raft::CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_SUITE_P(MultiplyTests, MultiplyTestD, ::testing::ValuesIn(inputsd));
} // end namespace linalg
} // end namespace raft
|
144409071a703caa6f16b1635b53f133f44abe5c.hip | // !!! This is a file automatically generated by hipify!!!
/*****************************************************************************/
// Author: Xuefeng Ding <xuefeng.ding.physics@gmail.com>
// Insitute: Gran Sasso Science Institute, L'Aquila, 67100, Italy
// Date: 2018 April 7th
// Version: v1.0
// Description: GooStats, a statistical analysis toolkit that runs on GPU.
//
// All rights reserved. 2018 copyrighted.
/*****************************************************************************/
#include "PdfCache.h"
#include "goofit/Variable.h"
// Host-side owners of the per-pdf device cache buffers (table capacity: 100).
DEVICE_VECTOR<fptype>* PdfCache_dev_vec[100];
// Device constant-memory table of raw pointers into the buffers above,
// indexed by the id handed out in registerFunc().
MEM_CONSTANT fptype* PdfCache_dev_array[100];
// Singleton instance, lazily created by PdfCache::get().
PdfCache* PdfCache::cache = nullptr;
// Lazily construct and return the process-wide PdfCache singleton.
PdfCache* PdfCache::get() {
if (cache == nullptr) {
cache = new PdfCache();
}
return cache;
}
// Assign (at most once) a small integer id to `pdf`, allocate a device cache
// vector sized to the number of bins of the pdf's first observable, and
// publish its raw device pointer into the constant-memory table
// PdfCache_dev_array[id] so device code can reach the buffer by id.
// Returns the pdf's id (existing or freshly registered).
int PdfCache::registerFunc(PdfBase* pdf) {
static int pdf_Id = 0;
if (funMap.find(pdf) == funMap.end()) {
// Fixed table capacity — see PdfCache_dev_array[100] above.
assert(pdf_Id < 100);
funMap.insert(std::make_pair(pdf, pdf_Id));
PdfCache_dev_vec[pdf_Id] = new DEVICE_VECTOR<fptype>((*(pdf->obsCBegin()))->numbins);
// `static` keeps the one-element staging array alive beyond this call;
// the symbol copy writes one pointer at byte offset pdf_Id*sizeof(fptype*).
static fptype* dev_address[1];
dev_address[0] = thrust::raw_pointer_cast(PdfCache_dev_vec[pdf_Id]->data());
MEMCPY_TO_SYMBOL(
PdfCache_dev_array, dev_address, sizeof(fptype*), pdf_Id * sizeof(fptype*), hipMemcpyHostToDevice);
printf("PdfCache::registerFunc register [%p](%s) as [%d]\n", pdf, pdf->getName().c_str(), funMap.at(pdf));
pdf_Id++;
}
return funMap.at(pdf);
}
| 144409071a703caa6f16b1635b53f133f44abe5c.cu | /*****************************************************************************/
// Author: Xuefeng Ding <xuefeng.ding.physics@gmail.com>
// Insitute: Gran Sasso Science Institute, L'Aquila, 67100, Italy
// Date: 2018 April 7th
// Version: v1.0
// Description: GooStats, a statistical analysis toolkit that runs on GPU.
//
// All rights reserved. 2018 copyrighted.
/*****************************************************************************/
#include "PdfCache.h"
#include "goofit/Variable.h"
DEVICE_VECTOR<fptype>* PdfCache_dev_vec[100];
MEM_CONSTANT fptype* PdfCache_dev_array[100];
PdfCache* PdfCache::cache = nullptr;
// Lazily construct and return the process-wide PdfCache singleton.
PdfCache* PdfCache::get() {
if (cache == nullptr) {
cache = new PdfCache();
}
return cache;
}
// Assign (at most once) a small integer id to `pdf`, allocate a device cache
// vector sized to the number of bins of the pdf's first observable, and
// publish its raw device pointer into the constant-memory table
// PdfCache_dev_array[id] so device code can reach the buffer by id.
// Returns the pdf's id (existing or freshly registered).
int PdfCache::registerFunc(PdfBase* pdf) {
static int pdf_Id = 0;
if (funMap.find(pdf) == funMap.end()) {
// Fixed table capacity — see PdfCache_dev_array[100] above.
assert(pdf_Id < 100);
funMap.insert(std::make_pair(pdf, pdf_Id));
PdfCache_dev_vec[pdf_Id] = new DEVICE_VECTOR<fptype>((*(pdf->obsCBegin()))->numbins);
// `static` keeps the one-element staging array alive beyond this call;
// the symbol copy writes one pointer at byte offset pdf_Id*sizeof(fptype*).
static fptype* dev_address[1];
dev_address[0] = thrust::raw_pointer_cast(PdfCache_dev_vec[pdf_Id]->data());
MEMCPY_TO_SYMBOL(
PdfCache_dev_array, dev_address, sizeof(fptype*), pdf_Id * sizeof(fptype*), cudaMemcpyHostToDevice);
printf("PdfCache::registerFunc register [%p](%s) as [%d]\n", pdf, pdf->getName().c_str(), funMap.at(pdf));
pdf_Id++;
}
return funMap.at(pdf);
}
|
9a202027eb38f1700aef9a32c2fd5e883be17d0d.hip | // !!! This is a file automatically generated by hipify!!!
#include "solver.h"
// Wire the solver to the network, the train/validation data, and the
// hyper-parameters, and create the GPU events used to time each step.
Solver::Solver(NeuralNet *model, void *X_train, int *y_train, void *X_val, int *y_val, int num_epoch, UpdateRule update_rule,
double learning_rate, double learning_rate_decay, int num_train, int num_val) {
this->model = model;
this->X_train = X_train, this->X_val = X_val;
this->y_train = y_train, this->y_val = y_val;
this->num_epoch = num_epoch;
this->update_rule = update_rule;
this->learning_rate = learning_rate, this->learning_rate_decay = learning_rate_decay;
this->num_train = num_train, this->num_val = num_val;
// Flattened per-sample size: one sample occupies C*H*W scalars.
this->num_features = model->input_channels * model->input_h * model->input_w;
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
}
// One optimization step: forward+backward on the mini-batch starting at
// element offset start_X of X_train (label offset start_y of y_train), then
// an in-place SGD update p -= learning_rate * dp for every learnable
// parameter via BLAS axpy. Returns this batch's loss.
// NOTE(review): only UpdateRule == SGD is handled; any other rule silently
// skips the parameter update — confirm that is intended.
float Solver::step(int start_X, int start_y) {
float temp_loss;
// std::cout << "start_X: " << start_X << std::endl;
if (model->data_type == CUDNN_DATA_FLOAT)
model->getLoss(&(((float *)X_train)[start_X]), &y_train[start_y], true, NULL, &temp_loss);
else if (model->data_type == CUDNN_DATA_DOUBLE)
model->getLoss(&(((double *)X_train)[start_X]), &y_train[start_y], true, NULL, &temp_loss);
// axpy computes y += alpha*x, so alpha = -lr performs gradient descent.
float Salpha = -learning_rate;
double Dalpha = -learning_rate;
if (update_rule == SGD) {
for (int i = 0; i < model->num_layers; i++) {
if (model->layer_type[i] == CONV) {
ConvLayerParams *cur_params = (ConvLayerParams *)model->params[i];
int kernel_size = cur_params->C_in * cur_params->C_out * cur_params->filter_h * cur_params->filter_w;
if (model->data_type == CUDNN_DATA_FLOAT) {
// Filter update, then bias update.
checkCUBLAS(hipblasSaxpy(model->cublas_handle, kernel_size,
&Salpha,
(float *)cur_params->dW, 1,
(float *)cur_params->W, 1));
checkCUBLAS(hipblasSaxpy(model->cublas_handle, cur_params->C_out,
&Salpha,
(float *)cur_params->db, 1,
(float *)cur_params->b, 1));
}
else if (model->data_type == CUDNN_DATA_DOUBLE) {
checkCUBLAS(hipblasDaxpy(model->cublas_handle, kernel_size,
&Dalpha,
(double *)cur_params->dW, 1,
(double *)cur_params->W, 1));
checkCUBLAS(hipblasDaxpy(model->cublas_handle, cur_params->C_out,
&Dalpha,
(double *)cur_params->db, 1,
(double *)cur_params->b, 1));
}
}
else if (model->layer_type[i] == FULLY_CONNECTED) {
FCLayerParams *cur_params = (FCLayerParams *)model->params[i];
// {
// float *db_h = (float *)malloc(cur_params->C_out * sizeof(float));
// checkCudaErrors(hipMemcpy(db_h, cur_params->db, cur_params->C_out * sizeof(float), hipMemcpyDeviceToHost));
// for (int i = 0; i < cur_params->C_out; i++) {
// std::cout << db_h[i] << ' ';
// }
// std::cout << "\n";
// int n;
// std::cin >> n;
// }
if (model->data_type == CUDNN_DATA_FLOAT) {
// Weight-matrix update, then bias update.
checkCUBLAS(hipblasSaxpy(model->cublas_handle, cur_params->C_in * cur_params->C_out,
&Salpha,
(float *)cur_params->dW, 1,
(float *)cur_params->W, 1));
checkCUBLAS(hipblasSaxpy(model->cublas_handle, cur_params->C_out,
&Salpha,
(float *)cur_params->db, 1,
(float *)cur_params->b, 1));
}
else if (model->data_type == CUDNN_DATA_DOUBLE) {
checkCUBLAS(hipblasDaxpy(model->cublas_handle, cur_params->C_in * cur_params->C_out,
&Dalpha,
(double *)cur_params->dW, 1,
(double *)cur_params->W, 1));
checkCUBLAS(hipblasDaxpy(model->cublas_handle, cur_params->C_out,
&Dalpha,
(double *)cur_params->db, 1,
(double *)cur_params->b, 1));
}
// {
// float *db_h = (float *)malloc(cur_params->C_out * sizeof(float));
// checkCudaErrors(hipMemcpy(db_h, cur_params->b, cur_params->C_out * sizeof(float), hipMemcpyDeviceToHost));
// for (int i = 0; i < cur_params->C_out; i++) {
// std::cout << db_h[i] << ' ';
// }
// std::cout << "\n";
// int n;
// std::cin >> n;
// }
}
else if (model->layer_type[i] == BATCHNORM) {
BatchNormLayerParams *cur_params = (BatchNormLayerParams *)model->params[i];
if (model->data_type == CUDNN_DATA_FLOAT) {
// Scale (gamma) update, then shift (beta) update.
checkCUBLAS(hipblasSaxpy(model->cublas_handle, cur_params->sbmv_size,
&Salpha,
(float *)cur_params->dscale, 1,
(float *)cur_params->scale, 1));
checkCUBLAS(hipblasSaxpy(model->cublas_handle, cur_params->sbmv_size,
&Salpha,
(float *)cur_params->dbias, 1,
(float *)cur_params->bias, 1));
}
else if (model->data_type == CUDNN_DATA_DOUBLE) {
checkCUBLAS(hipblasDaxpy(model->cublas_handle, cur_params->sbmv_size,
&Dalpha,
(double *)cur_params->dscale, 1,
(double *)cur_params->scale, 1));
checkCUBLAS(hipblasDaxpy(model->cublas_handle, cur_params->sbmv_size,
&Dalpha,
(double *)cur_params->dbias, 1,
(double *)cur_params->bias, 1));
}
}
}
}
// Block until all queued GPU work (loss + updates) has finished.
checkCudaErrors(hipDeviceSynchronize());
return temp_loss;
}
// Train for num_epoch epochs. Each epoch runs all full training batches
// through step(), timing every batch with GPU events and appending the batch
// loss to `loss`; it then evaluates all full validation batches and appends
// the epoch's total correct-prediction count to `val_acc`.
// NOTE(review): learning-rate decay is applied once AFTER all epochs (the
// per-epoch decay is commented out below) — confirm that is intended.
void Solver::train(std::vector<float> &loss, std::vector<int> &val_acc) {
int batch_size = model->batch_size;
int num_train_batches = num_train / model->batch_size;
int num_val_batches = num_val / model->batch_size;
for (int i = 0; i < num_epoch; i++) {
for (int j = 0; j < num_train_batches; j++) {
// Offset of this batch's first element in the flattened input buffer.
int start_sample = j * num_features * batch_size;
checkCudaErrors(hipEventRecord(start));
float milli;
float temp_loss = step(start_sample, j * batch_size);
checkCudaErrors(hipEventRecord(stop));
checkCudaErrors(hipEventSynchronize(stop));
checkCudaErrors(hipEventElapsedTime(&milli, start, stop));
std::cout << "One forward, backward pass time(ms): " << milli << std::endl;
loss.push_back(temp_loss);
std::cout << "loss: " << temp_loss << std::endl;
}
// Validation pass: forward only (train flag false), count correct labels.
int correct_count = 0;
for (int j = 0; j < num_val_batches; j++) {
int start_sample = j * num_features * batch_size;
int temp_correct_count;
if (model->data_type == CUDNN_DATA_FLOAT)
model->getLoss(&(((float *)X_val)[start_sample]), &y_val[j * batch_size], false, &temp_correct_count, NULL);
else if (model->data_type == CUDNN_DATA_DOUBLE)
model->getLoss(&(((double *)X_val)[start_sample]), &y_val[j * batch_size], false, &temp_correct_count, NULL);
correct_count += temp_correct_count;
}
val_acc.push_back(correct_count);
std::cout << "val_acc: " << val_acc[i] << std::endl;
// learning_rate *= learning_rate_decay;
// std::cout << "learning_rate: " << learning_rate << std::endl;
}
learning_rate *= learning_rate_decay;
}
// Evaluate classification accuracy over `num_samples` examples.
// Only complete batches of model->batch_size samples are processed; any tail
// remainder (num_samples % batch_size) is skipped. The total number of
// correctly classified samples is written to *num_correct.
void Solver::checkAccuracy(void *X, int *y, int num_samples, int *num_correct) {
const int batch = model->batch_size;
const int full_batches = num_samples / batch;
int total = 0;
for (int b = 0; b < full_batches; b++) {
// Offset of this batch's first element in the flattened input buffer.
const int sample_offset = b * num_features * batch;
int batch_correct;
if (model->data_type == CUDNN_DATA_FLOAT)
model->getLoss(&(((float *)X)[sample_offset]), &y[b * batch], false, &batch_correct, NULL);
else if (model->data_type == CUDNN_DATA_DOUBLE)
model->getLoss(&(((double *)X)[sample_offset]), &y[b * batch], false, &batch_correct, NULL);
total += batch_correct;
}
*num_correct = total;
}
// Benchmarking variant of train(): runs num_epoch epochs of training batches,
// recording each batch's loss and its forward+backward wall time (ms), with
// no validation pass.
// NOTE(review): the `num_epoch` parameter shadows the member of the same
// name — the argument, not the member, controls the epoch count here.
void Solver::getTrainTime(std::vector<float> &loss, std::vector<float> &time, int num_epoch) {
int batch_size = model->batch_size;
int num_train_batches = num_train / model->batch_size;
for (int i = 0; i < num_epoch; i++) {
for (int j = 0; j < num_train_batches; j++) {
int start_sample = j * num_features * batch_size;
checkCudaErrors(hipEventRecord(start));
float milli;
float temp_loss = step(start_sample, j * batch_size);
checkCudaErrors(hipEventRecord(stop));
checkCudaErrors(hipEventSynchronize(stop));
checkCudaErrors(hipEventElapsedTime(&milli, start, stop));
// std::cout << "One forward, backward pass time(ms): " << milli << std::endl;
loss.push_back(temp_loss);
time.push_back(milli);
// std::cout << "loss: " << temp_loss << std::endl;
}
}
learning_rate *= learning_rate_decay;
} | 9a202027eb38f1700aef9a32c2fd5e883be17d0d.cu | #include "solver.h"
// Wire the solver to the network, the train/validation data, and the
// hyper-parameters, and create the CUDA events used to time each step.
Solver::Solver(NeuralNet *model, void *X_train, int *y_train, void *X_val, int *y_val, int num_epoch, UpdateRule update_rule,
double learning_rate, double learning_rate_decay, int num_train, int num_val) {
this->model = model;
this->X_train = X_train, this->X_val = X_val;
this->y_train = y_train, this->y_val = y_val;
this->num_epoch = num_epoch;
this->update_rule = update_rule;
this->learning_rate = learning_rate, this->learning_rate_decay = learning_rate_decay;
this->num_train = num_train, this->num_val = num_val;
// Flattened per-sample size: one sample occupies C*H*W scalars.
this->num_features = model->input_channels * model->input_h * model->input_w;
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
}
// One optimization step: forward+backward on the mini-batch starting at
// element offset start_X of X_train (label offset start_y of y_train), then
// an in-place SGD update p -= learning_rate * dp for every learnable
// parameter via cuBLAS axpy. Returns this batch's loss.
// NOTE(review): only UpdateRule == SGD is handled; any other rule silently
// skips the parameter update — confirm that is intended.
float Solver::step(int start_X, int start_y) {
float temp_loss;
// std::cout << "start_X: " << start_X << std::endl;
if (model->data_type == CUDNN_DATA_FLOAT)
model->getLoss(&(((float *)X_train)[start_X]), &y_train[start_y], true, NULL, &temp_loss);
else if (model->data_type == CUDNN_DATA_DOUBLE)
model->getLoss(&(((double *)X_train)[start_X]), &y_train[start_y], true, NULL, &temp_loss);
// axpy computes y += alpha*x, so alpha = -lr performs gradient descent.
float Salpha = -learning_rate;
double Dalpha = -learning_rate;
if (update_rule == SGD) {
for (int i = 0; i < model->num_layers; i++) {
if (model->layer_type[i] == CONV) {
ConvLayerParams *cur_params = (ConvLayerParams *)model->params[i];
int kernel_size = cur_params->C_in * cur_params->C_out * cur_params->filter_h * cur_params->filter_w;
if (model->data_type == CUDNN_DATA_FLOAT) {
// Filter update, then bias update.
checkCUBLAS(cublasSaxpy(model->cublas_handle, kernel_size,
&Salpha,
(float *)cur_params->dW, 1,
(float *)cur_params->W, 1));
checkCUBLAS(cublasSaxpy(model->cublas_handle, cur_params->C_out,
&Salpha,
(float *)cur_params->db, 1,
(float *)cur_params->b, 1));
}
else if (model->data_type == CUDNN_DATA_DOUBLE) {
checkCUBLAS(cublasDaxpy(model->cublas_handle, kernel_size,
&Dalpha,
(double *)cur_params->dW, 1,
(double *)cur_params->W, 1));
checkCUBLAS(cublasDaxpy(model->cublas_handle, cur_params->C_out,
&Dalpha,
(double *)cur_params->db, 1,
(double *)cur_params->b, 1));
}
}
else if (model->layer_type[i] == FULLY_CONNECTED) {
FCLayerParams *cur_params = (FCLayerParams *)model->params[i];
// {
// float *db_h = (float *)malloc(cur_params->C_out * sizeof(float));
// checkCudaErrors(cudaMemcpy(db_h, cur_params->db, cur_params->C_out * sizeof(float), cudaMemcpyDeviceToHost));
// for (int i = 0; i < cur_params->C_out; i++) {
// std::cout << db_h[i] << ' ';
// }
// std::cout << "\n";
// int n;
// std::cin >> n;
// }
if (model->data_type == CUDNN_DATA_FLOAT) {
// Weight-matrix update, then bias update.
checkCUBLAS(cublasSaxpy(model->cublas_handle, cur_params->C_in * cur_params->C_out,
&Salpha,
(float *)cur_params->dW, 1,
(float *)cur_params->W, 1));
checkCUBLAS(cublasSaxpy(model->cublas_handle, cur_params->C_out,
&Salpha,
(float *)cur_params->db, 1,
(float *)cur_params->b, 1));
}
else if (model->data_type == CUDNN_DATA_DOUBLE) {
checkCUBLAS(cublasDaxpy(model->cublas_handle, cur_params->C_in * cur_params->C_out,
&Dalpha,
(double *)cur_params->dW, 1,
(double *)cur_params->W, 1));
checkCUBLAS(cublasDaxpy(model->cublas_handle, cur_params->C_out,
&Dalpha,
(double *)cur_params->db, 1,
(double *)cur_params->b, 1));
}
// {
// float *db_h = (float *)malloc(cur_params->C_out * sizeof(float));
// checkCudaErrors(cudaMemcpy(db_h, cur_params->b, cur_params->C_out * sizeof(float), cudaMemcpyDeviceToHost));
// for (int i = 0; i < cur_params->C_out; i++) {
// std::cout << db_h[i] << ' ';
// }
// std::cout << "\n";
// int n;
// std::cin >> n;
// }
}
else if (model->layer_type[i] == BATCHNORM) {
BatchNormLayerParams *cur_params = (BatchNormLayerParams *)model->params[i];
if (model->data_type == CUDNN_DATA_FLOAT) {
// Scale (gamma) update, then shift (beta) update.
checkCUBLAS(cublasSaxpy(model->cublas_handle, cur_params->sbmv_size,
&Salpha,
(float *)cur_params->dscale, 1,
(float *)cur_params->scale, 1));
checkCUBLAS(cublasSaxpy(model->cublas_handle, cur_params->sbmv_size,
&Salpha,
(float *)cur_params->dbias, 1,
(float *)cur_params->bias, 1));
}
else if (model->data_type == CUDNN_DATA_DOUBLE) {
checkCUBLAS(cublasDaxpy(model->cublas_handle, cur_params->sbmv_size,
&Dalpha,
(double *)cur_params->dscale, 1,
(double *)cur_params->scale, 1));
checkCUBLAS(cublasDaxpy(model->cublas_handle, cur_params->sbmv_size,
&Dalpha,
(double *)cur_params->dbias, 1,
(double *)cur_params->bias, 1));
}
}
}
}
// Block until all queued GPU work (loss + updates) has finished.
checkCudaErrors(cudaDeviceSynchronize());
return temp_loss;
}
// Train for num_epoch epochs. Each epoch runs all full training batches
// through step(), timing every batch with CUDA events and appending the batch
// loss to `loss`; it then evaluates all full validation batches and appends
// the epoch's total correct-prediction count to `val_acc`.
// NOTE(review): learning-rate decay is applied once AFTER all epochs (the
// per-epoch decay is commented out below) — confirm that is intended.
void Solver::train(std::vector<float> &loss, std::vector<int> &val_acc) {
int batch_size = model->batch_size;
int num_train_batches = num_train / model->batch_size;
int num_val_batches = num_val / model->batch_size;
for (int i = 0; i < num_epoch; i++) {
for (int j = 0; j < num_train_batches; j++) {
// Offset of this batch's first element in the flattened input buffer.
int start_sample = j * num_features * batch_size;
checkCudaErrors(cudaEventRecord(start));
float milli;
float temp_loss = step(start_sample, j * batch_size);
checkCudaErrors(cudaEventRecord(stop));
checkCudaErrors(cudaEventSynchronize(stop));
checkCudaErrors(cudaEventElapsedTime(&milli, start, stop));
std::cout << "One forward, backward pass time(ms): " << milli << std::endl;
loss.push_back(temp_loss);
std::cout << "loss: " << temp_loss << std::endl;
}
// Validation pass: forward only (train flag false), count correct labels.
int correct_count = 0;
for (int j = 0; j < num_val_batches; j++) {
int start_sample = j * num_features * batch_size;
int temp_correct_count;
if (model->data_type == CUDNN_DATA_FLOAT)
model->getLoss(&(((float *)X_val)[start_sample]), &y_val[j * batch_size], false, &temp_correct_count, NULL);
else if (model->data_type == CUDNN_DATA_DOUBLE)
model->getLoss(&(((double *)X_val)[start_sample]), &y_val[j * batch_size], false, &temp_correct_count, NULL);
correct_count += temp_correct_count;
}
val_acc.push_back(correct_count);
std::cout << "val_acc: " << val_acc[i] << std::endl;
// learning_rate *= learning_rate_decay;
// std::cout << "learning_rate: " << learning_rate << std::endl;
}
learning_rate *= learning_rate_decay;
}
// Evaluate classification accuracy over `num_samples` examples. Only complete
// batches of model->batch_size samples are processed (any remainder is
// skipped); the total correct-prediction count is written to *num_correct.
void Solver::checkAccuracy(void *X, int *y, int num_samples, int *num_correct) {
int batch_size = model->batch_size;
int num_iter = num_samples / batch_size;
*num_correct = 0;
for (int i = 0; i < num_iter; i++) {
// Offset of this batch's first element in the flattened input buffer.
int start_sample = i * num_features * batch_size;
int temp_correct_count;
if (model->data_type == CUDNN_DATA_FLOAT)
model->getLoss(&(((float *)X)[start_sample]), &y[i * batch_size], false, &temp_correct_count, NULL);
else if (model->data_type == CUDNN_DATA_DOUBLE)
model->getLoss(&(((double *)X)[start_sample]), &y[i * batch_size], false, &temp_correct_count, NULL);
*num_correct = *num_correct + temp_correct_count;
}
}
// Benchmarking variant of train(): runs num_epoch epochs of training batches,
// recording each batch's loss and its forward+backward wall time (ms), with
// no validation pass.
// NOTE(review): the `num_epoch` parameter shadows the member of the same
// name — the argument, not the member, controls the epoch count here.
void Solver::getTrainTime(std::vector<float> &loss, std::vector<float> &time, int num_epoch) {
int batch_size = model->batch_size;
int num_train_batches = num_train / model->batch_size;
for (int i = 0; i < num_epoch; i++) {
for (int j = 0; j < num_train_batches; j++) {
int start_sample = j * num_features * batch_size;
checkCudaErrors(cudaEventRecord(start));
float milli;
float temp_loss = step(start_sample, j * batch_size);
checkCudaErrors(cudaEventRecord(stop));
checkCudaErrors(cudaEventSynchronize(stop));
checkCudaErrors(cudaEventElapsedTime(&milli, start, stop));
// std::cout << "One forward, backward pass time(ms): " << milli << std::endl;
loss.push_back(temp_loss);
time.push_back(milli);
// std::cout << "loss: " << temp_loss << std::endl;
}
}
learning_rate *= learning_rate_decay;
}
4f350dd7e81f48331bba0e3e747a74d0d523a81c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright 2017 Stanford, NVIDIA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "model.h"
#include "cuda_helper.h"
// Append a fully-connected layer producing `output_channels` features (with
// optional fused ReLU) to the model and return its output tensor.
// NOTE(review): `task_is` is default-constructed here and handed to Linear,
// whose constructor immediately queries its domain — confirm the index space
// is supposed to be created before this point.
Tensor FFModel::linear(std::string name, Tensor input, int output_channels, bool relu)
{
assert(input.numDim == 2);
IndexSpaceT<2> task_is;
Linear *li = new Linear(name, config, input, task_is, output_channels, relu);
layers.push_back(li);
return li->output;
}
// Build the Legion regions/partitions backing a fully-connected layer:
// output (and its gradient), an input "replica" gradient region used for
// backward aggregation, the kernel (weights) and bias regions, and the input
// partition. The machine grid is 2-D: dim 0 partitions output channels
// (fc_num_par_c ways), dim 1 partitions the batch (fc_num_par_n ways).
Linear::Linear(std::string _name, FFConfig _config,
Tensor _input, IndexSpaceT<2> _task_is,
int _output_channels, bool _relu)
: Op(_name, _input), task_is(_task_is),
relu(_relu), profiling(_config.profiling),
in_channels(_input.adim[0]), out_channels(_output_channels)
{
assert(_input.numDim == 2);
Context ctx = _config.lg_ctx;
HighLevelRuntime* runtime = _config.lg_hlr;
Rect<2> part_rect = runtime->get_index_space_domain(ctx, task_is);
// Degree of parallelism along channels (dim 0) and batch (dim 1).
fc_num_par_c = part_rect.hi[0] - part_rect.lo[0] + 1;
int fc_num_par_n = part_rect.hi[1] - part_rect.lo[1] + 1;
num_replica = fc_num_par_n;
printf("Linear fc_num_par_c(%d) fc_num_par_n(%d)\n", fc_num_par_c, fc_num_par_n);
FieldSpace fs = _config.field_space;
// ---- Output region: (out_channels x batch), tiled extent_c x extent_n. ----
Rect<2, coord_t> output_rect(Point<2>(0, 0), Point<2>(out_channels-1, _input.adim[1]-1));
IndexSpaceT<2> output_is = runtime->create_index_space(ctx, output_rect);
LogicalRegion output_lr = runtime->create_logical_region(ctx, output_is, fs);
LogicalRegion output_grad_lr = runtime->create_logical_region(ctx, output_is, fs);
Transform<2, 2, coord_t> transform;
// Ceiling-divide so uneven sizes are still fully covered.
int extent_c = (out_channels + fc_num_par_c - 1) / fc_num_par_c;
int extent_n = (_input.adim[1] + fc_num_par_n - 1) / fc_num_par_n;
Rect<2, coord_t> extent(Point<2>(0, 0), Point<2>(extent_c-1, extent_n-1));
transform[0][0] = extent_c; transform[0][1] = 0;
transform[1][0] = 0; transform[1][1] = extent_n;
IndexPartition output_ip =
runtime->create_partition_by_restriction(ctx, output_is, task_is, transform, extent);
assert(runtime->is_index_partition_disjoint(ctx, output_ip));
assert(runtime->is_index_partition_complete(ctx, output_ip));
LogicalPartition output_lp = runtime->get_logical_partition(ctx, output_lr, output_ip);
LogicalPartition output_grad_lp = runtime->get_logical_partition(ctx, output_grad_lr, output_ip);
// ---- Replica region: one input-gradient copy per channel shard. ----
// Note: we only need replica's grad, so no need to create lr/lp for forward
Rect<2, coord_t> replica_rect(Point<2>(0, 0),
Point<2>(in_channels*fc_num_par_c-1, _input.adim[1]-1));
IndexSpaceT<2> replica_is = runtime->create_index_space(ctx, replica_rect)
</parameter>
</head>

Wait — that transcription must be byte-identical; re-checking against SOURCE:
IndexSpaceT<2> replica_is = runtime->create_index_space(ctx, replica_rect);
LogicalRegion replica_lr = runtime->create_logical_region(ctx, replica_is, fs);
transform[0][0] = in_channels;
transform[1][1] = extent_n;
Rect<2, coord_t> extent_r(Point<2>(0, 0), Point<2>(in_channels-1, extent_n-1));
IndexPartition replica_ip =
runtime->create_partition_by_restriction(ctx, replica_is, task_is, transform, extent_r);
assert(runtime->is_index_partition_disjoint(ctx, replica_ip));
assert(runtime->is_index_partition_complete(ctx, replica_ip));
LogicalPartition replica_lp = runtime->get_logical_partition(ctx, replica_lr, replica_ip);
Tensor replica_tensor;
replica_tensor.region = LogicalRegion::NO_REGION;
replica_tensor.part = LogicalPartition::NO_PART;
replica_tensor.region_grad = replica_lr;
replica_tensor.part_grad = replica_lp;
locals[0] = replica_tensor;
// Create subpartitions for backward prop aggregation
for (int i = 0; i < fc_num_par_c; i++) {
transform[0][0] = _input.pdim[0];
transform[1][1] = _input.pdim[1];
Rect<2, coord_t> ext(Point<2>(in_channels*i, 0),
Point<2>(in_channels*i + _input.pdim[0] - 1,
_input.pdim[1]-1));
IndexPartition ip =
runtime->create_partition_by_restriction(ctx, replica_is, task_is, transform, ext);
assert(runtime->is_index_partition_disjoint(ctx, ip));
replica_sub_lps[i] = runtime->get_logical_partition(ctx, replica_lr, ip);
}
// ---- Kernel (weight) region, plus per-batch-shard gradient copies. ----
Rect<1, coord_t> kernel_rect(0, in_channels * out_channels - 1);
Rect<2, coord_t> kernel_grad_rect(Point<2>(0, 0), Point<2>(out_channels * in_channels-1, fc_num_par_n-1));
IndexSpaceT<1> kernel_is = runtime->create_index_space(ctx, kernel_rect);
IndexSpaceT<2> kernel_grad_is = runtime->create_index_space(ctx, kernel_grad_rect);
LogicalRegion kernel_lr = runtime->create_logical_region(ctx, kernel_is, fs);
LogicalRegion kernel_grad_lr = runtime->create_logical_region(ctx, kernel_grad_is, fs);
transform[0][0] = extent_c * in_channels;
transform[1][1] = 1;
Rect<2, coord_t> extent_k_grad(Point<2>(0, 0), Point<2>(extent_c*in_channels-1, 0));
printf("extent_k(%dx%d %d)\n", extent_c, in_channels, 1);
IndexPartition kernel_grad_ip =
runtime->create_partition_by_restriction(ctx, kernel_grad_is, task_is,
transform, extent_k_grad);
assert(runtime->is_index_partition_disjoint(ctx, kernel_grad_ip));
assert(runtime->is_index_partition_complete(ctx, kernel_grad_ip));
LogicalPartition kernel_grad_lp =
runtime->get_logical_partition(ctx, kernel_grad_lr, kernel_grad_ip);
Transform<1, 2, coord_t> trans;
trans[0][0] = extent_c * in_channels; trans[0][1] = 0;
Rect<1, coord_t> extent_k(0, extent_c*in_channels-1);
IndexPartition kernel_ip =
runtime->create_partition_by_restriction(ctx, kernel_is, task_is, trans, extent_k);
LogicalPartition kernel_lp =
runtime->get_logical_partition(ctx, kernel_lr, kernel_ip);
Tensor kernel_tensor;
kernel_tensor.region = kernel_lr;
kernel_tensor.part = kernel_lp;
kernel_tensor.region_grad = kernel_grad_lr;
kernel_tensor.part_grad = kernel_grad_lp;
locals[1] = kernel_tensor;
// ---- Bias region, mirroring the kernel layout. ----
Rect<1, coord_t> bias_rect(0, out_channels-1);
Rect<2, coord_t> bias_grad_rect(Point<2>(0, 0), Point<2>(out_channels-1, fc_num_par_n-1));
IndexSpaceT<1> bias_is = runtime->create_index_space(ctx, bias_rect);
IndexSpaceT<2> bias_grad_is = runtime->create_index_space(ctx, bias_grad_rect);
LogicalRegion bias_lr = runtime->create_logical_region(ctx, bias_is, fs);
LogicalRegion bias_grad_lr = runtime->create_logical_region(ctx, bias_grad_is, fs);
transform[0][0] = extent_c;
transform[1][1] = 1;
Rect<2, coord_t> extent_b_grad(Point<2>(0, 0), Point<2>(extent_c-1,0));
IndexPartition bias_grad_ip =
runtime->create_partition_by_restriction(ctx, bias_grad_is, task_is,
transform, extent_b_grad);
assert(runtime->is_index_partition_disjoint(ctx, bias_grad_ip));
assert(runtime->is_index_partition_complete(ctx, bias_grad_ip));
LogicalPartition bias_grad_lp =
runtime->get_logical_partition(ctx, bias_grad_lr, bias_grad_ip);
trans[0][0] = extent_c; trans[0][1] = 0;
Rect<1, coord_t> extent_b(0, extent_c-1);
IndexPartition bias_ip =
runtime->create_partition_by_restriction(ctx, bias_is, task_is, trans, extent_b);
LogicalPartition bias_lp =
runtime->get_logical_partition(ctx, bias_lr, bias_ip);
Tensor bias_tensor;
bias_tensor.region = bias_lr;
bias_tensor.part = bias_lp;
bias_tensor.region_grad = bias_grad_lr;
bias_tensor.part_grad = bias_grad_lp;
locals[2] = bias_tensor;
numLocals = 3;
// ---- Publish output tensor metadata (global and per-shard dims). ----
output.numDim = 2;
output.adim[0] = out_channels;
output.adim[1] = _input.adim[1];
output.pdim[0] = extent_c;
output.pdim[1] = extent_n;
output.region = output_lr;
output.part = output_lp;
output.region_grad = output_grad_lr;
output.part_grad = output_grad_lp;
// Every partition reads all in_channels
transform[0][0] = 0;
transform[1][1] = extent_n;
Rect<2, coord_t> extent_i(Point<2>(0, 0), Point<2>(in_channels-1, extent_n-1));
IndexSpaceT<2> input_is = IndexSpaceT<2>(inputs[0].region.get_index_space());
IndexPartition input_ip
= runtime->create_partition_by_restriction(ctx, input_is, task_is, transform, extent_i);
input_lps[0] = runtime->get_logical_partition(ctx, inputs[0].region, input_ip);
}
/*
regions[0](I): input
regions[1](O): output
regions[2]: replica
regions[3](I): kernel
regions[4](I): bias
*/
// Per-shard initialization task for the Linear layer. Reads the region sizes
// to derive (in_channels, out_channels, batch_size), allocates the device
// "ones" vector used for bias broadcast, and (when ReLU is fused) creates the
// cuDNN activation and output-tensor descriptors. Returns the LinearMeta the
// runtime stores per point.
// fix: "&regions" had been mis-encoded as "®ions" (HTML entity residue),
// which does not compile.
OpMeta* Linear::init_task(const Task *task,
const std::vector<PhysicalRegion> &regions,
Context ctx, Runtime *runtime)
{
assert(regions.size() == 5);
assert(task->regions.size() == 5);
const Linear* linear = (Linear*) task->args;
FFHandler handle = *((const FFHandler*) task->local_args);
//const AccessorRO<float, 2> acc_input(regions[0], FID_DATA);
//const AccessorWO<float, 2> acc_output(regions[1], FID_DATA);
//const AccessorRO<float, 1> acc_kernel(regions[3], FID_DATA);
//const AccessorRO<float, 1> acc_bias(regions[4], FID_DATA);
Rect<2> rect_input, rect_output, rect_replica;
Rect<1> rect_kernel, rect_bias;
rect_input = runtime->get_index_space_domain(ctx, task->regions[0].region.get_index_space());
rect_output = runtime->get_index_space_domain(ctx, task->regions[1].region.get_index_space());
rect_replica = runtime->get_index_space_domain(ctx, task->regions[2].region.get_index_space());
rect_kernel = runtime->get_index_space_domain(ctx, task->regions[3].region.get_index_space());
rect_bias = runtime->get_index_space_domain(ctx, task->regions[4].region.get_index_space());
assert(rect_replica.volume() == rect_input.volume());
//assert(acc_kernel.accessor.is_dense_arbitrary(rect_kernel));
//assert(acc_bias.accessor.is_dense_arbitrary(rect_bias));
//const float* kernel_ptr = acc_kernel.ptr(rect_kernel.lo);
//const float* bias_ptr = acc_bias.ptr(rect_bias.lo);
// Sizes come from the region extents along dim 0; batch from the op's pdim.
int input_channels = rect_input.hi[0] - rect_input.lo[0] + 1;
int output_channels = rect_output.hi[0] - rect_output.lo[0] + 1;
int batch_size = linear->output.pdim[1];
printf("init linear (input): in_c(%d) out_c(%d) batch_size(%d)\n", input_channels, output_channels, batch_size);
LinearMeta* m = new LinearMeta(handle);
m->relu = linear->relu;
m->in_channels = input_channels;
m->out_channels = output_channels;
m->batch_size = batch_size;
// Stage a host vector of 1.0f and copy it to the device; it serves as the
// all-ones multiplicand for the bias rank-1 update in the GEMM path.
float* dram_one_ptr = (float *) malloc(sizeof(float) * batch_size);
for (int i = 0; i < batch_size; i++)
dram_one_ptr[i] = 1.0f;
checkCUDA(hipMalloc(&m->one_ptr, sizeof(float) * batch_size));
checkCUDA(hipMemcpy(m->one_ptr, dram_one_ptr,
sizeof(float) * batch_size, hipMemcpyHostToDevice));
// fix: the host staging buffer was leaked; the blocking memcpy above has
// already consumed it, so it is safe to release here.
free(dram_one_ptr);
if (m->relu) {
checkCUDNN(cudnnCreateActivationDescriptor(&m->actiDesc));
checkCUDNN(cudnnSetActivationDescriptor(m->actiDesc, CUDNN_ACTIVATION_RELU,
CUDNN_PROPAGATE_NAN, 0.0));
checkCUDNN(cudnnCreateTensorDescriptor(&m->outputTensor));
checkCUDNN(cudnnSetTensor4dDescriptor(m->outputTensor,
CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT,
batch_size, output_channels, 1, 1));
}
return m;
}
/*
regions[0](O): filter
regions[1](O): bias
*/
// One-shot parameter-initialization task: fills the weight and bias regions
// with values drawn uniformly from [-factor, factor], factor = 1/sqrt(fan_in)
// (LeCun-style uniform init), using the cuRAND host API plus a scale kernel.
// fix: "&regions" had been mis-encoded as "®ions" (HTML entity residue),
// which does not compile; also the created stream was never destroyed (leak).
__host__
void Linear::init_para_task(const Task *task,
const std::vector<PhysicalRegion> &regions,
Context ctx, Runtime *runtime)
{
assert(regions.size() == 2);
assert(task->regions.size() == 2);
const Linear* linear = (Linear*) task->args;
const AccessorWO<float, 1> acc_filter(regions[0], FID_DATA);
const AccessorWO<float, 1> acc_bias(regions[1], FID_DATA);
Rect<1> rect_filter, rect_bias;
rect_filter = runtime->get_index_space_domain(ctx, task->regions[0].region.get_index_space());
rect_bias = runtime->get_index_space_domain(ctx, task->regions[1].region.get_index_space());
assert(acc_filter.accessor.is_dense_arbitrary(rect_filter));
assert(acc_bias.accessor.is_dense_arbitrary(rect_bias));
float *filter_ptr = acc_filter.ptr(rect_filter.lo);
float *bias_ptr = acc_bias.ptr(rect_bias.lo);
// init filter and bias
hipStream_t stream;
checkCUDA(hipStreamCreate(&stream));
hiprandGenerator_t genGPU;
hiprandCreateGenerator(&genGPU, HIPRAND_RNG_PSEUDO_DEFAULT);
hiprandSetStream(genGPU, stream);
hiprandSetPseudoRandomGeneratorSeed(genGPU, 1234ULL);
coord_t filter_elements = linear->in_channels * linear->out_channels;
float factor = 1.0f / sqrt(linear->in_channels);
assert(filter_elements == rect_filter.volume());
// Uniform [0,1) values are remapped to [-factor, factor] by scale_kernel.
// NOTE(review): scale_kernel launches on the legacy default stream while
// the generator runs on `stream`; the legacy stream's implicit
// synchronization is what orders them — confirm streams are non-blocking
// nowhere else.
hiprandGenerateUniform(genGPU, filter_ptr, filter_elements);
hipLaunchKernelGGL(( scale_kernel), dim3(GET_BLOCKS(filter_elements)), dim3(CUDA_NUM_THREADS), 0, 0,
filter_ptr, filter_elements, -factor, factor);
hiprandGenerateUniform(genGPU, bias_ptr, linear->out_channels);
assert(linear->out_channels == rect_bias.volume());
hipLaunchKernelGGL(( scale_kernel), dim3(GET_BLOCKS(linear->out_channels)), dim3(CUDA_NUM_THREADS), 0, 0,
bias_ptr, linear->out_channels, -factor, factor);
hiprandDestroyGenerator(genGPU);
// fix: release the stream created above (was leaked on every invocation).
checkCUDA(hipStreamDestroy(stream));
}
// Launch-side initialization: first a single task that randomizes the weight
// and bias parameters, then an index launch of init_task across the 2-D
// machine grid, collecting each point's LinearMeta into `meta`.
void Linear::init(const FFModel& ff)
{
ArgumentMap argmap;
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
// First we initialize the filter and bias parameters
{
TaskLauncher para_launcher(LINEAR_INIT_PARA_TASK_ID, TaskArgument(this, sizeof(Linear)));
para_launcher.add_region_requirement(
RegionRequirement(locals[1].region, WRITE_DISCARD, EXCLUSIVE, locals[1].region));
para_launcher.add_field(0, FID_DATA);
para_launcher.add_region_requirement(
RegionRequirement(locals[2].region, WRITE_DISCARD, EXCLUSIVE, locals[2].region));
para_launcher.add_field(1, FID_DATA);
runtime->execute_task(ctx, para_launcher);
}
// Hand each grid point its FFHandler (cuBLAS/cuDNN handles) as local args.
Rect<2> rect = runtime->get_index_space_domain(ctx, task_is);
int idx = 0;
for (PointInRectIterator<2> it(rect); it(); it++) {
FFHandler handle = ff.handlers[idx++];
argmap.set_point(*it, TaskArgument(&handle, sizeof(FFHandler)));
}
// Region order must match init_task: input, output, replica grad,
// kernel, bias.
IndexLauncher init_launcher(LINEAR_INIT_TASK_ID, task_is,
TaskArgument(this, sizeof(Linear)), argmap);
init_launcher.add_region_requirement(
RegionRequirement(input_lps[0], 0/*projection id*/,
READ_ONLY, EXCLUSIVE, inputs[0].region));
init_launcher.add_field(0, FID_DATA);
init_launcher.add_region_requirement(
RegionRequirement(output.part, 0/*projection id*/,
WRITE_DISCARD, EXCLUSIVE, output.region));
init_launcher.add_field(1, FID_DATA);
init_launcher.add_region_requirement(
RegionRequirement(locals[0].part_grad, 0/*projection id*/,
WRITE_DISCARD, EXCLUSIVE, locals[0].region_grad));
init_launcher.add_field(2, FID_DATA);
init_launcher.add_region_requirement(
RegionRequirement(locals[1].part, 0/*projection id*/,
READ_ONLY, EXCLUSIVE, locals[1].region));
init_launcher.add_field(3, FID_DATA)
init_launcher.add_region_requirement(
RegionRequirement(locals[2].part, 0/*projection id*/,
READ_ONLY, EXCLUSIVE, locals[2].region));
init_launcher.add_field(4, FID_DATA);
FutureMap fm = runtime->execute_index_space(ctx, init_launcher);
fm.wait_all_results();
// Collect the per-point LinearMeta produced by init_task.
idx = 0;
for (PointInRectIterator<2> it(rect); it(); it++) {
meta[idx++] = fm.get_result<OpMeta*>(*it);
}
}
/*
regions[0](I); input
regions[1](O): output
regions[2](I): kernel
regions[3](I): bias
*/
__host__
void Linear::forward_task(const Task *task,
                          const std::vector<PhysicalRegion> &regions,
                          Context ctx, Runtime *runtime)
{
  // Forward pass: output = relu?(kernel^T * input + bias * ones^T).
  // Region layout: [0] input (RO), [1] output (WO), [2] kernel (RO), [3] bias (RO).
  assert(regions.size() == 4);
  assert(task->regions.size() == 4);
  float alpha = 1.0f, beta = 0.0f;
  const Linear* linear = (Linear*) task->args;
  const LinearMeta* m = *((LinearMeta**) task->local_args);
  int input_channels = m->in_channels;
  int output_channels = m->out_channels;
  int batch_size = m->batch_size;
  // Device vector of batch_size ones, used to broadcast the bias over the batch.
  const float *one_ptr = m->one_ptr;
  const AccessorRO<float, 2> acc_input(regions[0], FID_DATA);
  const AccessorWO<float, 2> acc_output(regions[1], FID_DATA);
  const AccessorRO<float, 1> acc_kernel(regions[2], FID_DATA);
  const AccessorRO<float, 1> acc_bias(regions[3], FID_DATA);
  Rect<2> rect_input, rect_output;
  Rect<1> rect_kernel, rect_bias;
  rect_input = runtime->get_index_space_domain(ctx, task->regions[0].region.get_index_space());
  rect_output = runtime->get_index_space_domain(ctx, task->regions[1].region.get_index_space());
  rect_kernel = runtime->get_index_space_domain(ctx, task->regions[2].region.get_index_space());
  rect_bias = runtime->get_index_space_domain(ctx, task->regions[3].region.get_index_space());
  // make sure the sizes match
  assert(rect_input.volume() == input_channels * batch_size);
  assert(rect_output.volume() == output_channels * batch_size);
  assert(rect_kernel.volume() == input_channels * output_channels);
  assert(rect_bias.volume() == output_channels);
  // All accessors must be dense so raw pointers can be handed to hipBLAS.
  assert(acc_input.accessor.is_dense_arbitrary(rect_input));
  assert(acc_output.accessor.is_dense_arbitrary(rect_output));
  assert(acc_kernel.accessor.is_dense_arbitrary(rect_kernel));
  assert(acc_bias.accessor.is_dense_arbitrary(rect_bias));
  const float *input_ptr = acc_input.ptr(rect_input.lo);
  float *output_ptr = acc_output.ptr(rect_output.lo);
  const float *kernel_ptr = acc_kernel.ptr(rect_kernel.lo);
  const float *bias_ptr = acc_bias.ptr(rect_bias.lo);
  hipEvent_t t_start, t_end;
  if (linear->profiling) {
    hipEventCreate(&t_start);
    hipEventCreate(&t_end);
    hipEventRecord(t_start);
  }
  hipStream_t stream;
  checkCUDA(hipStreamCreate(&stream));
  checkCUDA(hipblasSetStream(m->handle.blas, stream));
  // output = kernel^T * input; kernel is stored as in_channels x out_channels
  // column-major, hence the transpose on the A operand.
  checkCUDA(hipblasSgemm(m->handle.blas, HIPBLAS_OP_T, HIPBLAS_OP_N,
                         output_channels, batch_size, input_channels,
                         &alpha, kernel_ptr, input_channels,
                         input_ptr, input_channels, &beta,
                         output_ptr, output_channels));
  // output += bias * ones^T (rank-1 update broadcasts the bias; beta = alpha
  // so the gemm result above is accumulated into rather than overwritten).
  checkCUDA(hipblasSgemm(m->handle.blas, HIPBLAS_OP_T, HIPBLAS_OP_N,
                         output_channels, batch_size, 1,
                         &alpha, bias_ptr, 1,
                         one_ptr, 1, &alpha,
                         output_ptr, output_channels));
  if (m->relu) {
    // In-place ReLU on the output tensor.
    checkCUDNN(cudnnActivationForward(m->handle.dnn, m->actiDesc,
                                      &alpha, m->outputTensor, output_ptr,
                                      &beta, m->outputTensor, output_ptr));
  }
  if (linear->profiling) {
    hipEventRecord(t_end);
    checkCUDA(hipEventSynchronize(t_end));
    float elapsed = 0;
    checkCUDA(hipEventElapsedTime(&elapsed, t_start, t_end));
    hipEventDestroy(t_start);
    hipEventDestroy(t_end);
    printf("Linear forward time = %.2lfms\n", elapsed);
  }
  // Fix: the stream was previously leaked on every invocation. Destroying it
  // here is safe even with work still queued -- destruction is deferred until
  // all enqueued work completes.
  checkCUDA(hipStreamDestroy(stream));
}
void Linear::forward(const FFModel& ff)
{
  // Launch the per-partition forward task across the 2D task index space.
  Context ctx = ff.config.lg_ctx;
  Runtime* runtime = ff.config.lg_hlr;
  // Hand each point task its cached OpMeta* via the argument map.
  ArgumentMap arg_map;
  Rect<2> launch_rect = runtime->get_index_space_domain(ctx, task_is);
  int point_idx = 0;
  for (PointInRectIterator<2> pir(launch_rect); pir(); pir++) {
    OpMeta* op_meta = meta[point_idx++];
    arg_map.set_point(*pir, TaskArgument(&op_meta, sizeof(OpMeta*)));
  }
  IndexLauncher fwd_launcher(LINEAR_FWD_TASK_ID, task_is,
                             TaskArgument(this, sizeof(Linear)), arg_map);
  // regions[0](I): input
  fwd_launcher.add_region_requirement(
      RegionRequirement(input_lps[0], 0/*projection id*/,
                        READ_ONLY, EXCLUSIVE, inputs[0].region));
  fwd_launcher.add_field(0, FID_DATA);
  // regions[1](O): output
  fwd_launcher.add_region_requirement(
      RegionRequirement(output.part, 0/*projection id*/,
                        WRITE_DISCARD, EXCLUSIVE, output.region));
  fwd_launcher.add_field(1, FID_DATA);
  // regions[2](I): kernel
  fwd_launcher.add_region_requirement(
      RegionRequirement(locals[1].part, 0/*projection id*/,
                        READ_ONLY, EXCLUSIVE, locals[1].region));
  fwd_launcher.add_field(2, FID_DATA);
  // regions[3](I): bias
  fwd_launcher.add_region_requirement(
      RegionRequirement(locals[2].part, 0/*projection id*/,
                        READ_ONLY, EXCLUSIVE, locals[2].region));
  fwd_launcher.add_field(3, FID_DATA);
  runtime->execute_index_space(ctx, fwd_launcher);
}
/*
regions[0](I): input
regions[1](O): replica_grad
regions[2](I): output
regions[3](I/O): output_grad
regions[4](I): filter
regions[5](O): filter_grad
regions[6](O): bias_grad
*/
__host__
void Linear::backward_task(const Task *task,
                           const std::vector<PhysicalRegion> &regions,
                           Context ctx, Runtime *runtime)
{
  // Backward pass for one shard:
  //   kernel_grad  = input * output_grad^T
  //   bias_grad    = output_grad * ones
  //   replica_grad = kernel * output_grad   (per-shard input gradient)
  // Region layout: [0] input (I), [1] replica_grad (O), [2] output (I),
  // [3] output_grad (I/O), [4] filter (I), [5] filter_grad (O), [6] bias_grad (O).
  assert(regions.size() == 7);
  assert(task->regions.size() == 7);
  float alpha = 1.0f, beta = 0.0f;
  const Linear* linear = (Linear*) task->args;
  const LinearMeta* m = *((LinearMeta**) task->local_args);
  int input_channels = m->in_channels;
  int output_channels = m->out_channels;
  int batch_size = m->batch_size;
  const float *one_ptr = m->one_ptr;
  const AccessorRO<float, 2> acc_input(regions[0], FID_DATA);
  const AccessorWO<float, 2> acc_replica_grad(regions[1], FID_DATA);
  const AccessorRO<float, 2> acc_output(regions[2], FID_DATA);
  const AccessorRW<float, 2> acc_output_grad(regions[3], FID_DATA);
  const AccessorRO<float, 1> acc_kernel(regions[4], FID_DATA);
  const AccessorWO<float, 2> acc_kernel_grad(regions[5], FID_DATA);
  const AccessorWO<float, 2> acc_bias_grad(regions[6], FID_DATA);
  Rect<2> rect_input, rect_replica_grad, rect_output, rect_output_grad,
          rect_kernel_grad, rect_bias_grad;
  Rect<1> rect_kernel;
  rect_input =
    runtime->get_index_space_domain(ctx, task->regions[0].region.get_index_space());
  rect_replica_grad =
    runtime->get_index_space_domain(ctx, task->regions[1].region.get_index_space());
  rect_output =
    runtime->get_index_space_domain(ctx, task->regions[2].region.get_index_space());
  rect_output_grad =
    runtime->get_index_space_domain(ctx, task->regions[3].region.get_index_space());
  rect_kernel =
    runtime->get_index_space_domain(ctx, task->regions[4].region.get_index_space());
  rect_kernel_grad =
    runtime->get_index_space_domain(ctx, task->regions[5].region.get_index_space());
  rect_bias_grad =
    runtime->get_index_space_domain(ctx, task->regions[6].region.get_index_space());
  // make sure the sizes match
  assert(rect_input.volume() == input_channels * batch_size);
  assert(rect_replica_grad.volume() == input_channels * batch_size);
  assert(rect_output.volume() == output_channels * batch_size);
  assert(rect_output_grad.volume() == output_channels * batch_size);
  assert(rect_kernel.volume() == input_channels * output_channels);
  assert(rect_kernel_grad.volume() == input_channels * output_channels);
  assert(rect_bias_grad.volume() == output_channels);
  // make sure all regions are dense so raw pointers can be used
  assert(acc_input.accessor.is_dense_arbitrary(rect_input));
  assert(acc_replica_grad.accessor.is_dense_arbitrary(rect_replica_grad));
  assert(acc_output.accessor.is_dense_arbitrary(rect_output));
  assert(acc_output_grad.accessor.is_dense_arbitrary(rect_output_grad));
  assert(acc_kernel.accessor.is_dense_arbitrary(rect_kernel));
  assert(acc_kernel_grad.accessor.is_dense_arbitrary(rect_kernel_grad));
  assert(acc_bias_grad.accessor.is_dense_arbitrary(rect_bias_grad));
  const float *input_ptr = acc_input.ptr(rect_input.lo);
  float *replica_grad_ptr = acc_replica_grad.ptr(rect_replica_grad.lo);
  const float *output_ptr = acc_output.ptr(rect_output.lo);
  float *output_grad_ptr = acc_output_grad.ptr(rect_output_grad.lo);
  const float *kernel_ptr = acc_kernel.ptr(rect_kernel.lo);
  float *kernel_grad_ptr = acc_kernel_grad.ptr(rect_kernel_grad.lo);
  float *bias_grad_ptr = acc_bias_grad.ptr(rect_bias_grad.lo);
  hipEvent_t t_start, t_end;
  if (linear->profiling) {
    hipEventCreate(&t_start);
    hipEventCreate(&t_end);
    hipEventRecord(t_start);
  }
  hipStream_t stream;
  checkCUDA(hipStreamCreate(&stream));
  checkCUDA(hipblasSetStream(m->handle.blas, stream));
  if (m->relu) {
    // Mask output_grad through the ReLU derivative before the matrix products.
    // Fix: launch on the same stream the BLAS calls use instead of the default
    // stream, making the ordering explicit rather than relying on legacy
    // default-stream synchronization.
    int n = rect_output.volume();
    hipLaunchKernelGGL(( reluBackward), dim3(GET_BLOCKS(n)), dim3(CUDA_NUM_THREADS), 0, stream, output_grad_ptr, output_ptr, n);
  }
  // Compute weight gradient
  checkCUDA(hipblasSgemm(m->handle.blas, HIPBLAS_OP_N, HIPBLAS_OP_T,
                         input_channels, output_channels, batch_size,
                         &alpha, input_ptr, input_channels,
                         output_grad_ptr, output_channels,
                         &beta, kernel_grad_ptr, input_channels));
  // Compute bias gradient
  checkCUDA(hipblasSgemv(m->handle.blas, HIPBLAS_OP_N,
                         output_channels, batch_size,
                         &alpha, output_grad_ptr, output_channels,
                         one_ptr, 1,
                         &beta, bias_grad_ptr, 1));
  // Compute data gradient
  checkCUDA(hipblasSgemm(m->handle.blas, HIPBLAS_OP_N, HIPBLAS_OP_N,
                         input_channels, batch_size, output_channels,
                         &alpha, kernel_ptr, input_channels,
                         output_grad_ptr, output_channels,
                         &beta, replica_grad_ptr, input_channels));
  if (linear->profiling) {
    hipEventRecord(t_end);
    checkCUDA(hipEventSynchronize(t_end));
    float elapsed = 0;
    checkCUDA(hipEventElapsedTime(&elapsed, t_start, t_end));
    hipEventDestroy(t_start);
    hipEventDestroy(t_end);
    printf("Linear backward time = %.2lfms\n", elapsed);
  }
  // Fix: previously one stream was leaked per call; destruction is deferred
  // until queued work finishes, so this is safe.
  checkCUDA(hipStreamDestroy(stream));
}
/*
regions[0](O): input_grad
regions[1..fc_num_par_c]: subreplicas
*/
__host__
void Linear::backward2_task(const Task *task,
                            const std::vector<PhysicalRegion> &regions,
                            Context ctx, Runtime *runtime)
{
  // Reduces the per-shard input gradients into regions[0]:
  //   input_grad = sum over i of replica_i   (regions[1..fc_num_par_c]).
  float alpha = 1.0f;
  const LinearMeta* m = *((LinearMeta**) task->local_args);
  const AccessorWO<float, 2> acc_input(regions[0], FID_DATA);
  Rect<2> rect_input, rect_replica;
  rect_input = runtime->get_index_space_domain(ctx, task->regions[0].region.get_index_space());
  assert(acc_input.accessor.is_dense_arbitrary(rect_input));
  float *input_ptr = acc_input.ptr(rect_input.lo);
  hipStream_t stream;
  checkCUDA(hipStreamCreate(&stream));
  checkCUDA(hipblasSetStream(m->handle.blas, stream));
  // Fix: size_t matches task->regions.size(), avoiding a signed/unsigned
  // comparison.
  for (size_t i = 1; i < task->regions.size(); i++) {
    const AccessorRO<float, 2> acc_replica(regions[i], FID_DATA);
    rect_replica = runtime->get_index_space_domain(ctx, task->regions[i].region.get_index_space());
    assert(rect_replica.volume() == rect_input.volume());
    assert(acc_replica.accessor.is_dense_arbitrary(rect_replica));
    const float *replica_ptr = acc_replica.ptr(rect_replica.lo);
    // First replica initializes input_grad (copy); later ones accumulate (axpy).
    if (i == 1)
      checkCUDA(hipblasScopy(m->handle.blas, rect_input.volume(),
                             replica_ptr, 1, input_ptr, 1));
    else
      checkCUDA(hipblasSaxpy(m->handle.blas, rect_input.volume(),
                             &alpha, replica_ptr, 1, input_ptr, 1));
  }
  // Fix: the stream was previously leaked on every invocation.
  checkCUDA(hipStreamDestroy(stream));
}
void Linear::backward(const FFModel& ff)
{
  Context ctx = ff.config.lg_ctx;
  Runtime* runtime = ff.config.lg_hlr;
  // Attach each point's cached OpMeta* as its local argument.
  ArgumentMap arg_map;
  Rect<2> launch_rect = runtime->get_index_space_domain(ctx, task_is);
  int point_idx = 0;
  for (PointInRectIterator<2> pir(launch_rect); pir(); pir++) {
    OpMeta* op_meta = meta[point_idx++];
    arg_map.set_point(*pir, TaskArgument(&op_meta, sizeof(OpMeta*)));
  }
  // Pass 1: per-shard gradients (weights, bias, replicated input grads).
  {
    IndexLauncher bwd_launcher(LINEAR_BWD_TASK_ID, task_is,
                               TaskArgument(this, sizeof(Linear)), arg_map);
    // regions[0](I): input
    bwd_launcher.add_region_requirement(
        RegionRequirement(input_lps[0], 0/*projection id*/,
                          READ_ONLY, EXCLUSIVE, inputs[0].region));
    bwd_launcher.add_field(0, FID_DATA);
    // regions[1](O): replica_grad (only the grad tensor is needed)
    bwd_launcher.add_region_requirement(
        RegionRequirement(locals[0].part_grad, 0/*projection id*/,
                          WRITE_DISCARD, EXCLUSIVE, locals[0].region_grad));
    bwd_launcher.add_field(1, FID_DATA);
    // regions[2](I): output
    bwd_launcher.add_region_requirement(
        RegionRequirement(output.part, 0/*projection id*/,
                          READ_ONLY, EXCLUSIVE, output.region));
    bwd_launcher.add_field(2, FID_DATA);
    // regions[3](I/O): output_grad
    bwd_launcher.add_region_requirement(
        RegionRequirement(output.part_grad, 0/*projection id*/,
                          READ_WRITE, EXCLUSIVE, output.region_grad));
    bwd_launcher.add_field(3, FID_DATA);
    // regions[4](I): filter
    bwd_launcher.add_region_requirement(
        RegionRequirement(locals[1].part, 0/*projection id*/,
                          READ_ONLY, EXCLUSIVE, locals[1].region));
    bwd_launcher.add_field(4, FID_DATA);
    // regions[5](O): filter_grad
    bwd_launcher.add_region_requirement(
        RegionRequirement(locals[1].part_grad, 0/*projection id*/,
                          WRITE_DISCARD, EXCLUSIVE, locals[1].region_grad));
    bwd_launcher.add_field(5, FID_DATA);
    // regions[6](O): bias_grad
    bwd_launcher.add_region_requirement(
        RegionRequirement(locals[2].part_grad, 0/*projection id*/,
                          WRITE_DISCARD, EXCLUSIVE, locals[2].region_grad));
    bwd_launcher.add_field(6, FID_DATA);
    runtime->execute_index_space(ctx, bwd_launcher);
  }
  // Pass 2: aggregate the replicated input gradients into inputs[0]'s grad.
  {
    IndexLauncher agg_launcher(LINEAR_BWD2_TASK_ID, task_is,
                               TaskArgument(this, sizeof(Linear)), arg_map);
    agg_launcher.add_region_requirement(
        RegionRequirement(inputs[0].part_grad, 0/*projection id*/,
                          WRITE_DISCARD, EXCLUSIVE, inputs[0].region_grad));
    agg_launcher.add_field(0, FID_DATA);
    for (int i = 0; i < fc_num_par_c; i++) {
      agg_launcher.add_region_requirement(
          RegionRequirement(replica_sub_lps[i], 0/*projection id*/,
                            READ_ONLY, EXCLUSIVE, locals[0].region_grad));
      agg_launcher.add_field(i + 1, FID_DATA);
    }
    runtime->execute_index_space(ctx, agg_launcher);
  }
}
/*
regions[0](I/O): filter
regions[1](I): filter_grad
regions[2](I/O): bias
regions[3](I): bias_grad
*/
__host__
void Linear::update_task(const Task *task,
                         const std::vector<PhysicalRegion> &regions,
                         Context ctx, Runtime *runtime)
{
  // Applies the parameter update: updateGAS folds num_replica gradient copies
  // into each parameter using the layer's learning rate.
  // Region layout: [0] filter (I/O), [1] filter_grad (I),
  // [2] bias (I/O), [3] bias_grad (I).
  assert(regions.size() == 4);
  assert(task->regions.size() == 4);
  const Linear* linear = (Linear*) task->args;
  const AccessorRW<float, 1> acc_filter(regions[0], FID_DATA);
  const AccessorRO<float, 2> acc_filter_grad(regions[1], FID_DATA);
  const AccessorRW<float, 1> acc_bias(regions[2], FID_DATA);
  const AccessorRO<float, 2> acc_bias_grad(regions[3], FID_DATA);
  Rect<1> rect_filter, rect_bias;
  Rect<2> rect_filter_grad, rect_bias_grad;
  rect_filter =
    runtime->get_index_space_domain(ctx, task->regions[0].region.get_index_space());
  rect_filter_grad =
    runtime->get_index_space_domain(ctx, task->regions[1].region.get_index_space());
  rect_bias =
    runtime->get_index_space_domain(ctx, task->regions[2].region.get_index_space());
  rect_bias_grad =
    runtime->get_index_space_domain(ctx, task->regions[3].region.get_index_space());
  size_t filter_size = rect_filter.volume();
  size_t bias_size = rect_bias.volume();
  assert(filter_size == linear->in_channels * linear->out_channels);
  assert(bias_size == linear->out_channels);
  // Fix: filter_size and volume() are size_t; printing them with %d is
  // undefined behavior -- use %zu.
  printf("filter_size(%zu) linear->num_replica(%d) rect_filter_grad(%zu)\n",
         filter_size, linear->num_replica, rect_filter_grad.volume());
  // The grad regions hold one copy of the parameters per replica.
  assert(filter_size * linear->num_replica == rect_filter_grad.volume());
  assert(bias_size * linear->num_replica == rect_bias_grad.volume());
  assert(acc_filter.accessor.is_dense_arbitrary(rect_filter));
  assert(acc_filter_grad.accessor.is_dense_arbitrary(rect_filter_grad));
  assert(acc_bias.accessor.is_dense_arbitrary(rect_bias));
  assert(acc_bias_grad.accessor.is_dense_arbitrary(rect_bias_grad));
  float *filter_ptr = acc_filter.ptr(rect_filter.lo);
  const float *filter_grad_ptr = acc_filter_grad.ptr(rect_filter_grad.lo);
  float *bias_ptr = acc_bias.ptr(rect_bias.lo);
  const float *bias_grad_ptr = acc_bias_grad.ptr(rect_bias_grad.lo);
  updateGAS(filter_ptr, filter_grad_ptr, filter_size,
            linear->num_replica, linear->learning_rate);
  updateGAS(bias_ptr, bias_grad_ptr, bias_size,
            linear->num_replica, linear->learning_rate);
}
__host__
void Linear::update(const FFModel& ff)
{
  // Refresh the learning rate from the global config before applying updates.
  Context ctx = ff.config.lg_ctx;
  Runtime* runtime = ff.config.lg_hlr;
  learning_rate = ff.config.learningRate;
  assert(num_replica > 0);
  // Only aggregate parameters if more than one replica
  if (num_replica > 1) {
    TaskLauncher upd_launcher(LINEAR_UPD_TASK_ID, TaskArgument(this, sizeof(Linear)));
    // regions[0](I/O): filter
    upd_launcher.add_region_requirement(
        RegionRequirement(locals[1].region, READ_WRITE, EXCLUSIVE, locals[1].region));
    upd_launcher.add_field(0, FID_DATA);
    // regions[1](I): filter_grad
    upd_launcher.add_region_requirement(
        RegionRequirement(locals[1].region_grad, READ_ONLY, EXCLUSIVE, locals[1].region_grad));
    upd_launcher.add_field(1, FID_DATA);
    // regions[2](I/O): bias
    upd_launcher.add_region_requirement(
        RegionRequirement(locals[2].region, READ_WRITE, EXCLUSIVE, locals[2].region));
    upd_launcher.add_field(2, FID_DATA);
    // regions[3](I): bias_grad
    upd_launcher.add_region_requirement(
        RegionRequirement(locals[2].region_grad, READ_ONLY, EXCLUSIVE, locals[2].region_grad));
    upd_launcher.add_field(3, FID_DATA);
    runtime->execute_task(ctx, upd_launcher);
  }
}
| 4f350dd7e81f48331bba0e3e747a74d0d523a81c.cu | /* Copyright 2017 Stanford, NVIDIA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "model.h"
#include "cuda_helper.h"
// Adds a fully-connected (dense) layer to the model and returns its output
// tensor. Ownership of the new Linear op is transferred to the model's
// layer list.
// NOTE(review): task_is is declared here but never created (no
// create_index_space call) before being handed to the Linear constructor,
// which immediately queries its domain -- verify it is initialized elsewhere.
Tensor FFModel::linear(std::string name, Tensor input, int output_channels, bool relu)
{
  // Only 2D (channels x batch) inputs are supported.
  assert(input.numDim == 2);
  IndexSpaceT<2> task_is;
  Linear *li = new Linear(name, config, input, task_is, output_channels, relu);
  layers.push_back(li);
  return li->output;
}
// Constructor: builds all Legion index spaces, regions, and partitions for a
// hybrid data+model parallel fully-connected layer.
// Parallel layout, derived from the 2D task index space `task_is`:
//   dim 0 -> fc_num_par_c shards of the output channels (model parallel)
//   dim 1 -> fc_num_par_n shards of the batch (data parallel)
// locals[0] = replica (grad-only input-gradient copies), locals[1] = kernel,
// locals[2] = bias.
Linear::Linear(std::string _name, FFConfig _config,
               Tensor _input, IndexSpaceT<2> _task_is,
               int _output_channels, bool _relu)
: Op(_name, _input), task_is(_task_is),
  relu(_relu), profiling(_config.profiling),
  in_channels(_input.adim[0]), out_channels(_output_channels)
{
  assert(_input.numDim == 2);
  Context ctx = _config.lg_ctx;
  HighLevelRuntime* runtime = _config.lg_hlr;
  Rect<2> part_rect = runtime->get_index_space_domain(ctx, task_is);
  // Degrees of parallelism along channels (c) and batch (n).
  fc_num_par_c = part_rect.hi[0] - part_rect.lo[0] + 1;
  int fc_num_par_n = part_rect.hi[1] - part_rect.lo[1] + 1;
  // One gradient replica per batch shard; aggregated later in update().
  num_replica = fc_num_par_n;
  printf("Linear fc_num_par_c(%d) fc_num_par_n(%d)\n", fc_num_par_c, fc_num_par_n);
  FieldSpace fs = _config.field_space;
  // ---- output tensor: (out_channels x batch), tiled extent_c x extent_n ----
  Rect<2, coord_t> output_rect(Point<2>(0, 0), Point<2>(out_channels-1, _input.adim[1]-1));
  IndexSpaceT<2> output_is = runtime->create_index_space(ctx, output_rect);
  LogicalRegion output_lr = runtime->create_logical_region(ctx, output_is, fs);
  LogicalRegion output_grad_lr = runtime->create_logical_region(ctx, output_is, fs);
  Transform<2, 2, coord_t> transform;
  // Per-shard extents (ceiling division, so the last shard may be ragged).
  int extent_c = (out_channels + fc_num_par_c - 1) / fc_num_par_c;
  int extent_n = (_input.adim[1] + fc_num_par_n - 1) / fc_num_par_n;
  Rect<2, coord_t> extent(Point<2>(0, 0), Point<2>(extent_c-1, extent_n-1));
  transform[0][0] = extent_c; transform[0][1] = 0;
  transform[1][0] = 0; transform[1][1] = extent_n;
  IndexPartition output_ip =
    runtime->create_partition_by_restriction(ctx, output_is, task_is, transform, extent);
  assert(runtime->is_index_partition_disjoint(ctx, output_ip));
  assert(runtime->is_index_partition_complete(ctx, output_ip));
  LogicalPartition output_lp = runtime->get_logical_partition(ctx, output_lr, output_ip);
  LogicalPartition output_grad_lp = runtime->get_logical_partition(ctx, output_grad_lr, output_ip);
  // ---- replica: per-column-shard copies of the input gradient ----
  // Note: we only need replica's grad, so no need to create lr/lp for forward
  Rect<2, coord_t> replica_rect(Point<2>(0, 0),
                                Point<2>(in_channels*fc_num_par_c-1, _input.adim[1]-1));
  IndexSpaceT<2> replica_is = runtime->create_index_space(ctx, replica_rect);
  LogicalRegion replica_lr = runtime->create_logical_region(ctx, replica_is, fs);
  transform[0][0] = in_channels;
  transform[1][1] = extent_n;
  Rect<2, coord_t> extent_r(Point<2>(0, 0), Point<2>(in_channels-1, extent_n-1));
  IndexPartition replica_ip =
    runtime->create_partition_by_restriction(ctx, replica_is, task_is, transform, extent_r);
  assert(runtime->is_index_partition_disjoint(ctx, replica_ip));
  assert(runtime->is_index_partition_complete(ctx, replica_ip));
  LogicalPartition replica_lp = runtime->get_logical_partition(ctx, replica_lr, replica_ip);
  Tensor replica_tensor;
  // Forward never touches the replica, so only the grad region/partition exist.
  replica_tensor.region = LogicalRegion::NO_REGION;
  replica_tensor.part = LogicalPartition::NO_PART;
  replica_tensor.region_grad = replica_lr;
  replica_tensor.part_grad = replica_lp;
  locals[0] = replica_tensor;
  // Create subpartitions for backward prop aggregation
  // (backward2_task reads one sub-partition per column shard and sums them).
  for (int i = 0; i < fc_num_par_c; i++) {
    transform[0][0] = _input.pdim[0];
    transform[1][1] = _input.pdim[1];
    Rect<2, coord_t> ext(Point<2>(in_channels*i, 0),
                         Point<2>(in_channels*i + _input.pdim[0] - 1,
                                  _input.pdim[1]-1));
    IndexPartition ip =
      runtime->create_partition_by_restriction(ctx, replica_is, task_is, transform, ext);
    assert(runtime->is_index_partition_disjoint(ctx, ip));
    replica_sub_lps[i] = runtime->get_logical_partition(ctx, replica_lr, ip);
  }
  // ---- kernel (weights): 1D region; grad holds one copy per batch replica ----
  Rect<1, coord_t> kernel_rect(0, in_channels * out_channels - 1);
  Rect<2, coord_t> kernel_grad_rect(Point<2>(0, 0), Point<2>(out_channels * in_channels-1, fc_num_par_n-1));
  IndexSpaceT<1> kernel_is = runtime->create_index_space(ctx, kernel_rect);
  IndexSpaceT<2> kernel_grad_is = runtime->create_index_space(ctx, kernel_grad_rect);
  LogicalRegion kernel_lr = runtime->create_logical_region(ctx, kernel_is, fs);
  LogicalRegion kernel_grad_lr = runtime->create_logical_region(ctx, kernel_grad_is, fs);
  transform[0][0] = extent_c * in_channels;
  transform[1][1] = 1;
  Rect<2, coord_t> extent_k_grad(Point<2>(0, 0), Point<2>(extent_c*in_channels-1, 0));
  printf("extent_k(%dx%d %d)\n", extent_c, in_channels, 1);
  IndexPartition kernel_grad_ip =
    runtime->create_partition_by_restriction(ctx, kernel_grad_is, task_is,
                                             transform, extent_k_grad);
  assert(runtime->is_index_partition_disjoint(ctx, kernel_grad_ip));
  assert(runtime->is_index_partition_complete(ctx, kernel_grad_ip));
  LogicalPartition kernel_grad_lp =
    runtime->get_logical_partition(ctx, kernel_grad_lr, kernel_grad_ip);
  // 1D->2D projection: each column shard sees its slice of the weights.
  Transform<1, 2, coord_t> trans;
  trans[0][0] = extent_c * in_channels; trans[0][1] = 0;
  Rect<1, coord_t> extent_k(0, extent_c*in_channels-1);
  IndexPartition kernel_ip =
    runtime->create_partition_by_restriction(ctx, kernel_is, task_is, trans, extent_k);
  LogicalPartition kernel_lp =
    runtime->get_logical_partition(ctx, kernel_lr, kernel_ip);
  Tensor kernel_tensor;
  kernel_tensor.region = kernel_lr;
  kernel_tensor.part = kernel_lp;
  kernel_tensor.region_grad = kernel_grad_lr;
  kernel_tensor.part_grad = kernel_grad_lp;
  locals[1] = kernel_tensor;
  // ---- bias: 1D region; grad likewise replicated per batch shard ----
  Rect<1, coord_t> bias_rect(0, out_channels-1);
  Rect<2, coord_t> bias_grad_rect(Point<2>(0, 0), Point<2>(out_channels-1, fc_num_par_n-1));
  IndexSpaceT<1> bias_is = runtime->create_index_space(ctx, bias_rect);
  IndexSpaceT<2> bias_grad_is = runtime->create_index_space(ctx, bias_grad_rect);
  LogicalRegion bias_lr = runtime->create_logical_region(ctx, bias_is, fs);
  LogicalRegion bias_grad_lr = runtime->create_logical_region(ctx, bias_grad_is, fs);
  transform[0][0] = extent_c;
  transform[1][1] = 1;
  Rect<2, coord_t> extent_b_grad(Point<2>(0, 0), Point<2>(extent_c-1,0));
  IndexPartition bias_grad_ip =
    runtime->create_partition_by_restriction(ctx, bias_grad_is, task_is,
                                             transform, extent_b_grad);
  assert(runtime->is_index_partition_disjoint(ctx, bias_grad_ip));
  assert(runtime->is_index_partition_complete(ctx, bias_grad_ip));
  LogicalPartition bias_grad_lp =
    runtime->get_logical_partition(ctx, bias_grad_lr, bias_grad_ip);
  trans[0][0] = extent_c; trans[0][1] = 0;
  Rect<1, coord_t> extent_b(0, extent_c-1);
  IndexPartition bias_ip =
    runtime->create_partition_by_restriction(ctx, bias_is, task_is, trans, extent_b);
  LogicalPartition bias_lp =
    runtime->get_logical_partition(ctx, bias_lr, bias_ip);
  Tensor bias_tensor;
  bias_tensor.region = bias_lr;
  bias_tensor.part = bias_lp;
  bias_tensor.region_grad = bias_grad_lr;
  bias_tensor.part_grad = bias_grad_lp;
  locals[2] = bias_tensor;
  numLocals = 3;
  // ---- publish the output tensor metadata ----
  output.numDim = 2;
  output.adim[0] = out_channels;
  output.adim[1] = _input.adim[1];
  output.pdim[0] = extent_c;
  output.pdim[1] = extent_n;
  output.region = output_lr;
  output.part = output_lp;
  output.region_grad = output_grad_lr;
  output.part_grad = output_grad_lp;
  // Every partition reads all in_channels
  // (transform[0][0] == 0 replicates the channel dimension to all shards).
  transform[0][0] = 0;
  transform[1][1] = extent_n;
  Rect<2, coord_t> extent_i(Point<2>(0, 0), Point<2>(in_channels-1, extent_n-1));
  IndexSpaceT<2> input_is = IndexSpaceT<2>(inputs[0].region.get_index_space());
  IndexPartition input_ip
    = runtime->create_partition_by_restriction(ctx, input_is, task_is, transform, extent_i);
  input_lps[0] = runtime->get_logical_partition(ctx, inputs[0].region, input_ip);
}
/*
regions[0](I): input
regions[1](O): output
regions[2]: replica
regions[3](I): kernel
regions[4](I): bias
*/
// Per-point init: validates region shapes, builds the LinearMeta (including
// the device-side "ones" vector used for bias broadcast) and the optional
// cuDNN ReLU descriptors. Returns the OpMeta* collected by Linear::init.
OpMeta* Linear::init_task(const Task *task,
                          const std::vector<PhysicalRegion> &regions,
                          Context ctx, Runtime *runtime)
{
  assert(regions.size() == 5);
  assert(task->regions.size() == 5);
  const Linear* linear = (Linear*) task->args;
  FFHandler handle = *((const FFHandler*) task->local_args);
  Rect<2> rect_input, rect_output, rect_replica;
  Rect<1> rect_kernel, rect_bias;
  rect_input = runtime->get_index_space_domain(ctx, task->regions[0].region.get_index_space());
  rect_output = runtime->get_index_space_domain(ctx, task->regions[1].region.get_index_space());
  rect_replica = runtime->get_index_space_domain(ctx, task->regions[2].region.get_index_space());
  rect_kernel = runtime->get_index_space_domain(ctx, task->regions[3].region.get_index_space());
  rect_bias = runtime->get_index_space_domain(ctx, task->regions[4].region.get_index_space());
  assert(rect_replica.volume() == rect_input.volume());
  int input_channels = rect_input.hi[0] - rect_input.lo[0] + 1;
  int output_channels = rect_output.hi[0] - rect_output.lo[0] + 1;
  int batch_size = linear->output.pdim[1];
  printf("init linear (input): in_c(%d) out_c(%d) batch_size(%d)\n", input_channels, output_channels, batch_size);
  LinearMeta* m = new LinearMeta(handle);
  m->relu = linear->relu;
  m->in_channels = input_channels;
  m->out_channels = output_channels;
  m->batch_size = batch_size;
  // Stage a host vector of ones and copy it to the device; forward/backward
  // use it to broadcast/reduce the bias over the batch.
  float* dram_one_ptr = (float *) malloc(sizeof(float) * batch_size);
  for (int i = 0; i < batch_size; i++)
    dram_one_ptr[i] = 1.0f;
  checkCUDA(cudaMalloc(&m->one_ptr, sizeof(float) * batch_size));
  checkCUDA(cudaMemcpy(m->one_ptr, dram_one_ptr,
                       sizeof(float) * batch_size, cudaMemcpyHostToDevice));
  // Fix: the host staging buffer was never freed (leaked once per init task).
  free(dram_one_ptr);
  if (m->relu) {
    checkCUDNN(cudnnCreateActivationDescriptor(&m->actiDesc));
    checkCUDNN(cudnnSetActivationDescriptor(m->actiDesc, CUDNN_ACTIVATION_RELU,
                                            CUDNN_PROPAGATE_NAN, 0.0));
    checkCUDNN(cudnnCreateTensorDescriptor(&m->outputTensor));
    checkCUDNN(cudnnSetTensor4dDescriptor(m->outputTensor,
                                          CUDNN_TENSOR_NCHW,
                                          CUDNN_DATA_FLOAT,
                                          batch_size, output_channels, 1, 1));
  }
  return m;
}
/*
regions[0](O): filter
regions[1](O): bias
*/
__host__
void Linear::init_para_task(const Task *task,
                            const std::vector<PhysicalRegion> &regions,
                            Context ctx, Runtime *runtime)
{
  // Initializes filter and bias with U(-factor, factor), factor = 1/sqrt(fan_in).
  // Region layout: [0] filter (WO), [1] bias (WO).
  assert(regions.size() == 2);
  assert(task->regions.size() == 2);
  const Linear* linear = (Linear*) task->args;
  const AccessorWO<float, 1> acc_filter(regions[0], FID_DATA);
  const AccessorWO<float, 1> acc_bias(regions[1], FID_DATA);
  Rect<1> rect_filter, rect_bias;
  rect_filter = runtime->get_index_space_domain(ctx, task->regions[0].region.get_index_space());
  rect_bias = runtime->get_index_space_domain(ctx, task->regions[1].region.get_index_space());
  assert(acc_filter.accessor.is_dense_arbitrary(rect_filter));
  assert(acc_bias.accessor.is_dense_arbitrary(rect_bias));
  float *filter_ptr = acc_filter.ptr(rect_filter.lo);
  float *bias_ptr = acc_bias.ptr(rect_bias.lo);
  // init filter and bias
  cudaStream_t stream;
  checkCUDA(cudaStreamCreate(&stream));
  curandGenerator_t genGPU;
  curandCreateGenerator(&genGPU, CURAND_RNG_PSEUDO_DEFAULT);
  curandSetStream(genGPU, stream);
  // Fixed seed keeps parameter initialization reproducible.
  curandSetPseudoRandomGeneratorSeed(genGPU, 1234ULL);
  coord_t filter_elements = linear->in_channels * linear->out_channels;
  // Fix: use the float sqrt overload instead of promoting through double.
  float factor = 1.0f / sqrtf((float)linear->in_channels);
  assert(filter_elements == rect_filter.volume());
  curandGenerateUniform(genGPU, filter_ptr, filter_elements);
  // Fix: run the rescale kernels on the same stream as curand so the
  // ordering is explicit instead of relying on legacy default-stream sync.
  scale_kernel<<<GET_BLOCKS(filter_elements), CUDA_NUM_THREADS, 0, stream>>>(
      filter_ptr, filter_elements, -factor, factor);
  curandGenerateUniform(genGPU, bias_ptr, linear->out_channels);
  assert(linear->out_channels == rect_bias.volume());
  scale_kernel<<<GET_BLOCKS(linear->out_channels), CUDA_NUM_THREADS, 0, stream>>>(
      bias_ptr, linear->out_channels, -factor, factor);
  curandDestroyGenerator(genGPU);
  // Fix: the stream was never destroyed (resource leak per invocation).
  checkCUDA(cudaStreamDestroy(stream));
}
void Linear::init(const FFModel& ff)
{
  Context ctx = ff.config.lg_ctx;
  Runtime* runtime = ff.config.lg_hlr;
  // Step 1: initialize the filter and bias parameters with a single task.
  {
    TaskLauncher param_init(LINEAR_INIT_PARA_TASK_ID, TaskArgument(this, sizeof(Linear)));
    param_init.add_region_requirement(
        RegionRequirement(locals[1].region, WRITE_DISCARD, EXCLUSIVE, locals[1].region));
    param_init.add_field(0, FID_DATA);
    param_init.add_region_requirement(
        RegionRequirement(locals[2].region, WRITE_DISCARD, EXCLUSIVE, locals[2].region));
    param_init.add_field(1, FID_DATA);
    runtime->execute_task(ctx, param_init);
  }
  // Step 2: run the per-point init task, passing each point its FFHandler.
  ArgumentMap arg_map;
  Rect<2> launch_rect = runtime->get_index_space_domain(ctx, task_is);
  int point_idx = 0;
  for (PointInRectIterator<2> pir(launch_rect); pir(); pir++) {
    FFHandler handle = ff.handlers[point_idx++];
    arg_map.set_point(*pir, TaskArgument(&handle, sizeof(FFHandler)));
  }
  IndexLauncher meta_init(LINEAR_INIT_TASK_ID, task_is,
                          TaskArgument(this, sizeof(Linear)), arg_map);
  // regions[0](I): input
  meta_init.add_region_requirement(
      RegionRequirement(input_lps[0], 0/*projection id*/,
                        READ_ONLY, EXCLUSIVE, inputs[0].region));
  meta_init.add_field(0, FID_DATA);
  // regions[1](O): output
  meta_init.add_region_requirement(
      RegionRequirement(output.part, 0/*projection id*/,
                        WRITE_DISCARD, EXCLUSIVE, output.region));
  meta_init.add_field(1, FID_DATA);
  // regions[2](O): replica grad
  meta_init.add_region_requirement(
      RegionRequirement(locals[0].part_grad, 0/*projection id*/,
                        WRITE_DISCARD, EXCLUSIVE, locals[0].region_grad));
  meta_init.add_field(2, FID_DATA);
  // regions[3](I): kernel
  meta_init.add_region_requirement(
      RegionRequirement(locals[1].part, 0/*projection id*/,
                        READ_ONLY, EXCLUSIVE, locals[1].region));
  meta_init.add_field(3, FID_DATA);
  // regions[4](I): bias
  meta_init.add_region_requirement(
      RegionRequirement(locals[2].part, 0/*projection id*/,
                        READ_ONLY, EXCLUSIVE, locals[2].region));
  meta_init.add_field(4, FID_DATA);
  FutureMap fm = runtime->execute_index_space(ctx, meta_init);
  fm.wait_all_results();
  // Collect each point task's OpMeta* for later forward/backward launches.
  point_idx = 0;
  for (PointInRectIterator<2> pir(launch_rect); pir(); pir++) {
    meta[point_idx++] = fm.get_result<OpMeta*>(*pir);
  }
}
/*
regions[0](I); input
regions[1](O): output
regions[2](I): kernel
regions[3](I): bias
*/
__host__
void Linear::forward_task(const Task *task,
                          const std::vector<PhysicalRegion> &regions,
                          Context ctx, Runtime *runtime)
{
  // Forward pass: output = relu?(kernel^T * input + bias * ones^T).
  // Region layout: [0] input (RO), [1] output (WO), [2] kernel (RO), [3] bias (RO).
  assert(regions.size() == 4);
  assert(task->regions.size() == 4);
  float alpha = 1.0f, beta = 0.0f;
  const Linear* linear = (Linear*) task->args;
  const LinearMeta* m = *((LinearMeta**) task->local_args);
  int input_channels = m->in_channels;
  int output_channels = m->out_channels;
  int batch_size = m->batch_size;
  // Device vector of batch_size ones, used to broadcast the bias over the batch.
  const float *one_ptr = m->one_ptr;
  const AccessorRO<float, 2> acc_input(regions[0], FID_DATA);
  const AccessorWO<float, 2> acc_output(regions[1], FID_DATA);
  const AccessorRO<float, 1> acc_kernel(regions[2], FID_DATA);
  const AccessorRO<float, 1> acc_bias(regions[3], FID_DATA);
  Rect<2> rect_input, rect_output;
  Rect<1> rect_kernel, rect_bias;
  rect_input = runtime->get_index_space_domain(ctx, task->regions[0].region.get_index_space());
  rect_output = runtime->get_index_space_domain(ctx, task->regions[1].region.get_index_space());
  rect_kernel = runtime->get_index_space_domain(ctx, task->regions[2].region.get_index_space());
  rect_bias = runtime->get_index_space_domain(ctx, task->regions[3].region.get_index_space());
  // make sure the sizes match
  assert(rect_input.volume() == input_channels * batch_size);
  assert(rect_output.volume() == output_channels * batch_size);
  assert(rect_kernel.volume() == input_channels * output_channels);
  assert(rect_bias.volume() == output_channels);
  // All accessors must be dense so raw pointers can be handed to cuBLAS.
  assert(acc_input.accessor.is_dense_arbitrary(rect_input));
  assert(acc_output.accessor.is_dense_arbitrary(rect_output));
  assert(acc_kernel.accessor.is_dense_arbitrary(rect_kernel));
  assert(acc_bias.accessor.is_dense_arbitrary(rect_bias));
  const float *input_ptr = acc_input.ptr(rect_input.lo);
  float *output_ptr = acc_output.ptr(rect_output.lo);
  const float *kernel_ptr = acc_kernel.ptr(rect_kernel.lo);
  const float *bias_ptr = acc_bias.ptr(rect_bias.lo);
  cudaEvent_t t_start, t_end;
  if (linear->profiling) {
    cudaEventCreate(&t_start);
    cudaEventCreate(&t_end);
    cudaEventRecord(t_start);
  }
  cudaStream_t stream;
  checkCUDA(cudaStreamCreate(&stream));
  checkCUDA(cublasSetStream(m->handle.blas, stream));
  // output = kernel^T * input; kernel is stored as in_channels x out_channels
  // column-major, hence the transpose on the A operand.
  checkCUDA(cublasSgemm(m->handle.blas, CUBLAS_OP_T, CUBLAS_OP_N,
                        output_channels, batch_size, input_channels,
                        &alpha, kernel_ptr, input_channels,
                        input_ptr, input_channels, &beta,
                        output_ptr, output_channels));
  // output += bias * ones^T (rank-1 update broadcasts the bias; beta = alpha
  // so the gemm result above is accumulated into rather than overwritten).
  checkCUDA(cublasSgemm(m->handle.blas, CUBLAS_OP_T, CUBLAS_OP_N,
                        output_channels, batch_size, 1,
                        &alpha, bias_ptr, 1,
                        one_ptr, 1, &alpha,
                        output_ptr, output_channels));
  if (m->relu) {
    // In-place ReLU on the output tensor.
    checkCUDNN(cudnnActivationForward(m->handle.dnn, m->actiDesc,
                                      &alpha, m->outputTensor, output_ptr,
                                      &beta, m->outputTensor, output_ptr));
  }
  if (linear->profiling) {
    cudaEventRecord(t_end);
    checkCUDA(cudaEventSynchronize(t_end));
    float elapsed = 0;
    checkCUDA(cudaEventElapsedTime(&elapsed, t_start, t_end));
    cudaEventDestroy(t_start);
    cudaEventDestroy(t_end);
    printf("Linear forward time = %.2lfms\n", elapsed);
  }
  // Fix: the stream was previously leaked on every invocation. Destroying it
  // here is safe even with work still queued -- destruction is deferred until
  // all enqueued work completes.
  checkCUDA(cudaStreamDestroy(stream));
}
// Launch the forward index task over the 2D task grid.
// The region ordering must match forward_task's expectations:
//   0: input (RO), 1: output (WD), 2: kernel (RO), 3: bias (RO)
void Linear::forward(const FFModel& ff)
{
  ArgumentMap argmap;
  Context ctx = ff.config.lg_ctx;
  Runtime* runtime = ff.config.lg_hlr;
  Rect<2> rect = runtime->get_index_space_domain(ctx, task_is);
  int idx = 0;
  // Hand each task point its per-device OpMeta pointer via the argument map.
  for (PointInRectIterator<2> it(rect); it(); it++) {
    OpMeta* mp = meta[idx++];
    argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*)));
  }
  IndexLauncher launcher(LINEAR_FWD_TASK_ID, task_is,
                         TaskArgument(this, sizeof(Linear)), argmap);
  launcher.add_region_requirement(
      RegionRequirement(input_lps[0], 0/*projection id*/,
                        READ_ONLY, EXCLUSIVE, inputs[0].region));
  launcher.add_field(0, FID_DATA);
  launcher.add_region_requirement(
      RegionRequirement(output.part, 0/*projection id*/,
                        WRITE_DISCARD, EXCLUSIVE, output.region));
  launcher.add_field(1, FID_DATA);
  //launcher.add_region_requirement(
  //    RegionRequirement(locals[0].partition, 0/*projection id*/,
  //                      WRITE_DISCARD, EXCLUSIVE, locals[0].region));
  //launcher.add_field(2, FID_DATA);
  // locals[1] holds the kernel (weights), locals[2] holds the bias.
  launcher.add_region_requirement(
      RegionRequirement(locals[1].part, 0/*projection id*/,
                        READ_ONLY, EXCLUSIVE, locals[1].region));
  launcher.add_field(2, FID_DATA);
  launcher.add_region_requirement(
      RegionRequirement(locals[2].part, 0/*projection id*/,
                        READ_ONLY, EXCLUSIVE, locals[2].region));
  launcher.add_field(3, FID_DATA);
  runtime->execute_index_space(ctx, launcher);
}
/*
regions[0](I): input
regions[1](O): replica_grad
regions[2](I): output
regions[3](I/O): output_grad
regions[4](I): filter
regions[5](O): filter_grad
regions[6](O): bias_grad
*/
// Backward pass for the fully-connected layer.
//   regions[0](I): input      regions[1](O):   replica_grad
//   regions[2](I): output     regions[3](I/O): output_grad
//   regions[4](I): filter     regions[5](O):   filter_grad
//   regions[6](O): bias_grad
// After optionally applying the ReLU derivative to output_grad in place:
//   filter_grad  = input * output_grad^T
//   bias_grad    = output_grad * ones
//   replica_grad = filter * output_grad
__host__
void Linear::backward_task(const Task *task,
                           const std::vector<PhysicalRegion> &regions,
                           Context ctx, Runtime *runtime)
{
  assert(regions.size() == 7);
  assert(task->regions.size() == 7);
  float alpha = 1.0f, beta = 0.0f;
  const Linear* linear = (Linear*) task->args;
  const LinearMeta* m = *((LinearMeta**) task->local_args);
  int input_channels = m->in_channels;
  int output_channels = m->out_channels;
  int batch_size = m->batch_size;
  const float *one_ptr = m->one_ptr;
  const AccessorRO<float, 2> acc_input(regions[0], FID_DATA);
  const AccessorWO<float, 2> acc_replica_grad(regions[1], FID_DATA);
  const AccessorRO<float, 2> acc_output(regions[2], FID_DATA);
  const AccessorRW<float, 2> acc_output_grad(regions[3], FID_DATA);
  const AccessorRO<float, 1> acc_kernel(regions[4], FID_DATA);
  const AccessorWO<float, 2> acc_kernel_grad(regions[5], FID_DATA);
  const AccessorWO<float, 2> acc_bias_grad(regions[6], FID_DATA);
  Rect<2> rect_input, rect_replica_grad, rect_output, rect_output_grad,
      rect_kernel_grad, rect_bias_grad;
  Rect<1> rect_kernel;
  rect_input =
      runtime->get_index_space_domain(ctx, task->regions[0].region.get_index_space());
  rect_replica_grad =
      runtime->get_index_space_domain(ctx, task->regions[1].region.get_index_space());
  rect_output =
      runtime->get_index_space_domain(ctx, task->regions[2].region.get_index_space());
  rect_output_grad =
      runtime->get_index_space_domain(ctx, task->regions[3].region.get_index_space());
  rect_kernel =
      runtime->get_index_space_domain(ctx, task->regions[4].region.get_index_space());
  rect_kernel_grad =
      runtime->get_index_space_domain(ctx, task->regions[5].region.get_index_space());
  rect_bias_grad =
      runtime->get_index_space_domain(ctx, task->regions[6].region.get_index_space());
  // make sure the sizes match
  assert(rect_input.volume() == input_channels * batch_size);
  assert(rect_replica_grad.volume() == input_channels * batch_size);
  assert(rect_output.volume() == output_channels * batch_size);
  assert(rect_output_grad.volume() == output_channels * batch_size);
  assert(rect_kernel.volume() == input_channels * output_channels);
  assert(rect_kernel_grad.volume() == input_channels * output_channels);
  assert(rect_bias_grad.volume() == output_channels);
  // make sure all regions are dense so raw-pointer access is valid
  assert(acc_input.accessor.is_dense_arbitrary(rect_input));
  assert(acc_replica_grad.accessor.is_dense_arbitrary(rect_replica_grad));
  assert(acc_output.accessor.is_dense_arbitrary(rect_output));
  assert(acc_output_grad.accessor.is_dense_arbitrary(rect_output_grad));
  assert(acc_kernel.accessor.is_dense_arbitrary(rect_kernel));
  assert(acc_kernel_grad.accessor.is_dense_arbitrary(rect_kernel_grad));
  assert(acc_bias_grad.accessor.is_dense_arbitrary(rect_bias_grad));
  const float *input_ptr = acc_input.ptr(rect_input.lo);
  float *replica_grad_ptr = acc_replica_grad.ptr(rect_replica_grad.lo);
  const float *output_ptr = acc_output.ptr(rect_output.lo);
  float *output_grad_ptr = acc_output_grad.ptr(rect_output_grad.lo);
  const float *kernel_ptr = acc_kernel.ptr(rect_kernel.lo);
  float *kernel_grad_ptr = acc_kernel_grad.ptr(rect_kernel_grad.lo);
  float *bias_grad_ptr = acc_bias_grad.ptr(rect_bias_grad.lo);
  // Create the stream first so all GPU work (including the ReLU-backward
  // kernel) is ordered on it and the profiling events time the right stream.
  cudaStream_t stream;
  checkCUDA(cudaStreamCreate(&stream));
  checkCUDA(cublasSetStream(m->handle.blas, stream));
  cudaEvent_t t_start, t_end;
  if (linear->profiling) {
    cudaEventCreate(&t_start);
    cudaEventCreate(&t_end);
    cudaEventRecord(t_start, stream);
  }
  if (m->relu) {
    int n = rect_output.volume();
    // Apply the ReLU derivative to output_grad in place. Launch on `stream`
    // (previously the default stream) so it is explicitly ordered before the
    // gemms below that consume output_grad.
    reluBackward<<<GET_BLOCKS(n), CUDA_NUM_THREADS, 0, stream>>>(
        output_grad_ptr, output_ptr, n);
  }
  // Compute weight gradient: kernel_grad = input * output_grad^T
  checkCUDA(cublasSgemm(m->handle.blas, CUBLAS_OP_N, CUBLAS_OP_T,
                        input_channels, output_channels, batch_size,
                        &alpha, input_ptr, input_channels,
                        output_grad_ptr, output_channels,
                        &beta, kernel_grad_ptr, input_channels));
  // Compute bias gradient: bias_grad = output_grad * ones
  checkCUDA(cublasSgemv(m->handle.blas, CUBLAS_OP_N,
                        output_channels, batch_size,
                        &alpha, output_grad_ptr, output_channels,
                        one_ptr, 1,
                        &beta, bias_grad_ptr, 1));
  // Compute data gradient: replica_grad = kernel * output_grad
  checkCUDA(cublasSgemm(m->handle.blas, CUBLAS_OP_N, CUBLAS_OP_N,
                        input_channels, batch_size, output_channels,
                        &alpha, kernel_ptr, input_channels,
                        output_grad_ptr, output_channels,
                        &beta, replica_grad_ptr, input_channels));
  if (linear->profiling) {
    cudaEventRecord(t_end, stream);
    checkCUDA(cudaEventSynchronize(t_end));
    float elapsed = 0;
    checkCUDA(cudaEventElapsedTime(&elapsed, t_start, t_end));
    cudaEventDestroy(t_start);
    cudaEventDestroy(t_end);
    printf("Linear backward time = %.2lfms\n", elapsed);
  }
  // The stream was previously leaked on every task execution; destroy it.
  checkCUDA(cudaStreamDestroy(stream));
}
/*
regions[0](O): input_grad
regions[1..fc_num_par_c]: subreplicas
*/
// Reduce replica gradients into the input gradient:
//   regions[0](O):  input_grad (accumulator)
//   regions[1..n]:  replica gradients, each with the same volume as regions[0]
// input_grad = regions[1] + regions[2] + ... (first replica copied, the rest
// accumulated with axpy).
__host__
void Linear::backward2_task(const Task *task,
                            const std::vector<PhysicalRegion> &regions,
                            Context ctx, Runtime *runtime)
{
  float alpha = 1.0f;
  const LinearMeta* m = *((LinearMeta**) task->local_args);
  const AccessorWO<float, 2> acc_input(regions[0], FID_DATA);
  Rect<2> rect_input, rect_replica;
  rect_input = runtime->get_index_space_domain(ctx, task->regions[0].region.get_index_space());
  assert(acc_input.accessor.is_dense_arbitrary(rect_input));
  float *input_ptr = acc_input.ptr(rect_input.lo);
  cudaStream_t stream;
  checkCUDA(cudaStreamCreate(&stream));
  checkCUDA(cublasSetStream(m->handle.blas, stream));
  // size_t loop index avoids a signed/unsigned comparison warning.
  for (size_t i = 1; i < task->regions.size(); i++) {
    const AccessorRO<float, 2> acc_replica(regions[i], FID_DATA);
    rect_replica = runtime->get_index_space_domain(ctx, task->regions[i].region.get_index_space());
    assert(rect_replica.volume() == rect_input.volume());
    assert(acc_replica.accessor.is_dense_arbitrary(rect_replica));
    const float *replica_ptr = acc_replica.ptr(rect_replica.lo);
    if (i == 1)
      // First replica initializes the write-discard accumulator.
      checkCUDA(cublasScopy(m->handle.blas, rect_input.volume(),
                            replica_ptr, 1, input_ptr, 1));
    else
      // Remaining replicas are summed in.
      checkCUDA(cublasSaxpy(m->handle.blas, rect_input.volume(),
                            &alpha, replica_ptr, 1, input_ptr, 1));
  }
  // The stream was previously leaked on every task execution; destroy it.
  checkCUDA(cudaStreamDestroy(stream));
}
// Launch the two-phase backward pass:
//  1) LINEAR_BWD_TASK_ID computes replica/filter/bias gradients per point;
//  2) LINEAR_BWD2_TASK_ID reduces replica gradients into inputs[0]'s grad.
void Linear::backward(const FFModel& ff)
{
  ArgumentMap argmap;
  Context ctx = ff.config.lg_ctx;
  Runtime* runtime = ff.config.lg_hlr;
  Rect<2> rect = runtime->get_index_space_domain(ctx, task_is);
  int idx = 0;
  // Hand each task point its per-device OpMeta pointer via the argument map.
  for (PointInRectIterator<2> it(rect); it(); it++) {
    OpMeta* mp = meta[idx++];
    argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*)));
  }
  {
    IndexLauncher launcher(LINEAR_BWD_TASK_ID, task_is,
                           TaskArgument(this, sizeof(Linear)), argmap);
    // regions[0](I): input
    launcher.add_region_requirement(
        RegionRequirement(input_lps[0], 0/*projection id*/,
                          READ_ONLY, EXCLUSIVE, inputs[0].region));
    launcher.add_field(0, FID_DATA);
    // regions[1](O): replica_grad (we only need grad tensors)
    launcher.add_region_requirement(
        RegionRequirement(locals[0].part_grad, 0/*projection id*/,
                          WRITE_DISCARD, EXCLUSIVE, locals[0].region_grad));
    launcher.add_field(1, FID_DATA);
    // regions[2](I): output
    launcher.add_region_requirement(
        RegionRequirement(output.part, 0/*projection id*/,
                          READ_ONLY, EXCLUSIVE, output.region));
    launcher.add_field(2, FID_DATA);
    // regions[3](I/O): output_grad
    launcher.add_region_requirement(
        RegionRequirement(output.part_grad, 0/*projection id*/,
                          READ_WRITE, EXCLUSIVE, output.region_grad));
    launcher.add_field(3, FID_DATA);
    // regions[4](I): filter
    launcher.add_region_requirement(
        RegionRequirement(locals[1].part, 0/*projection id*/,
                          READ_ONLY, EXCLUSIVE, locals[1].region));
    launcher.add_field(4, FID_DATA);
    // regions[5](O): filter_grad
    launcher.add_region_requirement(
        RegionRequirement(locals[1].part_grad, 0/*projection id*/,
                          WRITE_DISCARD, EXCLUSIVE, locals[1].region_grad));
    launcher.add_field(5, FID_DATA);
    // regions[6](O): bias_grad
    launcher.add_region_requirement(
        RegionRequirement(locals[2].part_grad, 0/*projection id*/,
                          WRITE_DISCARD, EXCLUSIVE, locals[2].region_grad));
    launcher.add_field(6, FID_DATA);
    runtime->execute_index_space(ctx, launcher);
  }
  {
    // We aggregate parameters from replica tensor to input tensor
    IndexLauncher launcher2(LINEAR_BWD2_TASK_ID, task_is,
                            TaskArgument(this, sizeof(Linear)), argmap);
    launcher2.add_region_requirement(
        RegionRequirement(inputs[0].part_grad, 0/*projection id*/,
                          WRITE_DISCARD, EXCLUSIVE, inputs[0].region_grad));
    launcher2.add_field(0, FID_DATA);
    // One read-only requirement per replica sub-partition (fields 1..n),
    // matching backward2_task's regions[1..fc_num_par_c].
    for (int i = 0; i < fc_num_par_c; i++) {
      launcher2.add_region_requirement(
          RegionRequirement(replica_sub_lps[i], 0/*partition id*/,
                            READ_ONLY, EXCLUSIVE, locals[0].region_grad));
      launcher2.add_field(i + 1, FID_DATA);
    }
    runtime->execute_index_space(ctx, launcher2);
  }
}
/*
regions[0](I/O): filter
regions[1](I): filter_grad
regions[2](I/O): bias
regions[3](I): bias_grad
*/
// Aggregate replicated gradients and apply the update to filter and bias.
//   regions[0](I/O): filter    regions[1](I): filter_grad (num_replica copies)
//   regions[2](I/O): bias      regions[3](I): bias_grad   (num_replica copies)
// The actual averaging/step is delegated to updateGAS().
__host__
void Linear::update_task(const Task *task,
                         const std::vector<PhysicalRegion> &regions,
                         Context ctx, Runtime *runtime)
{
  assert(regions.size() == 4);
  assert(task->regions.size() == 4);
  const Linear* linear = (Linear*) task->args;
  const AccessorRW<float, 1> acc_filter(regions[0], FID_DATA);
  const AccessorRO<float, 2> acc_filter_grad(regions[1], FID_DATA);
  const AccessorRW<float, 1> acc_bias(regions[2], FID_DATA);
  const AccessorRO<float, 2> acc_bias_grad(regions[3], FID_DATA);
  Rect<1> rect_filter, rect_bias;
  Rect<2> rect_filter_grad, rect_bias_grad;
  rect_filter =
      runtime->get_index_space_domain(ctx, task->regions[0].region.get_index_space());
  rect_filter_grad =
      runtime->get_index_space_domain(ctx, task->regions[1].region.get_index_space());
  rect_bias =
      runtime->get_index_space_domain(ctx, task->regions[2].region.get_index_space());
  rect_bias_grad =
      runtime->get_index_space_domain(ctx, task->regions[3].region.get_index_space());
  size_t filter_size = rect_filter.volume();
  size_t bias_size = rect_bias.volume();
  assert(filter_size == linear->in_channels * linear->out_channels);
  assert(bias_size == linear->out_channels);
  // %zu for size_t values: the old %d was undefined behavior on LP64.
  printf("filter_size(%zu) linear->num_replica(%d) rect_filter_grad(%zu)\n",
         filter_size, linear->num_replica, rect_filter_grad.volume());
  // The grad regions hold one gradient copy per replica.
  assert(filter_size * linear->num_replica == rect_filter_grad.volume());
  assert(bias_size * linear->num_replica == rect_bias_grad.volume());
  assert(acc_filter.accessor.is_dense_arbitrary(rect_filter));
  assert(acc_filter_grad.accessor.is_dense_arbitrary(rect_filter_grad));
  assert(acc_bias.accessor.is_dense_arbitrary(rect_bias));
  assert(acc_bias_grad.accessor.is_dense_arbitrary(rect_bias_grad));
  float *filter_ptr = acc_filter.ptr(rect_filter.lo);
  const float *filter_grad_ptr = acc_filter_grad.ptr(rect_filter_grad.lo);
  float *bias_ptr = acc_bias.ptr(rect_bias.lo);
  const float *bias_grad_ptr = acc_bias_grad.ptr(rect_bias_grad.lo);
  updateGAS(filter_ptr, filter_grad_ptr, filter_size,
            linear->num_replica, linear->learning_rate);
  updateGAS(bias_ptr, bias_grad_ptr, bias_size,
            linear->num_replica, linear->learning_rate);
}
// Launch the parameter-update task. The four regions map to update_task's
// expectations: 0=filter(RW), 1=filter_grad(RO), 2=bias(RW), 3=bias_grad(RO).
__host__
void Linear::update(const FFModel& ff)
{
  // Synchronize the learning rate
  Context ctx = ff.config.lg_ctx;
  Runtime* runtime = ff.config.lg_hlr;
  learning_rate = ff.config.learningRate;
  assert(num_replica > 0);
  // Only aggregate parameters if more than one replica
  if (num_replica > 1) {
    TaskLauncher launcher(LINEAR_UPD_TASK_ID, TaskArgument(this, sizeof(Linear)));
    // locals[1] = filter (weights) and its gradients
    launcher.add_region_requirement(
        RegionRequirement(locals[1].region, READ_WRITE, EXCLUSIVE, locals[1].region));
    launcher.add_field(0, FID_DATA);
    launcher.add_region_requirement(
        RegionRequirement(locals[1].region_grad, READ_ONLY, EXCLUSIVE, locals[1].region_grad));
    launcher.add_field(1, FID_DATA);
    // locals[2] = bias and its gradients
    launcher.add_region_requirement(
        RegionRequirement(locals[2].region, READ_WRITE, EXCLUSIVE, locals[2].region));
    launcher.add_field(2, FID_DATA);
    launcher.add_region_requirement(
        RegionRequirement(locals[2].region_grad, READ_ONLY, EXCLUSIVE, locals[2].region_grad));
    launcher.add_field(3, FID_DATA);
    runtime->execute_task(ctx, launcher);
  }
}
|
a2b0f2b61d589faffde19b5afc6905ff3e2c4103.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <thrust/execution_policy.h>
#include <thrust/sort.h>
#include "hipcub/hipcub.hpp"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/argsort_op.h"
#include "paddle/fluid/operators/transpose_op.h"
#include "paddle/fluid/platform/cuda_device_function.h"
#include "paddle/fluid/platform/cuda_primitives.h"
// set cub base traits in order to handle float16: registers float16 with
// cub's radix sort as a 16-bit floating-point key (bit pattern in uint16_t),
// so DeviceSegmentedRadixSort below can order float16 values correctly.
namespace cub {
template <>
struct NumericTraits<paddle::platform::float16>
    : BaseTraits<FLOATING_POINT, true, false, uint16_t,
                 paddle::platform::float16> {};
}  // namespace cub
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
// Iter for move to next row: maps segment index -> byte-free element offset
// of that row's first element. Used below as cub's segment-offset iterator,
// so segment i spans [i * num_cols, (i + 1) * num_cols).
struct SegmentOffsetIter {
  EIGEN_DEVICE_FUNC
  explicit SegmentOffsetIter(int num_cols) : num_cols_(num_cols) {}
  // Offset of segment `idx` in the flattened row-major matrix.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int operator()(int idx) const {
    return idx * num_cols_;
  }
  int num_cols_;  // row width (elements per segment)
};
// Fill every row of a row-major (num_rows x num_cols) matrix with the
// column indices 0..num_cols-1. Rows are grid-strided over blocks and
// columns are block-strided over threads, so any launch shape covers all.
template <typename T>
static __global__ void FillIndex(T* indices, T num_rows, T num_cols) {
  for (T row = blockIdx.x; row < num_rows; row += gridDim.x) {
    T* row_ptr = indices + row * num_cols;
    for (T col = threadIdx.x; col < num_cols; col += blockDim.x) {
      row_ptr[col] = col;
    }
  }
}
// Scatter output gradients back to input positions, one row per segment:
//   dX[row][indices[row][col]] = dO[row][col]
// Rows are grid-strided over blocks, columns block-strided over threads.
template <typename T, typename IndType>
static __global__ void FillGrad(const T* dO, const IndType* indices, T* dX,
                                IndType num_rows, IndType num_cols) {
  for (IndType row = blockIdx.x; row < num_rows; row += gridDim.x) {
    const IndType row_off = row * num_cols;
    for (IndType col = threadIdx.x; col < num_cols; col += blockDim.x) {
      dX[row_off + indices[row_off + col]] = dO[row_off + col];
    }
  }
}
// Sort by flag descending, True: descending. False: Ascending.
// Default is false.
//
// Segmented (per-row) argsort of a (num_rows x num_cols) matrix using
// hipCUB's DeviceSegmentedRadixSort: sorted values go to `output`, and the
// originating column index of each value goes to `indices`.
template <typename T, typename IndType>
void ArgFullSort(const platform::CUDADeviceContext& ctx, const Tensor* input,
                 Tensor* output, Tensor* indices, const IndType num_rows,
                 const IndType num_cols, const bool descending) {
  auto cu_stream = ctx.stream();
  Tensor input_indices;
  const std::vector<IndType> dims = {num_rows, num_cols};
  auto dim = framework::make_ddim(dims);
  input_indices.Resize(dim);
  input_indices.mutable_data<IndType>(ctx.GetPlace());
  // -1 wraps to SIZE_MAX; overwritten by the sizing pass below.
  size_t temp_storage_bytes = -1;
  // Heuristic: one block per row, block width chosen from the row length.
  auto ComputeBlockSize = [](IndType col) {
    if (col > 512)
      return 1024;
    else if (col > 256 && col <= 512)
      return 512;
    else if (col > 128 && col <= 256)
      return 256;
    else if (col > 64 && col <= 128)
      return 128;
    else
      return 64;
  };
  int block_size = ComputeBlockSize(num_cols);
  int maxGridDimX = ctx.GetCUDAMaxGridDimSize().x;
  // actually, int num_rows < max_grid_size
  int grid_size = num_rows < maxGridDimX ? num_rows : maxGridDimX;
  // Init a index array
  hipLaunchKernelGGL(( FillIndex), dim3(grid_size), dim3(block_size), 0, cu_stream,
      input_indices.data<IndType>(), num_rows, num_cols);
  T* sorted_out_ptr;
  IndType* sorted_indices_ptr;
  const T* inp = input->data<T>();
  T* out = output->mutable_data<T>(ctx.GetPlace());
  IndType* ind = indices->mutable_data<IndType>(ctx.GetPlace());
  sorted_out_ptr = out;
  sorted_indices_ptr = ind;
  // create iter for counting input
  hipcub::CountingInputIterator<IndType> counting_iter(0);
  // segment_offset is used for move to next row
  hipcub::TransformInputIterator<IndType, SegmentOffsetIter,
                                 hipcub::CountingInputIterator<IndType>>
      segment_offsets_t(counting_iter, SegmentOffsetIter(num_cols));
  hipError_t err;
  // First pass with a null buffer only computes temp_storage_bytes.
  if (descending) {
    err = hipcub::DeviceSegmentedRadixSort::SortPairsDescending(
        nullptr, temp_storage_bytes, inp, sorted_out_ptr,
        input_indices.data<IndType>(), sorted_indices_ptr, num_cols * num_rows,
        num_rows, segment_offsets_t, segment_offsets_t + 1, 0, sizeof(T) * 8,
        cu_stream);
  } else {
    err = hipcub::DeviceSegmentedRadixSort::SortPairs(
        nullptr, temp_storage_bytes, inp, sorted_out_ptr,
        input_indices.data<IndType>(), sorted_indices_ptr, num_cols * num_rows,
        num_rows, segment_offsets_t, segment_offsets_t + 1, 0, sizeof(T) * 8,
        cu_stream);
  }
  // NOTE(review): the message names SortPairsDescending even on the
  // ascending path, and uses %d for a size_t — misleading but harmless.
  PADDLE_ENFORCE_CUDA_SUCCESS(
      err,
      "ArgSortOP failed as could not launch "
      "hipcub::DeviceSegmentedRadixSort::SortPairsDescending to calculate"
      "temp_storage_bytes, status:%s.",
      temp_storage_bytes, hipGetErrorString(err));
  Tensor temp_storage;
  temp_storage.mutable_data<uint8_t>(ctx.GetPlace(), temp_storage_bytes);
  // Second pass with the allocated buffer performs the actual sort.
  if (descending) {
    err = hipcub::DeviceSegmentedRadixSort::SortPairsDescending(
        temp_storage.data<uint8_t>(), temp_storage_bytes, inp, sorted_out_ptr,
        input_indices.data<IndType>(), sorted_indices_ptr, num_cols * num_rows,
        num_rows, segment_offsets_t, segment_offsets_t + 1, 0, sizeof(T) * 8,
        cu_stream);
  } else {
    err = hipcub::DeviceSegmentedRadixSort::SortPairs(
        temp_storage.data<uint8_t>(), temp_storage_bytes, inp, sorted_out_ptr,
        input_indices.data<IndType>(), sorted_indices_ptr, num_cols * num_rows,
        num_rows, segment_offsets_t, segment_offsets_t + 1, 0, sizeof(T) * 8,
        cu_stream);
  }
  PADDLE_ENFORCE_CUDA_SUCCESS(
      err,
      "ArgSortOP failed as could not launch "
      "hipcub::DeviceSegmentedRadixSort::SortPairsDescending to sort input, "
      "temp_storage_bytes:%d status:%s.",
      temp_storage_bytes, hipGetErrorString(err));
}
// Gradient of the full-sort path: scatters dO back through the recorded
// indices so dX[row][indices[row][col]] = dO[row][col].
template <typename T, typename IndType>
void ArgFullAssign(const platform::CUDADeviceContext& ctx, const Tensor* dO,
                   const Tensor* indices, Tensor* dX, const IndType num_rows,
                   const IndType num_cols) {
  auto cu_stream = ctx.stream();
  // Same block-size heuristic as ArgFullSort above.
  auto ComputeBlockSize = [](IndType col) {
    if (col > 512)
      return 1024;
    else if (col > 256 && col <= 512)
      return 512;
    else if (col > 128 && col <= 256)
      return 256;
    else if (col > 64 && col <= 128)
      return 128;
    else
      return 64;
  };
  int block_size = ComputeBlockSize(num_cols);
  int maxGridDimX = ctx.GetCUDAMaxGridDimSize().x;
  // actually, int num_rows < max_grid_size
  int grid_size = num_rows < maxGridDimX ? num_rows : maxGridDimX;
  hipLaunchKernelGGL(( FillGrad), dim3(grid_size), dim3(block_size), 0, cu_stream,
      dO->data<T>(), indices->data<IndType>(), dX->data<T>(), num_rows,
      num_cols);
}
// Argsort forward: writes the sorted values of X along `axis` to Out and the
// original positions of those values to Indices (int64).
template <typename T>
class ArgsortOpCUDAKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto* input = ctx.Input<Tensor>("X");
    auto* output = ctx.Output<Tensor>("Out");
    auto* indices = ctx.Output<Tensor>("Indices");
    int axis = ctx.Attr<int>("axis");
    bool descending = ctx.Attr<bool>("descending");
    auto in_dims = input->dims();
    axis = (axis < 0) ? (in_dims.size() + axis) : axis;
    // Special case for full sort along the last axis, speedup ~190x.
    if (axis == -1 || axis + 1 == in_dims.size()) {
      const int64_t input_height = framework::product(
          framework::slice_ddim(in_dims, 0, in_dims.size() - 1));
      const int64_t input_width = in_dims[in_dims.size() - 1];
      const auto& dev_ctx = ctx.cuda_device_context();
      ArgFullSort<T, int64_t>(dev_ctx, input, output, indices, input_height,
                              input_width, descending);
    } else {
      // Otherwise swap `axis` with the innermost dimension, sort, and swap
      // back. `trans` is the permutation that performs the swap.
      std::vector<int> trans;
      for (int i = 0; i < axis; i++) {
        trans.push_back(i);
      }
      trans.push_back(in_dims.size() - 1);
      for (int i = axis + 1; i < in_dims.size() - 1; i++) {
        trans.push_back(i);
      }
      trans.push_back(axis);
      framework::DDim trans_dims(in_dims);
      for (size_t i = 0; i < trans.size(); i++) {
        trans_dims[i] = in_dims[trans[i]];
      }
      Tensor trans_inp;
      // mutable_data allocates device memory; the returned pointer is unused.
      trans_inp.mutable_data<T>(trans_dims, ctx.GetPlace());
      int ndims = trans.size();
      const auto& dev_ctx = ctx.cuda_device_context();
      // Move the sort axis to the innermost position.
      TransCompute<platform::CUDADeviceContext, T>(ndims, dev_ctx, *input,
                                                   &trans_inp, trans);
      const int64_t input_height = framework::product(
          framework::slice_ddim(trans_dims, 0, trans_dims.size() - 1));
      const int64_t input_width = trans_dims[trans_dims.size() - 1];
      Tensor tmp_out;
      tmp_out.mutable_data<T>(trans_dims, ctx.GetPlace());
      output->mutable_data<T>(ctx.GetPlace());
      Tensor tmp_indices;
      // temp indices for sorting
      tmp_indices.mutable_data<int64_t>(trans_dims, ctx.GetPlace());
      indices->mutable_data<int64_t>(ctx.GetPlace());
      ArgFullSort<T, int64_t>(dev_ctx, &trans_inp, &tmp_out, &tmp_indices,
                              input_height, input_width, descending);
      // Transpose results back (the swap permutation is its own inverse).
      TransCompute<platform::CUDADeviceContext, int64_t>(
          ndims, dev_ctx, tmp_indices, indices, trans);
      TransCompute<platform::CUDADeviceContext, T>(ndims, dev_ctx, tmp_out,
                                                   output, trans);
      return;
    }
  }
};
// Argsort backward: routes dOut back through the recorded indices so that
// dX[..., indices[..., i]] = dOut[..., i] along the sorted axis.
template <typename T>
class ArgsortGradOpCUDAKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto* indices = ctx.Input<Tensor>("Indices");
    auto* dX = ctx.Output<Tensor>(framework::GradVarName("X"));
    auto* dO = ctx.Input<Tensor>(framework::GradVarName("Out"));
    int axis = ctx.Attr<int>("axis");
    dX->mutable_data<T>(ctx.GetPlace());
    // Zero-initialize dX: FillGrad only writes the scattered positions.
    auto dxt = framework::EigenVector<T>::Flatten(*dX);
    auto& place = *ctx.template device_context<platform::CUDADeviceContext>()
                       .eigen_device();
    dxt.device(place) = dxt.constant(static_cast<T>(0));
    if (dO->numel() == 0) return;
    auto in_dims = indices->dims();
    axis = (axis < 0) ? (in_dims.size() + axis) : axis;
    // Special case for full sort along the last axis, speedup ~190x.
    if (axis == -1 || axis + 1 == in_dims.size()) {
      const int64_t input_height = framework::product(
          framework::slice_ddim(in_dims, 0, in_dims.size() - 1));
      const int64_t input_width = in_dims[in_dims.size() - 1];
      const auto& dev_ctx = ctx.cuda_device_context();
      ArgFullAssign<T, int64_t>(dev_ctx, dO, indices, dX, input_height,
                                input_width);
    } else {
      // Otherwise swap `axis` with the innermost dimension first.
      std::vector<int> trans;
      for (int i = 0; i < axis; i++) {
        trans.push_back(i);
      }
      trans.push_back(in_dims.size() - 1);
      for (int i = axis + 1; i < in_dims.size() - 1; i++) {
        trans.push_back(i);
      }
      trans.push_back(axis);
      framework::DDim trans_dims(in_dims);
      for (size_t i = 0; i < trans.size(); i++) {
        trans_dims[i] = in_dims[trans[i]];
      }
      Tensor trans_dO;
      trans_dO.mutable_data<T>(trans_dims, ctx.GetPlace());
      Tensor trans_ind;
      trans_ind.mutable_data<int64_t>(trans_dims, ctx.GetPlace());
      int ndims = trans.size();
      const auto& dev_ctx = ctx.cuda_device_context();
      // Transpose both dOut and the indices into the permuted layout.
      TransCompute<platform::CUDADeviceContext, T>(ndims, dev_ctx, *dO,
                                                   &trans_dO, trans);
      TransCompute<platform::CUDADeviceContext, int64_t>(
          ndims, dev_ctx, *indices, &trans_ind, trans);
      const int64_t input_height = framework::product(
          framework::slice_ddim(trans_dims, 0, trans_dims.size() - 1));
      const int64_t input_width = trans_dims[trans_dims.size() - 1];
      Tensor tmp_out;
      tmp_out.mutable_data<T>(trans_dims, ctx.GetPlace());
      ArgFullAssign<T, int64_t>(dev_ctx, &trans_dO, &trans_ind, &tmp_out,
                                input_height, input_width);
      // Transpose back (the swap permutation is its own inverse).
      TransCompute<platform::CUDADeviceContext, T>(ndims, dev_ctx, tmp_out, dX,
                                                   trans);
      return;
    }
  }
};
} // namespace operators
} // namespace paddle
// Register the argsort forward kernels for all supported element types.
REGISTER_OP_CUDA_KERNEL(
    argsort, paddle::operators::ArgsortOpCUDAKernel<float>,
    paddle::operators::ArgsortOpCUDAKernel<double>,
    paddle::operators::ArgsortOpCUDAKernel<int>,
    paddle::operators::ArgsortOpCUDAKernel<int64_t>,
    paddle::operators::ArgsortOpCUDAKernel<paddle::platform::float16>);
// Register the matching gradient kernels.
REGISTER_OP_CUDA_KERNEL(
    argsort_grad, paddle::operators::ArgsortGradOpCUDAKernel<float>,
    paddle::operators::ArgsortGradOpCUDAKernel<double>,
    paddle::operators::ArgsortGradOpCUDAKernel<int>,
    paddle::operators::ArgsortGradOpCUDAKernel<int64_t>,
    paddle::operators::ArgsortGradOpCUDAKernel<paddle::platform::float16>);
| a2b0f2b61d589faffde19b5afc6905ff3e2c4103.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <thrust/execution_policy.h>
#include <thrust/sort.h>
#include "cub/cub.cuh"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/argsort_op.h"
#include "paddle/fluid/operators/transpose_op.h"
#include "paddle/fluid/platform/cuda_device_function.h"
#include "paddle/fluid/platform/cuda_primitives.h"
// set cub base traits in order to handle float16: registers float16 with
// cub's radix sort as a 16-bit floating-point key (bit pattern in uint16_t),
// so DeviceSegmentedRadixSort below can order float16 values correctly.
namespace cub {
template <>
struct NumericTraits<paddle::platform::float16>
    : BaseTraits<FLOATING_POINT, true, false, uint16_t,
                 paddle::platform::float16> {};
}  // namespace cub
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
// Iter for move to next row: maps segment index -> element offset of that
// row's first element. Used below as cub's segment-offset iterator, so
// segment i spans [i * num_cols, (i + 1) * num_cols).
struct SegmentOffsetIter {
  EIGEN_DEVICE_FUNC
  explicit SegmentOffsetIter(int num_cols) : num_cols_(num_cols) {}
  // Offset of segment `idx` in the flattened row-major matrix.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int operator()(int idx) const {
    return idx * num_cols_;
  }
  int num_cols_;  // row width (elements per segment)
};
// Fill every row of a row-major (num_rows x num_cols) matrix with the
// column indices 0..num_cols-1. Rows are grid-strided over blocks and
// columns are block-strided over threads, so any launch shape covers all.
template <typename T>
static __global__ void FillIndex(T* indices, T num_rows, T num_cols) {
  for (T row = blockIdx.x; row < num_rows; row += gridDim.x) {
    T* row_ptr = indices + row * num_cols;
    for (T col = threadIdx.x; col < num_cols; col += blockDim.x) {
      row_ptr[col] = col;
    }
  }
}
// Scatter output gradients back to input positions, one row per segment:
//   dX[row][indices[row][col]] = dO[row][col]
// Rows are grid-strided over blocks, columns block-strided over threads.
template <typename T, typename IndType>
static __global__ void FillGrad(const T* dO, const IndType* indices, T* dX,
                                IndType num_rows, IndType num_cols) {
  for (IndType row = blockIdx.x; row < num_rows; row += gridDim.x) {
    const IndType row_off = row * num_cols;
    for (IndType col = threadIdx.x; col < num_cols; col += blockDim.x) {
      dX[row_off + indices[row_off + col]] = dO[row_off + col];
    }
  }
}
// Sort by flag descending, True: descending. False: Ascending.
// Default is false.
//
// Segmented (per-row) argsort of a (num_rows x num_cols) matrix using
// cub's DeviceSegmentedRadixSort: sorted values go to `output`, and the
// originating column index of each value goes to `indices`.
template <typename T, typename IndType>
void ArgFullSort(const platform::CUDADeviceContext& ctx, const Tensor* input,
                 Tensor* output, Tensor* indices, const IndType num_rows,
                 const IndType num_cols, const bool descending) {
  auto cu_stream = ctx.stream();
  Tensor input_indices;
  const std::vector<IndType> dims = {num_rows, num_cols};
  auto dim = framework::make_ddim(dims);
  input_indices.Resize(dim);
  input_indices.mutable_data<IndType>(ctx.GetPlace());
  // -1 wraps to SIZE_MAX; overwritten by the sizing pass below.
  size_t temp_storage_bytes = -1;
  // Heuristic: one block per row, block width chosen from the row length.
  auto ComputeBlockSize = [](IndType col) {
    if (col > 512)
      return 1024;
    else if (col > 256 && col <= 512)
      return 512;
    else if (col > 128 && col <= 256)
      return 256;
    else if (col > 64 && col <= 128)
      return 128;
    else
      return 64;
  };
  int block_size = ComputeBlockSize(num_cols);
  int maxGridDimX = ctx.GetCUDAMaxGridDimSize().x;
  // actually, int num_rows < max_grid_size
  int grid_size = num_rows < maxGridDimX ? num_rows : maxGridDimX;
  // Init a index array
  FillIndex<<<grid_size, block_size, 0, cu_stream>>>(
      input_indices.data<IndType>(), num_rows, num_cols);
  T* sorted_out_ptr;
  IndType* sorted_indices_ptr;
  const T* inp = input->data<T>();
  T* out = output->mutable_data<T>(ctx.GetPlace());
  IndType* ind = indices->mutable_data<IndType>(ctx.GetPlace());
  sorted_out_ptr = out;
  sorted_indices_ptr = ind;
  // create iter for counting input
  cub::CountingInputIterator<IndType> counting_iter(0);
  // segment_offset is used for move to next row
  cub::TransformInputIterator<IndType, SegmentOffsetIter,
                              cub::CountingInputIterator<IndType>>
      segment_offsets_t(counting_iter, SegmentOffsetIter(num_cols));
  cudaError_t err;
  // First pass with a null buffer only computes temp_storage_bytes.
  if (descending) {
    err = cub::DeviceSegmentedRadixSort::SortPairsDescending(
        nullptr, temp_storage_bytes, inp, sorted_out_ptr,
        input_indices.data<IndType>(), sorted_indices_ptr, num_cols * num_rows,
        num_rows, segment_offsets_t, segment_offsets_t + 1, 0, sizeof(T) * 8,
        cu_stream);
  } else {
    err = cub::DeviceSegmentedRadixSort::SortPairs(
        nullptr, temp_storage_bytes, inp, sorted_out_ptr,
        input_indices.data<IndType>(), sorted_indices_ptr, num_cols * num_rows,
        num_rows, segment_offsets_t, segment_offsets_t + 1, 0, sizeof(T) * 8,
        cu_stream);
  }
  // NOTE(review): the message names SortPairsDescending even on the
  // ascending path, and uses %d for a size_t — misleading but harmless.
  PADDLE_ENFORCE_CUDA_SUCCESS(
      err,
      "ArgSortOP failed as could not launch "
      "cub::DeviceSegmentedRadixSort::SortPairsDescending to calculate"
      "temp_storage_bytes, status:%s.",
      temp_storage_bytes, cudaGetErrorString(err));
  Tensor temp_storage;
  temp_storage.mutable_data<uint8_t>(ctx.GetPlace(), temp_storage_bytes);
  // Second pass with the allocated buffer performs the actual sort.
  if (descending) {
    err = cub::DeviceSegmentedRadixSort::SortPairsDescending(
        temp_storage.data<uint8_t>(), temp_storage_bytes, inp, sorted_out_ptr,
        input_indices.data<IndType>(), sorted_indices_ptr, num_cols * num_rows,
        num_rows, segment_offsets_t, segment_offsets_t + 1, 0, sizeof(T) * 8,
        cu_stream);
  } else {
    err = cub::DeviceSegmentedRadixSort::SortPairs(
        temp_storage.data<uint8_t>(), temp_storage_bytes, inp, sorted_out_ptr,
        input_indices.data<IndType>(), sorted_indices_ptr, num_cols * num_rows,
        num_rows, segment_offsets_t, segment_offsets_t + 1, 0, sizeof(T) * 8,
        cu_stream);
  }
  PADDLE_ENFORCE_CUDA_SUCCESS(
      err,
      "ArgSortOP failed as could not launch "
      "cub::DeviceSegmentedRadixSort::SortPairsDescending to sort input, "
      "temp_storage_bytes:%d status:%s.",
      temp_storage_bytes, cudaGetErrorString(err));
}
// Scatters each row of `dO` back to the source positions recorded in
// `indices`, writing the result into `dX` (the backward pass of argsort).
// Grid/block layout: one thread block per input row, capped at the device's
// maximum grid dimension; threads-per-block scales with the row width.
template <typename T, typename IndType>
void ArgFullAssign(const platform::CUDADeviceContext& ctx, const Tensor* dO,
                   const Tensor* indices, Tensor* dX, const IndType num_rows,
                   const IndType num_cols) {
  auto cu_stream = ctx.stream();
  // Choose threads-per-block from the row width: 64 for narrow rows, then
  // doubling per threshold up to the 1024-thread maximum.
  auto pick_block_size = [](IndType cols) {
    if (cols > 512) return 1024;
    if (cols > 256) return 512;
    if (cols > 128) return 256;
    if (cols > 64) return 128;
    return 64;
  };
  const int block_size = pick_block_size(num_cols);
  const int max_grid_x = ctx.GetCUDAMaxGridDimSize().x;
  // One block per row when the grid limit allows it.
  const int grid_size = num_rows < max_grid_x ? num_rows : max_grid_x;
  FillGrad<<<grid_size, block_size, 0, cu_stream>>>(
      dO->data<T>(), indices->data<IndType>(), dX->data<T>(), num_rows,
      num_cols);
}
template <typename T>
class ArgsortOpCUDAKernel : public framework::OpKernel<T> {
 public:
  // Sorts `X` along `axis`: sorted values go to `Out`, the pre-sort position
  // of each element goes to `Indices` (int64).
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto* input = ctx.Input<Tensor>("X");
    auto* output = ctx.Output<Tensor>("Out");
    auto* indices = ctx.Output<Tensor>("Indices");
    int axis = ctx.Attr<int>("axis");
    bool descending = ctx.Attr<bool>("descending");

    auto in_dims = input->dims();
    // Normalize a negative axis (e.g. -1 means the last dimension).
    axis = (axis < 0) ? (in_dims.size() + axis) : axis;
    // FIX: removed the unused `numel` / `groups` locals.

    // Special case for full sort, speedup ~190x.
    // (axis == -1 is unreachable after normalization; kept for safety.)
    if (axis == -1 || axis + 1 == in_dims.size()) {
      const int64_t input_height = framework::product(
          framework::slice_ddim(in_dims, 0, in_dims.size() - 1));
      const int64_t input_width = in_dims[in_dims.size() - 1];
      const auto& dev_ctx = ctx.cuda_device_context();
      ArgFullSort<T, int64_t>(dev_ctx, input, output, indices, input_height,
                              input_width, descending);
    } else {
      // General case: rotate `axis` into the last position, run the fast
      // last-axis sort, then rotate the results back.
      std::vector<int> trans;
      for (int i = 0; i < axis; i++) {
        trans.push_back(i);
      }
      trans.push_back(in_dims.size() - 1);
      for (int i = axis + 1; i < in_dims.size() - 1; i++) {
        trans.push_back(i);
      }
      trans.push_back(axis);
      framework::DDim trans_dims(in_dims);
      // FIX: size_t index avoids a signed/unsigned comparison.
      for (size_t i = 0; i < trans.size(); i++) {
        trans_dims[i] = in_dims[trans[i]];
      }

      Tensor trans_inp;
      // FIX: dropped the unused `trans_inp_data` local; the allocation
      // itself is still required.
      trans_inp.mutable_data<T>(trans_dims, ctx.GetPlace());
      int ndims = trans.size();
      const auto& dev_ctx = ctx.cuda_device_context();
      // Transpose the input so `axis` becomes the last dimension.
      TransCompute<platform::CUDADeviceContext, T>(ndims, dev_ctx, *input,
                                                   &trans_inp, trans);

      const int64_t input_height = framework::product(
          framework::slice_ddim(trans_dims, 0, trans_dims.size() - 1));
      const int64_t input_width = trans_dims[trans_dims.size() - 1];

      Tensor tmp_out;
      tmp_out.mutable_data<T>(trans_dims, ctx.GetPlace());
      // FIX: dropped the unused `out_data` local; `output` must still be
      // allocated before TransCompute writes into it below.
      output->mutable_data<T>(ctx.GetPlace());

      Tensor tmp_indices;
      // temp indices for sorting
      tmp_indices.mutable_data<int64_t>(trans_dims, ctx.GetPlace());
      indices->mutable_data<int64_t>(ctx.GetPlace());

      ArgFullSort<T, int64_t>(dev_ctx, &trans_inp, &tmp_out, &tmp_indices,
                              input_height, input_width, descending);

      TransCompute<platform::CUDADeviceContext, int64_t>(
          ndims, dev_ctx, tmp_indices, indices, trans);
      // transpose back
      TransCompute<platform::CUDADeviceContext, T>(ndims, dev_ctx, tmp_out,
                                                   output, trans);
      return;
    }
  }
};
template <typename T>
class ArgsortGradOpCUDAKernel : public framework::OpKernel<T> {
 public:
  // Backward of argsort: scatters dOut back through `Indices` into dX.
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto* indices = ctx.Input<Tensor>("Indices");
    auto* dX = ctx.Output<Tensor>(framework::GradVarName("X"));
    auto* dO = ctx.Input<Tensor>(framework::GradVarName("Out"));
    int axis = ctx.Attr<int>("axis");

    // Zero-fill dX first; positions not touched by the scatter stay 0.
    dX->mutable_data<T>(ctx.GetPlace());
    auto dxt = framework::EigenVector<T>::Flatten(*dX);
    auto& place = *ctx.template device_context<platform::CUDADeviceContext>()
                       .eigen_device();
    dxt.device(place) = dxt.constant(static_cast<T>(0));
    if (dO->numel() == 0) return;

    auto in_dims = indices->dims();
    // Normalize a negative axis (e.g. -1 means the last dimension).
    axis = (axis < 0) ? (in_dims.size() + axis) : axis;
    // FIX: removed the unused `numel` local.

    // Special case for full sort, speedup ~190x.
    // (axis == -1 is unreachable after normalization; kept for safety.)
    if (axis == -1 || axis + 1 == in_dims.size()) {
      const int64_t input_height = framework::product(
          framework::slice_ddim(in_dims, 0, in_dims.size() - 1));
      const int64_t input_width = in_dims[in_dims.size() - 1];
      const auto& dev_ctx = ctx.cuda_device_context();
      ArgFullAssign<T, int64_t>(dev_ctx, dO, indices, dX, input_height,
                                input_width);
    } else {
      // General case: rotate `axis` last, scatter, rotate back.
      std::vector<int> trans;
      for (int i = 0; i < axis; i++) {
        trans.push_back(i);
      }
      trans.push_back(in_dims.size() - 1);
      for (int i = axis + 1; i < in_dims.size() - 1; i++) {
        trans.push_back(i);
      }
      trans.push_back(axis);
      framework::DDim trans_dims(in_dims);
      // FIX: size_t index avoids a signed/unsigned comparison.
      for (size_t i = 0; i < trans.size(); i++) {
        trans_dims[i] = in_dims[trans[i]];
      }

      Tensor trans_dO;
      trans_dO.mutable_data<T>(trans_dims, ctx.GetPlace());
      Tensor trans_ind;
      trans_ind.mutable_data<int64_t>(trans_dims, ctx.GetPlace());
      int ndims = trans.size();
      const auto& dev_ctx = ctx.cuda_device_context();
      // Transpose dOut and the indices so `axis` is last.
      TransCompute<platform::CUDADeviceContext, T>(ndims, dev_ctx, *dO,
                                                   &trans_dO, trans);
      TransCompute<platform::CUDADeviceContext, int64_t>(
          ndims, dev_ctx, *indices, &trans_ind, trans);

      const int64_t input_height = framework::product(
          framework::slice_ddim(trans_dims, 0, trans_dims.size() - 1));
      const int64_t input_width = trans_dims[trans_dims.size() - 1];

      Tensor tmp_out;
      tmp_out.mutable_data<T>(trans_dims, ctx.GetPlace());

      ArgFullAssign<T, int64_t>(dev_ctx, &trans_dO, &trans_ind, &tmp_out,
                                input_height, input_width);

      // transpose back
      TransCompute<platform::CUDADeviceContext, T>(ndims, dev_ctx, tmp_out, dX,
                                                   trans);
      return;
    }
  }
};
} // namespace operators
} // namespace paddle
// Register the forward argsort CUDA kernel for all supported element types
// (indices are always int64).
REGISTER_OP_CUDA_KERNEL(
    argsort, paddle::operators::ArgsortOpCUDAKernel<float>,
    paddle::operators::ArgsortOpCUDAKernel<double>,
    paddle::operators::ArgsortOpCUDAKernel<int>,
    paddle::operators::ArgsortOpCUDAKernel<int64_t>,
    paddle::operators::ArgsortOpCUDAKernel<paddle::platform::float16>);
// Register the matching gradient kernel for the same element types.
REGISTER_OP_CUDA_KERNEL(
    argsort_grad, paddle::operators::ArgsortGradOpCUDAKernel<float>,
    paddle::operators::ArgsortGradOpCUDAKernel<double>,
    paddle::operators::ArgsortGradOpCUDAKernel<int>,
    paddle::operators::ArgsortGradOpCUDAKernel<int64_t>,
    paddle::operators::ArgsortGradOpCUDAKernel<paddle::platform::float16>);
|
b074fc97da68dcc57ac496a450e404019c3a9344.hip | // !!! This is a file automatically generated by hipify!!!
#include <vector>
#include <algorithm>
#include <iostream>
#include <fstream>
#include <sstream>
#include <cassert>
#include "params.h"
#include "voronoi.h"
#include "stopwatch.h"
#include "CPU/voronoi_fallback.h"
// Computes the axis-aligned bounding box of `xyz` (flat x,y,z triples),
// inflated by 0.1% of the largest side so no point lies exactly on the
// boundary. Assumes at least one point.
// FIX: use std::min/std::max (as the CUDA twin of this file does) instead of
// the non-standard global ::min/::max, which is not portable host-side C++.
void get_bbox(const std::vector<float>& xyz, float& xmin, float& ymin, float& zmin, float& xmax, float& ymax, float& zmax) {
    int nb_v = xyz.size()/3;
    xmin = xmax = xyz[0];
    ymin = ymax = xyz[1];
    zmin = zmax = xyz[2];
    for(int i=1; i<nb_v; ++i) {
        xmin = std::min(xmin, xyz[3*i]);
        ymin = std::min(ymin, xyz[3*i+1]);
        zmin = std::min(zmin, xyz[3*i+2]);
        xmax = std::max(xmax, xyz[3*i]);
        ymax = std::max(ymax, xyz[3*i+1]);
        zmax = std::max(zmax, xyz[3*i+2]);
    }
    // Margin: 0.1% of the longest box side.
    float d = xmax-xmin;
    d = std::max(d, ymax-ymin);
    d = std::max(d, zmax-zmin);
    d = 0.001f*d;
    xmin -= d;
    ymin -= d;
    zmin -= d;
    xmax += d;
    ymax += d;
    zmax += d;
}
// Reads a .xyz point file: the first non-empty line is the point count, each
// later non-empty line is "x y z". Returns false if the file cannot be
// opened. With normalize=true the cloud is rescaled into [0,1000]^3.
bool load_file(const char* filename, std::vector<float>& xyz, bool normalize=true) {
    std::ifstream in;
    in.open(filename, std::ifstream::in);
    if (in.fail()) return false;
    std::string line;
    int npts = 0;
    bool firstline = true;
    float x,y,z;
    // FIX: loop on getline's result instead of `while (!in.eof())` — eof()
    // only becomes true after a read has already failed.
    while (std::getline(in, line)) {
        if (!line.length()) continue;
        std::istringstream iss(line.c_str());
        if (firstline) {
            iss >> npts;
            firstline = false;
        } else {
            iss >> x >> y >> z;
            xyz.push_back(x);
            xyz.push_back(y);
            xyz.push_back(z);
        }
    }
    // FIX: cast avoids a signed/unsigned comparison in the assert.
    assert(xyz.size() == static_cast<size_t>(npts)*3);
    in.close();
    if (normalize) { // normalize point cloud between [0,1000]^3
        float xmin,ymin,zmin,xmax,ymax,zmax;
        get_bbox(xyz, xmin, ymin, zmin, xmax, ymax, zmax);
        // FIX: std::max instead of the non-standard ::max.
        float maxside = std::max(std::max(xmax-xmin, ymax-ymin), zmax-zmin);
        const int nb = static_cast<int>(xyz.size()/3);
#pragma omp parallel for
        for (int i=0; i<nb; i++) {
            xyz[i*3+0] = 1000.f*(xyz[i*3+0]-xmin)/maxside;
            xyz[i*3+1] = 1000.f*(xyz[i*3+1]-ymin)/maxside;
            xyz[i*3+2] = 1000.f*(xyz[i*3+2]-zmin)/maxside;
        }
        get_bbox(xyz, xmin, ymin, zmin, xmax, ymax, zmax);
        std::cerr << "bbox [" << xmin << ":" << xmax << "], [" << ymin << ":" << ymax << "], [" << zmin << ":" << zmax << "]" << std::endl;
    }
    return true;
}
// Writes the valid points of `pts` (flat x,y,z,w quadruples with w > 0) to an
// .xyz file: a count line, then one "x/w y/w z/w" line per valid point.
void drop_xyz_file(std::vector<float>& pts, const char *filename) {
    const unsigned int npts = pts.size() / 4;
    // First pass: count points with a positive weight.
    unsigned int valid = 0;
    for (unsigned int i = 0; i < npts; ++i)
        if (pts[4 * i + 3] > 0)
            ++valid;
    std::fstream out;
    out.open(filename, std::ios_base::out);
    out << valid << std::endl;
    // Second pass: emit each valid point de-homogenized by its weight.
    for (unsigned int i = 0; i < npts; ++i) {
        const float w = pts[4 * i + 3];
        if (w > 0)
            out << pts[4 * i] / w << " " << pts[4 * i + 1] / w << " "
                << pts[4 * i + 2] / w << std::endl;
    }
    out.close();
}
// Prints the number of HIP/CUDA devices and a property summary for each.
// Exits the process if device enumeration fails.
void printDevProp() {
    int devCount; // Number of CUDA devices
    hipError_t err = hipGetDeviceCount(&devCount);
    if (err != hipSuccess) {
        // FIX: the message contained a stray "<<" inside the string literal.
        std::cerr << "Failed to initialize CUDA / failed to count CUDA devices (error code: "
                  << hipGetErrorString(err) << ")! [file: " << __FILE__ << ", line: " << __LINE__ << "]" << std::endl;
        exit(1);
    }
    printf("CUDA Device Query...\n");
    printf("There are %d CUDA devices.\n", devCount);
    // Iterate through devices
    for (int i=0; i<devCount; ++i) {
        // Get device properties
        printf("\nCUDA Device #%d\n", i);
        hipDeviceProp_t devProp;
        hipGetDeviceProperties(&devProp, i);
        printf("Major revision number: %d\n", devProp.major);
        printf("Minor revision number: %d\n", devProp.minor);
        printf("Name: %s\n", devProp.name);
        printf("Total global memory: %lu\n", devProp.totalGlobalMem);
        printf("Total shared memory per block: %lu\n", devProp.sharedMemPerBlock);
        printf("Total registers per block: %d\n", devProp.regsPerBlock);
        printf("Warp size: %d\n", devProp.warpSize);
        printf("Maximum memory pitch: %lu\n", devProp.memPitch);
        printf("Maximum threads per block: %d\n", devProp.maxThreadsPerBlock);
        // FIX: the dimension loops shadowed the device index `i`.
        for (int d = 0; d < 3; ++d)
            printf("Maximum dimension %d of block: %d\n", d, devProp.maxThreadsDim[d]);
        for (int d = 0; d < 3; ++d)
            printf("Maximum dimension %d of grid: %d\n", d, devProp.maxGridSize[d]);
        printf("Clock rate: %d\n", devProp.clockRate);
        printf("Total constant memory: %lu\n", devProp.totalConstMem);
        printf("Texture alignment: %lu\n", devProp.textureAlignment);
        printf("Concurrent copy and execution: %s\n", (devProp.deviceOverlap ? "Yes" : "No"));
        printf("Number of multiprocessors: %d\n", devProp.multiProcessorCount);
        printf("Kernel execution timeout: %s\n", (devProp.kernelExecTimeoutEnabled ? "Yes" : "No"));
    }
}
// Loads a point cloud, builds a fixed bounding tetrahedral domain, and runs
// the Voronoi diagram computation on CPU (and optionally GPU), dumping the
// resulting barycenters to .xyz files.
int main(int argc, char** argv) {
    initialize_geogram(argc, argv);
    printDevProp();
    if (2>argc) {
        std::cerr << "Usage: " << argv[0] << " points.xyz" << std::endl;
        return 1;
    }
    // Touch the GPU once so driver/context setup is excluded from timings.
    int *initptr = NULL;
    hipError_t err = hipMalloc(&initptr, sizeof(int)); // unused memory, needed for initialize the GPU before time measurements
    if (err != hipSuccess) {
        // FIX: the message contained a stray "<<" inside the string literal.
        std::cerr << "Failed to allocate (error code: " << hipGetErrorString(err) << ")! [file: " << __FILE__ << ", line: " << __LINE__ << "]" << std::endl;
        return 1;
    }
    std::vector<float> pts;
    if (!load_file(argv[1], pts, false)) {
        std::cerr << argv[1] << ": could not load file" << std::endl;
        return 1;
    }
    int nb_pts = pts.size()/3;
    // FIX: added the missing separator in the status message.
    std::cout << "number of points: " << nb_pts << std::endl;
    // Bounding domain: the cube [100,300]^3 plus one interior vertex, split
    // into 12 tetrahedra (vertices as x,y,z,w quadruples; w unused here).
    std::vector<float> tet_pts;
    tet_pts.push_back(100); tet_pts.push_back(100); tet_pts.push_back(100); tet_pts.push_back(0);
    tet_pts.push_back(100); tet_pts.push_back(100); tet_pts.push_back(300); tet_pts.push_back(0);
    tet_pts.push_back(100); tet_pts.push_back(300); tet_pts.push_back(100); tet_pts.push_back(0);
    tet_pts.push_back(100); tet_pts.push_back(300); tet_pts.push_back(300); tet_pts.push_back(0);
    tet_pts.push_back(300); tet_pts.push_back(100); tet_pts.push_back(100); tet_pts.push_back(0);
    tet_pts.push_back(300); tet_pts.push_back(100); tet_pts.push_back(300); tet_pts.push_back(0);
    tet_pts.push_back(300); tet_pts.push_back(300); tet_pts.push_back(100); tet_pts.push_back(0);
    tet_pts.push_back(300); tet_pts.push_back(300); tet_pts.push_back(300); tet_pts.push_back(0);
    tet_pts.push_back(187.95); tet_pts.push_back(230.46); tet_pts.push_back(163.79); tet_pts.push_back(0);
    std::vector<int> tet_indices;
    tet_indices.push_back(0); tet_indices.push_back(2); tet_indices.push_back(3); tet_indices.push_back(8);
    tet_indices.push_back(6); tet_indices.push_back(3); tet_indices.push_back(8); tet_indices.push_back(7);
    tet_indices.push_back(1); tet_indices.push_back(8); tet_indices.push_back(3); tet_indices.push_back(7);
    tet_indices.push_back(1); tet_indices.push_back(8); tet_indices.push_back(5); tet_indices.push_back(0);
    tet_indices.push_back(8); tet_indices.push_back(1); tet_indices.push_back(5); tet_indices.push_back(7);
    tet_indices.push_back(1); tet_indices.push_back(8); tet_indices.push_back(0); tet_indices.push_back(3);
    tet_indices.push_back(5); tet_indices.push_back(8); tet_indices.push_back(4); tet_indices.push_back(0);
    tet_indices.push_back(5); tet_indices.push_back(6); tet_indices.push_back(8); tet_indices.push_back(7);
    tet_indices.push_back(5); tet_indices.push_back(6); tet_indices.push_back(4); tet_indices.push_back(8);
    tet_indices.push_back(4); tet_indices.push_back(2); tet_indices.push_back(0); tet_indices.push_back(8);
    tet_indices.push_back(6); tet_indices.push_back(3); tet_indices.push_back(2); tet_indices.push_back(8);
    tet_indices.push_back(2); tet_indices.push_back(4); tet_indices.push_back(6); tet_indices.push_back(8);
    // Choose between GPU and CPU.
    bool gpu = false;
    bool cpu = true;
    if (gpu) {
        std::vector<int> KNN;
        std::vector<float> bary(nb_pts * 4, 0);
        std::vector<Status> stat(nb_pts, security_radius_not_reached);
        compute_voro_diagram_GPU(pts, stat, bary, tet_pts, tet_indices, &KNN);
        // Now computes on the CPU the cells that were not
        // sucessfully computed on the GPU (the ones for
        // which stat[v] != success).
        //fallback_voro_diagram_CPU(pts, stat, bary, KNN);
        drop_xyz_file(bary, "gpu.xyz");
    }
    if (cpu) {
        std::vector<int> KNN;
        std::vector<float> bary(nb_pts * 4, 0);
        std::vector<Status> stat(nb_pts, security_radius_not_reached);
        compute_voro_diagram_CPU(pts, stat, bary, tet_pts, tet_indices, &KNN);
        //fallback_voro_diagram_CPU(pts, stat, bary, KNN);
        drop_xyz_file(bary, "cpu.xyz");
    }
    hipFree(initptr);
    return 0;
}
| b074fc97da68dcc57ac496a450e404019c3a9344.cu | #include <vector>
#include <algorithm>
#include <iostream>
#include <fstream>
#include <sstream>
#include <cassert>
#include "params.h"
#include "voronoi.h"
#include "stopwatch.h"
#include "CPU/voronoi_fallback.h"
// Computes the axis-aligned bounding box of `xyz` (flat x,y,z triples),
// inflated by 0.1% of the largest side so no point lies exactly on the
// boundary. Assumes at least one point.
void get_bbox(const std::vector<float>& xyz, float& xmin, float& ymin, float& zmin, float& xmax, float& ymax, float& zmax) {
    // Seed the bounds with the first point, then grow them over the rest.
    const int nb_v = xyz.size()/3;
    xmin = xmax = xyz[0];
    ymin = ymax = xyz[1];
    zmin = zmax = xyz[2];
    for (int i = 1; i < nb_v; ++i) {
        const float* p = &xyz[3*i];
        if (p[0] < xmin) xmin = p[0];
        if (p[0] > xmax) xmax = p[0];
        if (p[1] < ymin) ymin = p[1];
        if (p[1] > ymax) ymax = p[1];
        if (p[2] < zmin) zmin = p[2];
        if (p[2] > zmax) zmax = p[2];
    }
    // Inflate by 0.1% of the longest box side.
    const float margin = 0.001f * std::max(std::max(xmax-xmin, ymax-ymin), zmax-zmin);
    xmin -= margin;
    ymin -= margin;
    zmin -= margin;
    xmax += margin;
    ymax += margin;
    zmax += margin;
}
// Reads a .xyz point file: the first non-empty line is the point count, each
// later non-empty line is "x y z". Returns false if the file cannot be
// opened. With normalize=true the cloud is rescaled into [0,1000]^3.
bool load_file(const char* filename, std::vector<float>& xyz, bool normalize=true) {
    std::ifstream in;
    in.open(filename, std::ifstream::in);
    if (in.fail()) return false;
    std::string line;
    int npts = 0;
    bool firstline = true;
    float x,y,z;
    // FIX: loop on getline's result instead of `while (!in.eof())` — eof()
    // only becomes true after a read has already failed.
    while (std::getline(in, line)) {
        if (!line.length()) continue;
        std::istringstream iss(line.c_str());
        if (firstline) {
            iss >> npts;
            firstline = false;
        } else {
            iss >> x >> y >> z;
            xyz.push_back(x);
            xyz.push_back(y);
            xyz.push_back(z);
        }
    }
    // FIX: cast avoids a signed/unsigned comparison in the assert.
    assert(xyz.size() == static_cast<size_t>(npts)*3);
    in.close();
    if (normalize) { // normalize point cloud between [0,1000]^3
        float xmin,ymin,zmin,xmax,ymax,zmax;
        get_bbox(xyz, xmin, ymin, zmin, xmax, ymax, zmax);
        float maxside = std::max(std::max(xmax-xmin, ymax-ymin), zmax-zmin);
        const int nb = static_cast<int>(xyz.size()/3);
#pragma omp parallel for
        for (int i=0; i<nb; i++) {
            xyz[i*3+0] = 1000.f*(xyz[i*3+0]-xmin)/maxside;
            xyz[i*3+1] = 1000.f*(xyz[i*3+1]-ymin)/maxside;
            xyz[i*3+2] = 1000.f*(xyz[i*3+2]-zmin)/maxside;
        }
        get_bbox(xyz, xmin, ymin, zmin, xmax, ymax, zmax);
        std::cerr << "bbox [" << xmin << ":" << xmax << "], [" << ymin << ":" << ymax << "], [" << zmin << ":" << zmax << "]" << std::endl;
    }
    return true;
}
// Writes the valid points of `pts` (flat x,y,z,w quadruples with w > 0) to an
// .xyz file: a count line, then one "x/w y/w z/w" line per valid point.
void drop_xyz_file(std::vector<float>& pts, const char *filename) {
    const unsigned int npts = pts.size() / 4;
    // First pass: count points with a positive weight.
    unsigned int valid = 0;
    for (unsigned int i = 0; i < npts; ++i)
        if (pts[4 * i + 3] > 0)
            ++valid;
    std::fstream out;
    out.open(filename, std::ios_base::out);
    out << valid << std::endl;
    // Second pass: emit each valid point de-homogenized by its weight.
    for (unsigned int i = 0; i < npts; ++i) {
        const float w = pts[4 * i + 3];
        if (w > 0)
            out << pts[4 * i] / w << " " << pts[4 * i + 1] / w << " "
                << pts[4 * i + 2] / w << std::endl;
    }
    out.close();
}
// Prints the number of CUDA devices and a property summary for each.
// Exits the process if device enumeration fails.
void printDevProp() {
    int devCount; // Number of CUDA devices
    cudaError_t err = cudaGetDeviceCount(&devCount);
    if (err != cudaSuccess) {
        // FIX: the message contained a stray "<<" inside the string literal.
        std::cerr << "Failed to initialize CUDA / failed to count CUDA devices (error code: "
                  << cudaGetErrorString(err) << ")! [file: " << __FILE__ << ", line: " << __LINE__ << "]" << std::endl;
        exit(1);
    }
    printf("CUDA Device Query...\n");
    printf("There are %d CUDA devices.\n", devCount);
    // Iterate through devices
    for (int i=0; i<devCount; ++i) {
        // Get device properties
        printf("\nCUDA Device #%d\n", i);
        cudaDeviceProp devProp;
        cudaGetDeviceProperties(&devProp, i);
        printf("Major revision number: %d\n", devProp.major);
        printf("Minor revision number: %d\n", devProp.minor);
        printf("Name: %s\n", devProp.name);
        printf("Total global memory: %lu\n", devProp.totalGlobalMem);
        printf("Total shared memory per block: %lu\n", devProp.sharedMemPerBlock);
        printf("Total registers per block: %d\n", devProp.regsPerBlock);
        printf("Warp size: %d\n", devProp.warpSize);
        printf("Maximum memory pitch: %lu\n", devProp.memPitch);
        printf("Maximum threads per block: %d\n", devProp.maxThreadsPerBlock);
        // FIX: the dimension loops shadowed the device index `i`.
        for (int d = 0; d < 3; ++d)
            printf("Maximum dimension %d of block: %d\n", d, devProp.maxThreadsDim[d]);
        for (int d = 0; d < 3; ++d)
            printf("Maximum dimension %d of grid: %d\n", d, devProp.maxGridSize[d]);
        printf("Clock rate: %d\n", devProp.clockRate);
        printf("Total constant memory: %lu\n", devProp.totalConstMem);
        printf("Texture alignment: %lu\n", devProp.textureAlignment);
        printf("Concurrent copy and execution: %s\n", (devProp.deviceOverlap ? "Yes" : "No"));
        printf("Number of multiprocessors: %d\n", devProp.multiProcessorCount);
        printf("Kernel execution timeout: %s\n", (devProp.kernelExecTimeoutEnabled ? "Yes" : "No"));
    }
}
// Loads a point cloud, builds a fixed bounding tetrahedral domain, and runs
// the Voronoi diagram computation on CPU (and optionally GPU), dumping the
// resulting barycenters to .xyz files.
int main(int argc, char** argv) {
    initialize_geogram(argc, argv);
    printDevProp();
    if (2>argc) {
        std::cerr << "Usage: " << argv[0] << " points.xyz" << std::endl;
        return 1;
    }
    // Touch the GPU once so driver/context setup is excluded from timings.
    int *initptr = NULL;
    cudaError_t err = cudaMalloc(&initptr, sizeof(int)); // unused memory, needed for initialize the GPU before time measurements
    if (err != cudaSuccess) {
        // FIX: the message contained a stray "<<" inside the string literal.
        std::cerr << "Failed to allocate (error code: " << cudaGetErrorString(err) << ")! [file: " << __FILE__ << ", line: " << __LINE__ << "]" << std::endl;
        return 1;
    }
    std::vector<float> pts;
    if (!load_file(argv[1], pts, false)) {
        std::cerr << argv[1] << ": could not load file" << std::endl;
        return 1;
    }
    int nb_pts = pts.size()/3;
    // FIX: added the missing separator in the status message.
    std::cout << "number of points: " << nb_pts << std::endl;
    // Bounding domain: the cube [100,300]^3 plus one interior vertex, split
    // into 12 tetrahedra (vertices as x,y,z,w quadruples; w unused here).
    std::vector<float> tet_pts;
    tet_pts.push_back(100); tet_pts.push_back(100); tet_pts.push_back(100); tet_pts.push_back(0);
    tet_pts.push_back(100); tet_pts.push_back(100); tet_pts.push_back(300); tet_pts.push_back(0);
    tet_pts.push_back(100); tet_pts.push_back(300); tet_pts.push_back(100); tet_pts.push_back(0);
    tet_pts.push_back(100); tet_pts.push_back(300); tet_pts.push_back(300); tet_pts.push_back(0);
    tet_pts.push_back(300); tet_pts.push_back(100); tet_pts.push_back(100); tet_pts.push_back(0);
    tet_pts.push_back(300); tet_pts.push_back(100); tet_pts.push_back(300); tet_pts.push_back(0);
    tet_pts.push_back(300); tet_pts.push_back(300); tet_pts.push_back(100); tet_pts.push_back(0);
    tet_pts.push_back(300); tet_pts.push_back(300); tet_pts.push_back(300); tet_pts.push_back(0);
    tet_pts.push_back(187.95); tet_pts.push_back(230.46); tet_pts.push_back(163.79); tet_pts.push_back(0);
    std::vector<int> tet_indices;
    tet_indices.push_back(0); tet_indices.push_back(2); tet_indices.push_back(3); tet_indices.push_back(8);
    tet_indices.push_back(6); tet_indices.push_back(3); tet_indices.push_back(8); tet_indices.push_back(7);
    tet_indices.push_back(1); tet_indices.push_back(8); tet_indices.push_back(3); tet_indices.push_back(7);
    tet_indices.push_back(1); tet_indices.push_back(8); tet_indices.push_back(5); tet_indices.push_back(0);
    tet_indices.push_back(8); tet_indices.push_back(1); tet_indices.push_back(5); tet_indices.push_back(7);
    tet_indices.push_back(1); tet_indices.push_back(8); tet_indices.push_back(0); tet_indices.push_back(3);
    tet_indices.push_back(5); tet_indices.push_back(8); tet_indices.push_back(4); tet_indices.push_back(0);
    tet_indices.push_back(5); tet_indices.push_back(6); tet_indices.push_back(8); tet_indices.push_back(7);
    tet_indices.push_back(5); tet_indices.push_back(6); tet_indices.push_back(4); tet_indices.push_back(8);
    tet_indices.push_back(4); tet_indices.push_back(2); tet_indices.push_back(0); tet_indices.push_back(8);
    tet_indices.push_back(6); tet_indices.push_back(3); tet_indices.push_back(2); tet_indices.push_back(8);
    tet_indices.push_back(2); tet_indices.push_back(4); tet_indices.push_back(6); tet_indices.push_back(8);
    // Choose between GPU and CPU.
    bool gpu = false;
    bool cpu = true;
    if (gpu) {
        std::vector<int> KNN;
        std::vector<float> bary(nb_pts * 4, 0);
        std::vector<Status> stat(nb_pts, security_radius_not_reached);
        compute_voro_diagram_GPU(pts, stat, bary, tet_pts, tet_indices, &KNN);
        // Now computes on the CPU the cells that were not
        // sucessfully computed on the GPU (the ones for
        // which stat[v] != success).
        //fallback_voro_diagram_CPU(pts, stat, bary, KNN);
        drop_xyz_file(bary, "gpu.xyz");
    }
    if (cpu) {
        std::vector<int> KNN;
        std::vector<float> bary(nb_pts * 4, 0);
        std::vector<Status> stat(nb_pts, security_radius_not_reached);
        compute_voro_diagram_CPU(pts, stat, bary, tet_pts, tet_indices, &KNN);
        //fallback_voro_diagram_CPU(pts, stat, bary, KNN);
        drop_xyz_file(bary, "cpu.xyz");
    }
    cudaFree(initptr);
    return 0;
}
|
712139330a1589f70080a88e720f36bc8d7ebc1d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <cstdlib>
#include <ctime>
#include <unistd.h>
#include <stdio.h>
__global__ //set global so this runs on the GPU
// Row-major GEMM accumulation: c[i][j] += sum_k a[i][k] * b[k][j], where a is
// aRow x aCol, b is aCol x bCol, c is aRow x bCol (caller must zero-fill c).
// aSize/bSize are never read; kept so the launch interface is unchanged.
// FIX: the original advanced j by blockDim.x only, so with more than one
// block every column with j >= blockDim.x was visited by several blocks —
// racy duplicate accumulation whenever bCol > blockDim.x. A proper
// grid-stride (blockDim.x * gridDim.x) gives each column exactly one owner.
void multiply(float *a, float *b, float *c, int aRow, int aCol,
              int bCol, const float aSize, const float bSize) {
    int first = blockIdx.x * blockDim.x + threadIdx.x; // this thread's first column
    int stride = blockDim.x * gridDim.x;               // total threads in the grid
    for(int i = 0; i < aRow; i += 1)
        for(int j = first; j < bCol; j += stride)
            for(int k = 0; k < aCol; k += 1)
                c[i*bCol + j] += a[i*aCol + k] * b[j + k*bCol];
    // (trailing __syncthreads() removed: threads exchange no shared state)
}
// CPU reference matrix multiply: c[i][j] += sum_k a[i][k] * b[k][j].
// Accumulates into c, so the caller must pass a zero-initialized buffer.
void scalar_multiply(float *a, float *b, float *c, int aRow, int aCol,
                     int bCol) {
    for (int row = 0; row < aRow; ++row) {
        float *c_row = c + row * bCol;
        const float *a_row = a + row * aCol;
        // ikj ordering: for each a[row][k], stream across one row of b.
        // Per output element the k-contributions still arrive in order
        // k = 0..aCol-1, so the float sums match the ijk form exactly.
        for (int k = 0; k < aCol; ++k) {
            const float aik = a_row[k];
            const float *b_row = b + k * bCol;
            for (int col = 0; col < bCol; ++col)
                c_row[col] += aik * b_row[col];
        }
    }
}
// Benchmarks the GPU matrix multiply (optionally validated against the CPU
// reference) for a[aRows x aCols] * b[bRows x bCols].
int main(int argc, char *argv[])
{
    if(argc < 5) {
        std::cerr << "usage: " << argv[0] << " aRows aCols bRows bCols\n";
        return(-1);
    }
    if(atoi(argv[2]) != atoi(argv[3])) {
        std::cerr << "error! aCols must match bRows. " <<
            argv[2] << ", " << argv[3] << std::endl;
        return(-1);
    }
    srand(4); //so creative
    int errorcheck = 0;   // 1 = validate GPU result against the CPU reference
    int threads = 512;
    int blocks = 512;
    int DEBUG = 0;
    //accept 4 args: row, col, for a and b
    // FIX: dimensions and byte counts are integral; the original kept them in
    // float, losing precision for large matrices and handing fractional byte
    // counts to malloc/hipMallocManaged.
    int aRow = atoi(argv[1]);
    int aCol = atoi(argv[2]);
    int bRow = atoi(argv[3]);
    int bCol = atoi(argv[4]);
    int cRow = aRow;
    int cCol = bCol;
    size_t aSize = (size_t)aRow * aCol * sizeof(float);
    size_t bSize = (size_t)bRow * bCol * sizeof(float);
    size_t cSize = (size_t)cRow * cCol * sizeof(float);
    float *a = (float *)malloc(aSize);
    float *b = (float *)malloc(bSize);
    float *c = (float *)malloc(cSize);
    float *cu_a;
    float *cu_b;
    float *cu_c;
    //malloc shared memory that can be accessed via GPU and CPU
    hipMallocManaged(&cu_a, aSize);
    hipMallocManaged(&cu_b, bSize);
    hipMallocManaged(&cu_c, cSize);
    //initialize them to randoms
    for(int i = 0; i < aRow*aCol; i++) {
        a[i] = cu_a[i] = rand() % 1000;
    }
    for(int i = 0; i < bRow*bCol; i++) {
        b[i] = cu_b[i] = rand() % 1000;
    }
    for(int i = 0; i < aRow*bCol; i++) {
        c[i] = cu_c[i] = 0;
    }
    // warmup launch so one-time driver/JIT cost is excluded from the timing
    std::cout << "warming up...\n";
    hipLaunchKernelGGL(( multiply), dim3(blocks), dim3(threads), 0, 0, cu_a, cu_b, cu_c, aRow, aCol, bCol, (float)aSize, (float)bSize);
    //after warming up, set memory back to 0
    hipMemset(cu_c, 0, cSize);
    std::cout << "done.\nrunning tests...\n";
    if(errorcheck){
        //run a CPU version to check for errors
        scalar_multiply(a, b, c, aRow, aCol, bCol);
    }
    double fulltime = 0;
    int repeats = 1;
    for(int i=0; i<repeats; i++) {
        //reset memory to zeros
        hipMemset(cu_c, 0, cSize);
        std::clock_t start = std::clock();
        hipLaunchKernelGGL(( multiply), dim3(blocks), dim3(threads), 0, 0, cu_a, cu_b, cu_c, aRow, aCol, bCol, (float)aSize, (float)bSize);
        //wait for all threads to finish before "timing" the code
        hipDeviceSynchronize();
        std::clock_t end = std::clock();
        fulltime += (end - start);
    }
    if(DEBUG) {
        //print every entry
        for(int i=0; i<aRow*bCol; i++)
            std::cerr << "c[" << i << "]\t" << (c[i] == cu_c[i] ? "\x1B[32mPASS\x1B[0m\t" : "\x1B[31mFAIL\x1B[0m\t") << c[i] << " " << cu_c[i] << std::endl;
    }
    int arraycheck = 1;
    if(errorcheck) {
        //run error checking
        for(int i=0; i<aRow*bCol; i++)
            if(c[i] != cu_c[i])
                arraycheck = 0;
        std::cout << (arraycheck ? "\x1B[32mPASS\x1B[0m" : "\x1B[31mFAIL\x1B[0m") << std::endl;
    }
    // FIX: compute the flop count (2*m*k*n) in double — the original int/float
    // arithmetic overflowed or lost precision for large matrices.
    double flops = 2.0 * aRow * aCol * bCol;
    double s_time = ((fulltime) / (double)(CLOCKS_PER_SEC));
    std::cout << "a[" << aRow << "," << aCol << "], b[" << bRow << "," << bCol << "], c[" << cRow << "," << cCol << "]\n";
    std::cout << "time: " << s_time*1000 << "ms\n";
    std::cout << "performance: " << flops << " flops at " << ((flops / 1000000000.0) / ((s_time) / repeats)) << "GFlop/s\n";
    if(DEBUG) {
        //printout
        for(int i=0; i<aRow * bCol; i++)
            std::cerr << c[i] << " ";
        std::cerr << std::endl;
    }
    //free shared memory
    hipFree(cu_a);
    hipFree(cu_b);
    hipFree(cu_c);
    //free cpu memory
    free(a);
    free(b);
    free(c);
    return 0;
}
| 712139330a1589f70080a88e720f36bc8d7ebc1d.cu |
#include <iostream>
#include <cstdlib>
#include <ctime>
#include <unistd.h>
#include <stdio.h>
__global__ //set global so this runs on the GPU
// Row-major GEMM accumulation: c[i][j] += sum_k a[i][k] * b[k][j], where a is
// aRow x aCol, b is aCol x bCol, c is aRow x bCol (caller must zero-fill c).
// aSize/bSize are never read; kept so the launch interface is unchanged.
// FIX: the original advanced j by blockDim.x only, so with more than one
// block every column with j >= blockDim.x was visited by several blocks —
// racy duplicate accumulation whenever bCol > blockDim.x. A proper
// grid-stride (blockDim.x * gridDim.x) gives each column exactly one owner.
void multiply(float *a, float *b, float *c, int aRow, int aCol,
              int bCol, const float aSize, const float bSize) {
    int first = blockIdx.x * blockDim.x + threadIdx.x; // this thread's first column
    int stride = blockDim.x * gridDim.x;               // total threads in the grid
    for(int i = 0; i < aRow; i += 1)
        for(int j = first; j < bCol; j += stride)
            for(int k = 0; k < aCol; k += 1)
                c[i*bCol + j] += a[i*aCol + k] * b[j + k*bCol];
    // (trailing __syncthreads() removed: threads exchange no shared state)
}
// CPU reference matrix multiply: c[i][j] += sum_k a[i][k] * b[k][j].
// Accumulates into c, so the caller must pass a zero-initialized buffer.
void scalar_multiply(float *a, float *b, float *c, int aRow, int aCol,
                     int bCol) {
    for (int row = 0; row < aRow; ++row) {
        float *c_row = c + row * bCol;
        const float *a_row = a + row * aCol;
        // ikj ordering: for each a[row][k], stream across one row of b.
        // Per output element the k-contributions still arrive in order
        // k = 0..aCol-1, so the float sums match the ijk form exactly.
        for (int k = 0; k < aCol; ++k) {
            const float aik = a_row[k];
            const float *b_row = b + k * bCol;
            for (int col = 0; col < bCol; ++col)
                c_row[col] += aik * b_row[col];
        }
    }
}
// Benchmarks the GPU matrix multiply (optionally validated against the CPU
// reference) for a[aRows x aCols] * b[bRows x bCols].
int main(int argc, char *argv[])
{
    if(argc < 5) {
        std::cerr << "usage: " << argv[0] << " aRows aCols bRows bCols\n";
        return(-1);
    }
    if(atoi(argv[2]) != atoi(argv[3])) {
        std::cerr << "error! aCols must match bRows. " <<
            argv[2] << ", " << argv[3] << std::endl;
        return(-1);
    }
    srand(4); //so creative
    int errorcheck = 0;   // 1 = validate GPU result against the CPU reference
    int threads = 512;
    int blocks = 512;
    int DEBUG = 0;
    //accept 4 args: row, col, for a and b
    // FIX: dimensions and byte counts are integral; the original kept them in
    // float, losing precision for large matrices and handing fractional byte
    // counts to malloc/cudaMallocManaged.
    int aRow = atoi(argv[1]);
    int aCol = atoi(argv[2]);
    int bRow = atoi(argv[3]);
    int bCol = atoi(argv[4]);
    int cRow = aRow;
    int cCol = bCol;
    size_t aSize = (size_t)aRow * aCol * sizeof(float);
    size_t bSize = (size_t)bRow * bCol * sizeof(float);
    size_t cSize = (size_t)cRow * cCol * sizeof(float);
    float *a = (float *)malloc(aSize);
    float *b = (float *)malloc(bSize);
    float *c = (float *)malloc(cSize);
    float *cu_a;
    float *cu_b;
    float *cu_c;
    //malloc shared memory that can be accessed via GPU and CPU
    cudaMallocManaged(&cu_a, aSize);
    cudaMallocManaged(&cu_b, bSize);
    cudaMallocManaged(&cu_c, cSize);
    //initialize them to randoms
    for(int i = 0; i < aRow*aCol; i++) {
        a[i] = cu_a[i] = rand() % 1000;
    }
    for(int i = 0; i < bRow*bCol; i++) {
        b[i] = cu_b[i] = rand() % 1000;
    }
    for(int i = 0; i < aRow*bCol; i++) {
        c[i] = cu_c[i] = 0;
    }
    // warmup launch so one-time driver/JIT cost is excluded from the timing
    std::cout << "warming up...\n";
    multiply<<<blocks, threads>>>(cu_a, cu_b, cu_c, aRow, aCol, bCol, (float)aSize, (float)bSize);
    //after warming up, set memory back to 0
    cudaMemset(cu_c, 0, cSize);
    std::cout << "done.\nrunning tests...\n";
    if(errorcheck){
        //run a CPU version to check for errors
        scalar_multiply(a, b, c, aRow, aCol, bCol);
    }
    double fulltime = 0;
    int repeats = 1;
    for(int i=0; i<repeats; i++) {
        //reset memory to zeros
        cudaMemset(cu_c, 0, cSize);
        std::clock_t start = std::clock();
        multiply<<<blocks, threads>>>(cu_a, cu_b, cu_c, aRow, aCol, bCol, (float)aSize, (float)bSize);
        //wait for all threads to finish before "timing" the code
        cudaDeviceSynchronize();
        std::clock_t end = std::clock();
        fulltime += (end - start);
    }
    if(DEBUG) {
        //print every entry
        for(int i=0; i<aRow*bCol; i++)
            std::cerr << "c[" << i << "]\t" << (c[i] == cu_c[i] ? "\x1B[32mPASS\x1B[0m\t" : "\x1B[31mFAIL\x1B[0m\t") << c[i] << " " << cu_c[i] << std::endl;
    }
    int arraycheck = 1;
    if(errorcheck) {
        //run error checking
        for(int i=0; i<aRow*bCol; i++)
            if(c[i] != cu_c[i])
                arraycheck = 0;
        std::cout << (arraycheck ? "\x1B[32mPASS\x1B[0m" : "\x1B[31mFAIL\x1B[0m") << std::endl;
    }
    // FIX: compute the flop count (2*m*k*n) in double — the original int/float
    // arithmetic overflowed or lost precision for large matrices.
    double flops = 2.0 * aRow * aCol * bCol;
    double s_time = ((fulltime) / (double)(CLOCKS_PER_SEC));
    std::cout << "a[" << aRow << "," << aCol << "], b[" << bRow << "," << bCol << "], c[" << cRow << "," << cCol << "]\n";
    std::cout << "time: " << s_time*1000 << "ms\n";
    std::cout << "performance: " << flops << " flops at " << ((flops / 1000000000.0) / ((s_time) / repeats)) << "GFlop/s\n";
    if(DEBUG) {
        //printout
        for(int i=0; i<aRow * bCol; i++)
            std::cerr << c[i] << " ";
        std::cerr << std::endl;
    }
    //free shared memory
    cudaFree(cu_a);
    cudaFree(cu_b);
    cudaFree(cu_c);
    //free cpu memory
    free(a);
    free(b);
    free(c);
    return 0;
}
|
ae8c1c2baadea3669770cdf1dddc1f1d7919fcf7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C" {
// 2x upsample of a 3-channel image: even (row, col) output pixels copy the
// source pixel at (row/2, col/2); inserted pixels are set to 0 on all
// channels. One thread per output pixel; `aabhas` is the output row pitch in
// bytes, `colorWidthStep` the input row pitch; height/width describe the
// output image.
__global__ void pyrup_rgb_kernel(unsigned char *d_in,unsigned char *d_out,int colorWidthStep,int aabhas,int height,int width)
{
    const int row = blockIdx.x * blockDim.x + threadIdx.x;
    const int col = blockIdx.y * blockDim.y + threadIdx.y;
    if (col >= width || row >= height)
        return;
    const int dst = row * aabhas + 3 * col;
    const int src = (row / 2) * colorWidthStep + 3 * (col / 2);
    const bool copy_source = (row % 2 == 0) && (col % 2 == 0);
    for (int ch = 0; ch < 3; ++ch)
        d_out[dst + ch] = copy_source ? d_in[src + ch] : 0;
}
// 2x upsample of a single-channel image: even (row, col) output pixels copy
// the source pixel at (row/2, col/2); inserted pixels are set to 255. One
// thread per output pixel; `aabhas` is the output row pitch, `colorWidthStep`
// the input row pitch; height/width describe the output image.
__global__ void pyrup_gray_kernel(unsigned char *d_in,unsigned char *d_out,int colorWidthStep,int aabhas,int height,int width)
{
    const int row = blockIdx.x * blockDim.x + threadIdx.x;
    const int col = blockIdx.y * blockDim.y + threadIdx.y;
    if (col >= width || row >= height)
        return;
    const int dst = row * aabhas + col;
    const int src = (row / 2) * colorWidthStep + col / 2;
    d_out[dst] = ((row % 2 == 0) && (col % 2 == 0)) ? d_in[src] : 255;
}
// 2x downsample of a 3-channel image by point sampling: output pixel
// (row, col) copies input pixel (2*row, 2*col). One thread per output pixel;
// `aabhas` is the output row pitch, `colorWidthStep` the input row pitch;
// height/width describe the output image.
__global__ void pyrdown_rgb_kernel(unsigned char *d_in,unsigned char *d_out,int colorWidthStep,int aabhas,int height,int width)
{
    const int row = blockIdx.x * blockDim.x + threadIdx.x;
    const int col = blockIdx.y * blockDim.y + threadIdx.y;
    if (col >= width || row >= height)
        return;
    const int dst = row * aabhas + 3 * col;
    const int src = (2 * row) * colorWidthStep + 3 * (2 * col);
    for (int ch = 0; ch < 3; ++ch)
        d_out[dst + ch] = d_in[src + ch];
}
// 2x downsample of a single-channel image by point sampling: output pixel
// (row, col) copies input pixel (2*row, 2*col). One thread per output pixel;
// `aabhas` is the output row pitch, `colorWidthStep` the input row pitch;
// height/width describe the output image.
__global__ void pyrdown_gray_kernel(unsigned char *d_in,unsigned char *d_out,int colorWidthStep,int aabhas,int height,int width)
{
    const int row = blockIdx.x * blockDim.x + threadIdx.x;
    const int col = blockIdx.y * blockDim.y + threadIdx.y;
    if (col >= width || row >= height)
        return;
    d_out[row * aabhas + col] = d_in[(2 * row) * colorWidthStep + 2 * col];
}
} | ae8c1c2baadea3669770cdf1dddc1f1d7919fcf7.cu | extern "C" {
__global__ void pyrup_rgb_kernel(unsigned char *d_in,unsigned char *d_out,int colorWidthStep,int aabhas,int height,int width)
{
const int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
const int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
const int color_tid = (xIndex)* aabhas + (3 * (yIndex));
const int color_tid1= (xIndex/2)* colorWidthStep + (3 * (yIndex/2));
if(yIndex >=width || xIndex>=height)
{
return;
}
if(yIndex%2==0 &&xIndex%2==0)
{
d_out[color_tid]=d_in[color_tid1];
d_out[color_tid+1]=d_in[color_tid1+1];
d_out[color_tid+2]=d_in[color_tid1+2];
}
else
{
d_out[color_tid]=0;
d_out[color_tid+1]=0;//d_in[color_tid1+1];
d_out[color_tid+2]=0;//d_in[color_tid1+2];
}
}
__global__ void pyrup_gray_kernel(unsigned char *d_in,unsigned char *d_out,int colorWidthStep,int aabhas,int height,int width)
{
const int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
const int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
const int color_tid = (xIndex)* aabhas + yIndex;
const int color_tid1= (xIndex/2)* colorWidthStep + yIndex/2;
if(yIndex >=width || xIndex>=height)
{
return;
}
if(yIndex%2==0 &&xIndex%2==0)
{
d_out[color_tid]=d_in[color_tid1];
//d_out[color_tid+1]=d_in[color_tid1+1];
//d_out[color_tid+2]=d_in[color_tid1+2];
}
else
{
d_out[color_tid]=255;
//d_out[color_tid+1]=0;//d_in[color_tid1+1];
//d_out[color_tid+2]=0;//d_in[color_tid1+2];
}
}
__global__ void pyrdown_rgb_kernel(unsigned char *d_in,unsigned char *d_out,int colorWidthStep,int aabhas,int height,int width)
{
const int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
const int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
const int color_tid = (xIndex)* aabhas + (3 * (yIndex));
const int color_tid1= (2*xIndex)* colorWidthStep + (3 * (2*yIndex));
if(yIndex >=width || xIndex>=height)
{
return;
}
d_out[color_tid]=d_in[color_tid1];
d_out[color_tid+1]=d_in[color_tid1+1];
d_out[color_tid+2]=d_in[color_tid1+2];
}
__global__ void pyrdown_gray_kernel(unsigned char *d_in,unsigned char *d_out,int colorWidthStep,int aabhas,int height,int width)
{
const int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
const int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
const int color_tid = (xIndex)* aabhas + yIndex;
const int color_tid1= (2*xIndex)* colorWidthStep + 2*yIndex;
if(yIndex >=width || xIndex>=height)
{
return;
}
d_out[color_tid]=d_in[color_tid1];
//d_out[color_tid+1]=d_in[color_tid1+1];
//d_out[color_tid+2]=d_in[color_tid1+2];
}
} |
2c1ecd2d4a92591fa50c5dcdac04bb89b66c3bcb.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#define BLOCK_SIZE 1024 // You can change this
//#define NUM_OF_ELEMS 1e6 // You can change this
__global__ void total(float * input, float * output, int len)
{
int tid_x = blockIdx.x * blockDim.x + threadIdx.x ;
int tid_y= blockIdx.y * blockDim.y + threadIdx.y;
int tid= tid_x+tid_y*tid_x;
// Traverse reduction tree
for (unsigned int stride = len/2; stride > 0; stride /= 2)
{
if ((tid < stride) && (input[tid] > input[tid + stride]) ){
input[tid] = input[tid + stride];}
__syncthreads();
}
// Write the computed sum of the block to the output vector at correct index
if (tid == 0)
{
output[0] = input[0];
}
}
int main(int argc, char ** argv)
{
float * hostInput; // The input 1D vector
float * hostOutput; // The output vector
float * deviceInput;
float * deviceOutput;
int NUM_OF_ELEMS=pow(2,24);
hipEvent_t start=0;
hipEvent_t stop=0;
float timef=0;
hipEventCreate(&start);
hipEventCreate(&stop);
int numInputElements = NUM_OF_ELEMS; // number of elements in the input list
//int numOutputElements; // number of elements in the output list
hostInput = (float *) malloc(sizeof(float) * numInputElements);
srand(time(NULL));
for (int i=0; i < NUM_OF_ELEMS; i++)
{
hostInput[i] = rand();
}
printf("host %f and %f and %f \n", hostInput[10] , hostInput[20] , hostInput[30]);
hostOutput = (float*) malloc(sizeof(float));
//@@ Allocate GPU memory here
hipMalloc((void **)&deviceInput, numInputElements * sizeof(float));
hipMalloc((void **)&deviceOutput, sizeof(float));
// Copy memory to the GPU here
hipMemcpy(deviceInput, hostInput, numInputElements * sizeof(float), hipMemcpyHostToDevice);
// Initialize the grid and block dimensions here
dim3 DimGrid(8192, 2, 1);
dim3 DimBlock(BLOCK_SIZE, 1, 1);
// Launch the GPU Kernel here
hipEventRecord(start,0);
hipLaunchKernelGGL(( total), dim3(DimGrid), dim3(DimBlock), 0, 0, deviceInput, deviceOutput, numInputElements);
hipEventRecord(stop,0);
hipEventSynchronize(stop);
// Copy the GPU memory back to the CPU here
hipMemcpy(hostOutput, deviceOutput, sizeof(float), hipMemcpyDeviceToHost);
printf("Reduced Sum from GPU = %f \n", hostOutput[0]);
hipEventElapsedTime(&timef,start,stop);
printf("time of the Kernel %f \n",timef );
// Free the GPU memory here
hipFree(deviceInput);
hipFree(deviceOutput);
free(hostInput);
free(hostOutput);
return 0;
}
| 2c1ecd2d4a92591fa50c5dcdac04bb89b66c3bcb.cu | #include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#define BLOCK_SIZE 1024 // You can change this
//#define NUM_OF_ELEMS 1e6 // You can change this
__global__ void total(float * input, float * output, int len)
{
int tid_x = blockIdx.x * blockDim.x + threadIdx.x ;
int tid_y= blockIdx.y * blockDim.y + threadIdx.y;
int tid= tid_x+tid_y*tid_x;
// Traverse reduction tree
for (unsigned int stride = len/2; stride > 0; stride /= 2)
{
if ((tid < stride) && (input[tid] > input[tid + stride]) ){
input[tid] = input[tid + stride];}
__syncthreads();
}
// Write the computed sum of the block to the output vector at correct index
if (tid == 0)
{
output[0] = input[0];
}
}
int main(int argc, char ** argv)
{
float * hostInput; // The input 1D vector
float * hostOutput; // The output vector
float * deviceInput;
float * deviceOutput;
int NUM_OF_ELEMS=pow(2,24);
cudaEvent_t start=0;
cudaEvent_t stop=0;
float timef=0;
cudaEventCreate(&start);
cudaEventCreate(&stop);
int numInputElements = NUM_OF_ELEMS; // number of elements in the input list
//int numOutputElements; // number of elements in the output list
hostInput = (float *) malloc(sizeof(float) * numInputElements);
srand(time(NULL));
for (int i=0; i < NUM_OF_ELEMS; i++)
{
hostInput[i] = rand();
}
printf("host %f and %f and %f \n", hostInput[10] , hostInput[20] , hostInput[30]);
hostOutput = (float*) malloc(sizeof(float));
//@@ Allocate GPU memory here
cudaMalloc((void **)&deviceInput, numInputElements * sizeof(float));
cudaMalloc((void **)&deviceOutput, sizeof(float));
// Copy memory to the GPU here
cudaMemcpy(deviceInput, hostInput, numInputElements * sizeof(float), cudaMemcpyHostToDevice);
// Initialize the grid and block dimensions here
dim3 DimGrid(8192, 2, 1);
dim3 DimBlock(BLOCK_SIZE, 1, 1);
// Launch the GPU Kernel here
cudaEventRecord(start,0);
total<<<DimGrid, DimBlock>>>(deviceInput, deviceOutput, numInputElements);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
// Copy the GPU memory back to the CPU here
cudaMemcpy(hostOutput, deviceOutput, sizeof(float), cudaMemcpyDeviceToHost);
printf("Reduced Sum from GPU = %f \n", hostOutput[0]);
cudaEventElapsedTime(&timef,start,stop);
printf("time of the Kernel %f \n",timef );
// Free the GPU memory here
cudaFree(deviceInput);
cudaFree(deviceOutput);
free(hostInput);
free(hostOutput);
return 0;
}
|
8d4d0e1eacf728ac77d5fc86d3f93b9b7856a158.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef __cplusplus
extern "C" {
#endif
#include <stdio.h>
#include <math.h>
#include <float.h>
#include "rpsroi_pooling_kernel.h"
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
i += blockDim.x * gridDim.x)
__global__ void RPSROIPoolForward(const int nthreads, const float* bottom_data,
const float spatial_scale, const int height, const int width,
const int channels, const int pooled_height, const int pooled_width,
const int group_size, const int output_dim,
const float* bottom_rois, float* top_data, int* mapping_channel, float* areas)
{
CUDA_1D_KERNEL_LOOP(index, nthreads)
{
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int ctop = (index / pooled_width / pooled_height) % output_dim;
int n = index / pooled_width / pooled_height / output_dim;
bottom_rois += n * 9;
int roi_batch_ind = bottom_rois[0];
float roi_x1 = static_cast<float>(round(bottom_rois[1])) * spatial_scale;
float roi_y1 = static_cast<float>(round(bottom_rois[2])) * spatial_scale;
float roi_x2 = static_cast<float>(round(bottom_rois[3])) * spatial_scale;
float roi_y2 = static_cast<float>(round(bottom_rois[4])) * spatial_scale;
float roi_x3 = static_cast<float>(round(bottom_rois[5])) * spatial_scale;
float roi_y3 = static_cast<float>(round(bottom_rois[6])) * spatial_scale;
float roi_x4 = static_cast<float>(round(bottom_rois[7])) * spatial_scale;
float roi_y4 = static_cast<float>(round(bottom_rois[8])) * spatial_scale;
////////////////////////////////DEBUG////////////////////////////////////
//cout << "rois: " << roi_x1 << " " << roi_y1 << " " << roi_x2 << " " << roi_y2 << " " << roi_x3 << " " << roi_y3 << " " << roi_x4 << " " << roi_y4 << endl;
//printf("rois: %f, %f, %f, %f, %f, %f, %f, %f\n", roi_x1, roi_y1, roi_x2, roi_y2, roi_x3, roi_y3, roi_x4, roi_y4);
float anchor_x1 = static_cast<float>(pw) * (roi_x2 - roi_x1) / pooled_width + roi_x1;
float anchor_y1 = static_cast<float>(pw) * (roi_y2 - roi_y1) / pooled_width + roi_y1;
float anchor_x2 = static_cast<float>(pw+1) * (roi_x2 - roi_x1) / pooled_width + roi_x1;
float anchor_y2 = static_cast<float>(pw+1) * (roi_y2 - roi_y1) / pooled_width + roi_y1;
float anchor_x3 = static_cast<float>(pw+1) * (roi_x3 - roi_x4) / pooled_width + roi_x4;
float anchor_y3 = static_cast<float>(pw+1) * (roi_y3 - roi_y4) / pooled_width + roi_y4;
float anchor_x4 = static_cast<float>(pw) * (roi_x3 - roi_x4) / pooled_width + roi_x4;
float anchor_y4 = static_cast<float>(pw) * (roi_y3 - roi_y4) / pooled_width + roi_y4;
////////////////////////////////DEBUG////////////////////////////////////
//cout << "anchor: " << anchor_x1 << " " << anchor_y1 << " " << anchor_x2 << " " << anchor_y2 << " " << anchor_x3 << " " << anchor_y3 << " " << anchor_x4 << " " << anchor_y4 <<endl;
//printf("anchor: %f, %f, %f, %f, %f, %f, %f, %f\n", anchor_x1, anchor_y1, anchor_x2, anchor_y2, anchor_x3, anchor_y3, anchor_x4, anchor_y4);
float grid_x1 = static_cast<float>(ph) * (anchor_x4 - anchor_x1) / pooled_height + anchor_x1;
float grid_y1 = static_cast<float>(ph) * (anchor_y4 - anchor_y1) / pooled_height + anchor_y1;
float grid_x4 = static_cast<float>(ph + 1) * (anchor_x4 - anchor_x1) / pooled_height + anchor_x1;
float grid_y4 = static_cast<float>(ph + 1) * (anchor_y4 - anchor_y1) / pooled_height + anchor_y1;
float grid_x2 = static_cast<float>(ph) * (anchor_x3 - anchor_x2) / pooled_height + anchor_x2;
float grid_y2 = static_cast<float>(ph) * (anchor_y3 - anchor_y2) / pooled_height + anchor_y2;
float grid_x3 = static_cast<float>(ph + 1) * (anchor_x3 - anchor_x2) / pooled_height + anchor_x2;
float grid_y3 = static_cast<float>(ph + 1) * (anchor_y3 - anchor_y2) / pooled_height + anchor_y2;
////////////////////////////////DEBUG////////////////////////////////////
//cout << "grid: " << grid_x1 << " " << grid_y1 << " " << grid_x2 << " " << grid_y2 << " " << grid_x3 << " " << grid_y3 << " " << grid_x4 << " " << grid_y4 << endl;
// printf("grid: %f, %f, %f, %f, %f, %f, %f, %f\n", grid_x1, grid_y1, grid_x2, grid_y2, grid_x3, grid_y3, grid_x4, grid_y4);
//printf("min:%f, %f, %f\n", grid_y1, grid_y2, min(grid_y1, grid_y2));
//printf("min_grid:%f, %f, %f\n", grid_y1, grid_y2, floor(min(grid_y1, grid_y2)));
int hstart = static_cast<int>(floor(min(min(min(grid_y1, grid_y2) , grid_y3), grid_y4)));
int hend = static_cast<int>(ceil(max(max(max(grid_y1, grid_y2) , grid_y3), grid_y4)));
int wstart = static_cast<int>(floor(min(min(min(grid_x1, grid_x2) , grid_x3), grid_x4)));
int wend = static_cast<int>(ceil(max(max(max(grid_x1, grid_x2) , grid_x3), grid_x4)));
///////////////////////////////DEBUG/////////////////////////////////////
//cout << "start&&end: " << hstart << " " << hend << " " << wstart << " " << wend << endl;
//printf("start&&end: %d, %d, %d, %d\n", hstart, hend, wstart, wend);
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart, 0), height);
hend = min(max(hend, 0), height);
wstart = min(max(wstart, 0), width);
wend = min(max(wend, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
/////////////////////////////////////////////////////////////////////
//cout << "start&&end norm: " << hstart << " " << hend << " " << wstart << " " << wend;
//printf("start&&end norm: %d, %d, %d, %d\n", hstart, hend, wstart, wend);
int gw = pw;
int gh = ph;
int c = (ctop*group_size + gh)*group_size + gw;
// printf("c:%d %d %d %d\n", c, channels, height, width);
bottom_data += (roi_batch_ind * channels + c) * height * width;
//printf("get value: %d, %d, %d, %f\n", c, 270, 765, bottom_data[270*width + 765]);
float out_sum = 0;
float bin_area = 0;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int bottom_index = h*width + w;
float p1 = (grid_x2 - grid_x1) * (h - grid_y1) - (w - grid_x1) * (grid_y2 - grid_y1);
float p2 = (grid_x3 - grid_x2) * (h - grid_y2) - (w - grid_x2) * (grid_y3 - grid_y2);
float p3 = (grid_x4 - grid_x3) * (h - grid_y3) - (w - grid_x3) * (grid_y4 - grid_y3);
float p4 = (grid_x1 - grid_x4) * (h - grid_y4) - (w - grid_x4) * (grid_y1 - grid_y4);
if(p1 >= 0 && p2 >= 0 && p3 >= 0 && p4 >= 0){
out_sum += bottom_data[bottom_index];
bin_area += 1;
}
}
}
/////////////////////////////DEBUG//////////////////////////
//cout << "bin_area: " << bin_area <<" out_sum: " << out_sum << endl;
//printf("bin_area: %f, out_sum: %f\n", bin_area, out_sum);
top_data[index] = (is_empty || (bin_area ==0)) ? 0. : out_sum/bin_area;
mapping_channel[index] = c;
areas[index] = bin_area;
}
}
int RPSROIPoolForwardLauncher(
const float* bottom_data, const float spatial_scale, const int num_rois, const int height,
const int width, const int channels, const int pooled_height,
const int pooled_width, const float* bottom_rois,
const int group_size, const int output_dim,
float* top_data, int* mapping_channel, float* areas, hipStream_t stream)
{
const int kThreadsPerBlock = 1024;
const int output_size = output_dim * pooled_height * pooled_width * num_rois;
hipError_t err;
hipLaunchKernelGGL(( RPSROIPoolForward), dim3((output_size + kThreadsPerBlock - 1) / kThreadsPerBlock), dim3(kThreadsPerBlock), 0, stream,
output_size, bottom_data, spatial_scale, height, width, channels, pooled_height,
pooled_width, group_size, output_dim, bottom_rois, top_data, mapping_channel, areas);
err = hipGetLastError();
if(hipSuccess != err)
{
fprintf( stderr, "cudaCheckError() failed : %s\n", hipGetErrorString( err ) );
exit( -1 );
}
return 1;
}
__global__ void RPSROIPoolBackward(const int nthreads, const float* top_diff,
const int* mapping_channel, const float* areas, const int num_rois, const float spatial_scale,
const int height, const int width, const int channels,
const int pooled_height, const int pooled_width, const int output_dim, float* bottom_diff,
const float* bottom_rois) {
CUDA_1D_KERNEL_LOOP(index, nthreads)
{
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int n = index / pooled_width / pooled_height / output_dim;
// [start, end) interval for spatial sampling
bottom_rois += n * 9;
int roi_batch_ind = bottom_rois[0];
float roi_x1 = static_cast<float>(round(bottom_rois[1])) * spatial_scale;
float roi_y1 = static_cast<float>(round(bottom_rois[2])) * spatial_scale;
float roi_x2 = static_cast<float>(round(bottom_rois[3])) * spatial_scale;
float roi_y2 = static_cast<float>(round(bottom_rois[4])) * spatial_scale;
float roi_x3 = static_cast<float>(round(bottom_rois[5])) * spatial_scale;
float roi_y3 = static_cast<float>(round(bottom_rois[6])) * spatial_scale;
float roi_x4 = static_cast<float>(round(bottom_rois[7])) * spatial_scale;
float roi_y4 = static_cast<float>(round(bottom_rois[8])) * spatial_scale;
////////////////////////////////DEBUG////////////////////////////////////
//cout << "rois: " << roi_x1 << " " << roi_y1 << " " << roi_x2 << " " << roi_y2 << " " << roi_x3 << " " << roi_y3 << " " << roi_x4 << " " << roi_y4 << endl;
//printf("rois: %f, %f, %f, %f, %f, %f, %f, %f\n", roi_x1, roi_y1, roi_x2, roi_y2, roi_x3, roi_y3, roi_x4, roi_y4);
float anchor_x1 = static_cast<float>(pw) * (roi_x2 - roi_x1) / pooled_width + roi_x1;
float anchor_y1 = static_cast<float>(pw) * (roi_y2 - roi_y1) / pooled_width + roi_y1;
float anchor_x2 = static_cast<float>(pw+1) * (roi_x2 - roi_x1) / pooled_width + roi_x1;
float anchor_y2 = static_cast<float>(pw+1) * (roi_y2 - roi_y1) / pooled_width + roi_y1;
float anchor_x3 = static_cast<float>(pw+1) * (roi_x3 - roi_x4) / pooled_width + roi_x4;
float anchor_y3 = static_cast<float>(pw+1) * (roi_y3 - roi_y4) / pooled_width + roi_y4;
float anchor_x4 = static_cast<float>(pw) * (roi_x3 - roi_x4) / pooled_width + roi_x4;
float anchor_y4 = static_cast<float>(pw) * (roi_y3 - roi_y4) / pooled_width + roi_y4;
////////////////////////////////DEBUG////////////////////////////////////
//cout << "anchor: " << anchor_x1 << " " << anchor_y1 << " " << anchor_x2 << " " << anchor_y2 << " " << anchor_x3 << " " << anchor_y3 << " " << anchor_x4 << " " << anchor_y4 <<endl;
//printf("anchor: %f, %f, %f, %f, %f, %f, %f, %f\n", anchor_x1, anchor_y1, anchor_x2, anchor_y2, anchor_x3, anchor_y3, anchor_x4, anchor_y4);
float grid_x1 = static_cast<float>(ph) * (anchor_x4 - anchor_x1) / pooled_height + anchor_x1;
float grid_y1 = static_cast<float>(ph) * (anchor_y4 - anchor_y1) / pooled_height + anchor_y1;
float grid_x4 = static_cast<float>(ph + 1) * (anchor_x4 - anchor_x1) / pooled_height + anchor_x1;
float grid_y4 = static_cast<float>(ph + 1) * (anchor_y4 - anchor_y1) / pooled_height + anchor_y1;
float grid_x2 = static_cast<float>(ph) * (anchor_x3 - anchor_x2) / pooled_height + anchor_x2;
float grid_y2 = static_cast<float>(ph) * (anchor_y3 - anchor_y2) / pooled_height + anchor_y2;
float grid_x3 = static_cast<float>(ph + 1) * (anchor_x3 - anchor_x2) / pooled_height + anchor_x2;
float grid_y3 = static_cast<float>(ph + 1) * (anchor_y3 - anchor_y2) / pooled_height + anchor_y2;
////////////////////////////////DEBUG////////////////////////////////////
//cout << "grid: " << grid_x1 << " " << grid_y1 << " " << grid_x2 << " " << grid_y2 << " " << grid_x3 << " " << grid_y3 << " " << grid_x4 << " " << grid_y4 << endl;
//printf("grid: %f, %f, %f, %f, %f, %f, %f, %f\n", grid_x1, grid_y1, grid_x2, grid_y2, grid_x3, grid_y3, grid_x4, grid_y4);
//printf("min:%f, %f, %f\n", grid_y1, grid_y2, min(grid_y1, grid_y2));
//printf("min_grid:%f, %f, %f\n", grid_y1, grid_y2, floor(min(grid_y1, grid_y2)));
int hstart = static_cast<int>(floor(min(min(min(grid_y1, grid_y2) , grid_y3), grid_y4)));
int hend = static_cast<int>(ceil(max(max(max(grid_y1, grid_y2) , grid_y3), grid_y4)));
int wstart = static_cast<int>(floor(min(min(min(grid_x1, grid_x2) , grid_x3), grid_x4)));
int wend = static_cast<int>(ceil(max(max(max(grid_x1, grid_x2) , grid_x3), grid_x4)));
///////////////////////////////DEBUG/////////////////////////////////////
//cout << "start&&end: " << hstart << " " << hend << " " << wstart << " " << wend << endl;
//printf("start&&end: %d, %d, %d, %d\n", hstart, hend, wstart, wend);
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart, 0), height);
hend = min(max(hend, 0), height);
wstart = min(max(wstart, 0), width);
wend = min(max(wend, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Compute c at bottom
int c = mapping_channel[index];
float* offset_bottom_diff = bottom_diff +
(roi_batch_ind * channels + c) * height * width;
float bin_area = areas[index];
float diff_val = (is_empty || (bin_area == 0)) ? 0. : top_diff[index] / bin_area;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int bottom_index = h*width + w;
float p1 = (grid_x2 - grid_x1) * (h - grid_y1) - (w - grid_x1) * (grid_y2 - grid_y1);
float p2 = (grid_x3 - grid_x2) * (h - grid_y2) - (w - grid_x2) * (grid_y3 - grid_y2);
float p3 = (grid_x4 - grid_x3) * (h - grid_y3) - (w - grid_x3) * (grid_y4 - grid_y3);
float p4 = (grid_x1 - grid_x4) * (h - grid_y4) - (w - grid_x4) * (grid_y1 - grid_y4);
if(p1 >= 0 && p2 >= 0 && p3 >= 0 && p4 >= 0){
atomicAdd(offset_bottom_diff + bottom_index, diff_val);
}
}
}
}
}
int RPSROIPoolBackwardLauncher(const float* top_diff, const int* mapping_channel, const float* areas, const int batch_size, const int num_rois, const float spatial_scale, const int channels,
const int height, const int width, const int pooled_width,
const int pooled_height, const int output_dim,
float* bottom_diff, const float* bottom_rois, hipStream_t stream)
{
const int kThreadsPerBlock = 1024;
//const int output_size = output_dim * height * width * channels;
const int output_size = output_dim * pooled_height * pooled_width * num_rois;
hipError_t err;
hipLaunchKernelGGL(( RPSROIPoolBackward), dim3((output_size + kThreadsPerBlock - 1) / kThreadsPerBlock), dim3(kThreadsPerBlock), 0, stream,
output_size, top_diff, mapping_channel, areas, num_rois, spatial_scale, height, width, channels, pooled_height,
pooled_width, output_dim, bottom_diff, bottom_rois);
err = hipGetLastError();
if(hipSuccess != err)
{
fprintf( stderr, "cudaCheckError() failed : %s\n", hipGetErrorString( err ) );
exit( -1 );
}
return 1;
}
#ifdef __cplusplus
}
#endif
| 8d4d0e1eacf728ac77d5fc86d3f93b9b7856a158.cu | #ifdef __cplusplus
extern "C" {
#endif
#include <stdio.h>
#include <math.h>
#include <float.h>
#include "rpsroi_pooling_kernel.h"
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
i += blockDim.x * gridDim.x)
__global__ void RPSROIPoolForward(const int nthreads, const float* bottom_data,
const float spatial_scale, const int height, const int width,
const int channels, const int pooled_height, const int pooled_width,
const int group_size, const int output_dim,
const float* bottom_rois, float* top_data, int* mapping_channel, float* areas)
{
CUDA_1D_KERNEL_LOOP(index, nthreads)
{
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int ctop = (index / pooled_width / pooled_height) % output_dim;
int n = index / pooled_width / pooled_height / output_dim;
bottom_rois += n * 9;
int roi_batch_ind = bottom_rois[0];
float roi_x1 = static_cast<float>(round(bottom_rois[1])) * spatial_scale;
float roi_y1 = static_cast<float>(round(bottom_rois[2])) * spatial_scale;
float roi_x2 = static_cast<float>(round(bottom_rois[3])) * spatial_scale;
float roi_y2 = static_cast<float>(round(bottom_rois[4])) * spatial_scale;
float roi_x3 = static_cast<float>(round(bottom_rois[5])) * spatial_scale;
float roi_y3 = static_cast<float>(round(bottom_rois[6])) * spatial_scale;
float roi_x4 = static_cast<float>(round(bottom_rois[7])) * spatial_scale;
float roi_y4 = static_cast<float>(round(bottom_rois[8])) * spatial_scale;
////////////////////////////////DEBUG////////////////////////////////////
//cout << "rois: " << roi_x1 << " " << roi_y1 << " " << roi_x2 << " " << roi_y2 << " " << roi_x3 << " " << roi_y3 << " " << roi_x4 << " " << roi_y4 << endl;
//printf("rois: %f, %f, %f, %f, %f, %f, %f, %f\n", roi_x1, roi_y1, roi_x2, roi_y2, roi_x3, roi_y3, roi_x4, roi_y4);
float anchor_x1 = static_cast<float>(pw) * (roi_x2 - roi_x1) / pooled_width + roi_x1;
float anchor_y1 = static_cast<float>(pw) * (roi_y2 - roi_y1) / pooled_width + roi_y1;
float anchor_x2 = static_cast<float>(pw+1) * (roi_x2 - roi_x1) / pooled_width + roi_x1;
float anchor_y2 = static_cast<float>(pw+1) * (roi_y2 - roi_y1) / pooled_width + roi_y1;
float anchor_x3 = static_cast<float>(pw+1) * (roi_x3 - roi_x4) / pooled_width + roi_x4;
float anchor_y3 = static_cast<float>(pw+1) * (roi_y3 - roi_y4) / pooled_width + roi_y4;
float anchor_x4 = static_cast<float>(pw) * (roi_x3 - roi_x4) / pooled_width + roi_x4;
float anchor_y4 = static_cast<float>(pw) * (roi_y3 - roi_y4) / pooled_width + roi_y4;
////////////////////////////////DEBUG////////////////////////////////////
//cout << "anchor: " << anchor_x1 << " " << anchor_y1 << " " << anchor_x2 << " " << anchor_y2 << " " << anchor_x3 << " " << anchor_y3 << " " << anchor_x4 << " " << anchor_y4 <<endl;
//printf("anchor: %f, %f, %f, %f, %f, %f, %f, %f\n", anchor_x1, anchor_y1, anchor_x2, anchor_y2, anchor_x3, anchor_y3, anchor_x4, anchor_y4);
float grid_x1 = static_cast<float>(ph) * (anchor_x4 - anchor_x1) / pooled_height + anchor_x1;
float grid_y1 = static_cast<float>(ph) * (anchor_y4 - anchor_y1) / pooled_height + anchor_y1;
float grid_x4 = static_cast<float>(ph + 1) * (anchor_x4 - anchor_x1) / pooled_height + anchor_x1;
float grid_y4 = static_cast<float>(ph + 1) * (anchor_y4 - anchor_y1) / pooled_height + anchor_y1;
float grid_x2 = static_cast<float>(ph) * (anchor_x3 - anchor_x2) / pooled_height + anchor_x2;
float grid_y2 = static_cast<float>(ph) * (anchor_y3 - anchor_y2) / pooled_height + anchor_y2;
float grid_x3 = static_cast<float>(ph + 1) * (anchor_x3 - anchor_x2) / pooled_height + anchor_x2;
float grid_y3 = static_cast<float>(ph + 1) * (anchor_y3 - anchor_y2) / pooled_height + anchor_y2;
////////////////////////////////DEBUG////////////////////////////////////
//cout << "grid: " << grid_x1 << " " << grid_y1 << " " << grid_x2 << " " << grid_y2 << " " << grid_x3 << " " << grid_y3 << " " << grid_x4 << " " << grid_y4 << endl;
// printf("grid: %f, %f, %f, %f, %f, %f, %f, %f\n", grid_x1, grid_y1, grid_x2, grid_y2, grid_x3, grid_y3, grid_x4, grid_y4);
//printf("min:%f, %f, %f\n", grid_y1, grid_y2, min(grid_y1, grid_y2));
//printf("min_grid:%f, %f, %f\n", grid_y1, grid_y2, floor(min(grid_y1, grid_y2)));
int hstart = static_cast<int>(floor(min(min(min(grid_y1, grid_y2) , grid_y3), grid_y4)));
int hend = static_cast<int>(ceil(max(max(max(grid_y1, grid_y2) , grid_y3), grid_y4)));
int wstart = static_cast<int>(floor(min(min(min(grid_x1, grid_x2) , grid_x3), grid_x4)));
int wend = static_cast<int>(ceil(max(max(max(grid_x1, grid_x2) , grid_x3), grid_x4)));
///////////////////////////////DEBUG/////////////////////////////////////
//cout << "start&&end: " << hstart << " " << hend << " " << wstart << " " << wend << endl;
//printf("start&&end: %d, %d, %d, %d\n", hstart, hend, wstart, wend);
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart, 0), height);
hend = min(max(hend, 0), height);
wstart = min(max(wstart, 0), width);
wend = min(max(wend, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
/////////////////////////////////////////////////////////////////////
//cout << "start&&end norm: " << hstart << " " << hend << " " << wstart << " " << wend;
//printf("start&&end norm: %d, %d, %d, %d\n", hstart, hend, wstart, wend);
int gw = pw;
int gh = ph;
int c = (ctop*group_size + gh)*group_size + gw;
// printf("c:%d %d %d %d\n", c, channels, height, width);
bottom_data += (roi_batch_ind * channels + c) * height * width;
//printf("get value: %d, %d, %d, %f\n", c, 270, 765, bottom_data[270*width + 765]);
float out_sum = 0;
float bin_area = 0;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int bottom_index = h*width + w;
float p1 = (grid_x2 - grid_x1) * (h - grid_y1) - (w - grid_x1) * (grid_y2 - grid_y1);
float p2 = (grid_x3 - grid_x2) * (h - grid_y2) - (w - grid_x2) * (grid_y3 - grid_y2);
float p3 = (grid_x4 - grid_x3) * (h - grid_y3) - (w - grid_x3) * (grid_y4 - grid_y3);
float p4 = (grid_x1 - grid_x4) * (h - grid_y4) - (w - grid_x4) * (grid_y1 - grid_y4);
if(p1 >= 0 && p2 >= 0 && p3 >= 0 && p4 >= 0){
out_sum += bottom_data[bottom_index];
bin_area += 1;
}
}
}
/////////////////////////////DEBUG//////////////////////////
//cout << "bin_area: " << bin_area <<" out_sum: " << out_sum << endl;
//printf("bin_area: %f, out_sum: %f\n", bin_area, out_sum);
top_data[index] = (is_empty || (bin_area ==0)) ? 0. : out_sum/bin_area;
mapping_channel[index] = c;
areas[index] = bin_area;
}
}
int RPSROIPoolForwardLauncher(
const float* bottom_data, const float spatial_scale, const int num_rois, const int height,
const int width, const int channels, const int pooled_height,
const int pooled_width, const float* bottom_rois,
const int group_size, const int output_dim,
float* top_data, int* mapping_channel, float* areas, cudaStream_t stream)
{
const int kThreadsPerBlock = 1024;
const int output_size = output_dim * pooled_height * pooled_width * num_rois;
cudaError_t err;
RPSROIPoolForward<<<(output_size + kThreadsPerBlock - 1) / kThreadsPerBlock, kThreadsPerBlock, 0, stream>>>(
output_size, bottom_data, spatial_scale, height, width, channels, pooled_height,
pooled_width, group_size, output_dim, bottom_rois, top_data, mapping_channel, areas);
err = cudaGetLastError();
if(cudaSuccess != err)
{
fprintf( stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString( err ) );
exit( -1 );
}
return 1;
}
__global__ void RPSROIPoolBackward(const int nthreads, const float* top_diff,
const int* mapping_channel, const float* areas, const int num_rois, const float spatial_scale,
const int height, const int width, const int channels,
const int pooled_height, const int pooled_width, const int output_dim, float* bottom_diff,
const float* bottom_rois) {
CUDA_1D_KERNEL_LOOP(index, nthreads)
{
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int n = index / pooled_width / pooled_height / output_dim;
// [start, end) interval for spatial sampling
bottom_rois += n * 9;
int roi_batch_ind = bottom_rois[0];
float roi_x1 = static_cast<float>(round(bottom_rois[1])) * spatial_scale;
float roi_y1 = static_cast<float>(round(bottom_rois[2])) * spatial_scale;
float roi_x2 = static_cast<float>(round(bottom_rois[3])) * spatial_scale;
float roi_y2 = static_cast<float>(round(bottom_rois[4])) * spatial_scale;
float roi_x3 = static_cast<float>(round(bottom_rois[5])) * spatial_scale;
float roi_y3 = static_cast<float>(round(bottom_rois[6])) * spatial_scale;
float roi_x4 = static_cast<float>(round(bottom_rois[7])) * spatial_scale;
float roi_y4 = static_cast<float>(round(bottom_rois[8])) * spatial_scale;
////////////////////////////////DEBUG////////////////////////////////////
//cout << "rois: " << roi_x1 << " " << roi_y1 << " " << roi_x2 << " " << roi_y2 << " " << roi_x3 << " " << roi_y3 << " " << roi_x4 << " " << roi_y4 << endl;
//printf("rois: %f, %f, %f, %f, %f, %f, %f, %f\n", roi_x1, roi_y1, roi_x2, roi_y2, roi_x3, roi_y3, roi_x4, roi_y4);
float anchor_x1 = static_cast<float>(pw) * (roi_x2 - roi_x1) / pooled_width + roi_x1;
float anchor_y1 = static_cast<float>(pw) * (roi_y2 - roi_y1) / pooled_width + roi_y1;
float anchor_x2 = static_cast<float>(pw+1) * (roi_x2 - roi_x1) / pooled_width + roi_x1;
float anchor_y2 = static_cast<float>(pw+1) * (roi_y2 - roi_y1) / pooled_width + roi_y1;
float anchor_x3 = static_cast<float>(pw+1) * (roi_x3 - roi_x4) / pooled_width + roi_x4;
float anchor_y3 = static_cast<float>(pw+1) * (roi_y3 - roi_y4) / pooled_width + roi_y4;
float anchor_x4 = static_cast<float>(pw) * (roi_x3 - roi_x4) / pooled_width + roi_x4;
float anchor_y4 = static_cast<float>(pw) * (roi_y3 - roi_y4) / pooled_width + roi_y4;
////////////////////////////////DEBUG////////////////////////////////////
//cout << "anchor: " << anchor_x1 << " " << anchor_y1 << " " << anchor_x2 << " " << anchor_y2 << " " << anchor_x3 << " " << anchor_y3 << " " << anchor_x4 << " " << anchor_y4 <<endl;
//printf("anchor: %f, %f, %f, %f, %f, %f, %f, %f\n", anchor_x1, anchor_y1, anchor_x2, anchor_y2, anchor_x3, anchor_y3, anchor_x4, anchor_y4);
float grid_x1 = static_cast<float>(ph) * (anchor_x4 - anchor_x1) / pooled_height + anchor_x1;
float grid_y1 = static_cast<float>(ph) * (anchor_y4 - anchor_y1) / pooled_height + anchor_y1;
float grid_x4 = static_cast<float>(ph + 1) * (anchor_x4 - anchor_x1) / pooled_height + anchor_x1;
float grid_y4 = static_cast<float>(ph + 1) * (anchor_y4 - anchor_y1) / pooled_height + anchor_y1;
float grid_x2 = static_cast<float>(ph) * (anchor_x3 - anchor_x2) / pooled_height + anchor_x2;
float grid_y2 = static_cast<float>(ph) * (anchor_y3 - anchor_y2) / pooled_height + anchor_y2;
float grid_x3 = static_cast<float>(ph + 1) * (anchor_x3 - anchor_x2) / pooled_height + anchor_x2;
float grid_y3 = static_cast<float>(ph + 1) * (anchor_y3 - anchor_y2) / pooled_height + anchor_y2;
////////////////////////////////DEBUG////////////////////////////////////
//cout << "grid: " << grid_x1 << " " << grid_y1 << " " << grid_x2 << " " << grid_y2 << " " << grid_x3 << " " << grid_y3 << " " << grid_x4 << " " << grid_y4 << endl;
//printf("grid: %f, %f, %f, %f, %f, %f, %f, %f\n", grid_x1, grid_y1, grid_x2, grid_y2, grid_x3, grid_y3, grid_x4, grid_y4);
//printf("min:%f, %f, %f\n", grid_y1, grid_y2, min(grid_y1, grid_y2));
//printf("min_grid:%f, %f, %f\n", grid_y1, grid_y2, floor(min(grid_y1, grid_y2)));
int hstart = static_cast<int>(floor(min(min(min(grid_y1, grid_y2) , grid_y3), grid_y4)));
int hend = static_cast<int>(ceil(max(max(max(grid_y1, grid_y2) , grid_y3), grid_y4)));
int wstart = static_cast<int>(floor(min(min(min(grid_x1, grid_x2) , grid_x3), grid_x4)));
int wend = static_cast<int>(ceil(max(max(max(grid_x1, grid_x2) , grid_x3), grid_x4)));
///////////////////////////////DEBUG/////////////////////////////////////
//cout << "start&&end: " << hstart << " " << hend << " " << wstart << " " << wend << endl;
//printf("start&&end: %d, %d, %d, %d\n", hstart, hend, wstart, wend);
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart, 0), height);
hend = min(max(hend, 0), height);
wstart = min(max(wstart, 0), width);
wend = min(max(wend, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Compute c at bottom
int c = mapping_channel[index];
float* offset_bottom_diff = bottom_diff +
(roi_batch_ind * channels + c) * height * width;
float bin_area = areas[index];
float diff_val = (is_empty || (bin_area == 0)) ? 0. : top_diff[index] / bin_area;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int bottom_index = h*width + w;
float p1 = (grid_x2 - grid_x1) * (h - grid_y1) - (w - grid_x1) * (grid_y2 - grid_y1);
float p2 = (grid_x3 - grid_x2) * (h - grid_y2) - (w - grid_x2) * (grid_y3 - grid_y2);
float p3 = (grid_x4 - grid_x3) * (h - grid_y3) - (w - grid_x3) * (grid_y4 - grid_y3);
float p4 = (grid_x1 - grid_x4) * (h - grid_y4) - (w - grid_x4) * (grid_y1 - grid_y4);
if(p1 >= 0 && p2 >= 0 && p3 >= 0 && p4 >= 0){
atomicAdd(offset_bottom_diff + bottom_index, diff_val);
}
}
}
}
}
// Host-side launcher for the RPSROIPoolBackward kernel (rotated PS-ROI pooling
// backward pass): scatters top_diff gradients into bottom_diff via atomicAdd.
// One thread handles one output element (output_dim x pooled_height x
// pooled_width per ROI).
// NOTE(review): batch_size is accepted but never used here — presumably kept
// for API symmetry with the forward launcher; confirm.
// Returns 1 on success; exits the process on a CUDA launch error.
int RPSROIPoolBackwardLauncher(const float* top_diff, const int* mapping_channel, const float* areas, const int batch_size, const int num_rois, const float spatial_scale, const int channels,
const int height, const int width, const int pooled_width,
const int pooled_height, const int output_dim,
float* bottom_diff, const float* bottom_rois, cudaStream_t stream)
{
const int kThreadsPerBlock = 1024;
//const int output_size = output_dim * height * width * channels;
const int output_size = output_dim * pooled_height * pooled_width * num_rois;
cudaError_t err;
RPSROIPoolBackward<<<(output_size + kThreadsPerBlock - 1) / kThreadsPerBlock, kThreadsPerBlock, 0, stream>>>(
output_size, top_diff, mapping_channel, areas, num_rois, spatial_scale, height, width, channels, pooled_height,
pooled_width, output_dim, bottom_diff, bottom_rois);
// cudaGetLastError only catches launch-configuration failures; in-kernel
// faults would surface at the next synchronizing call.
err = cudaGetLastError();
if(cudaSuccess != err)
{
fprintf( stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString( err ) );
exit( -1 );
}
return 1;
}
#ifdef __cplusplus
}
#endif
|
1ac096a4f74731dba87bcbf77789eb8769d7059d.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
long long int N = 1 << 27;
dim3 dimblock(1024, 1);
dim3 dimgrid((N+dimblock.x-1)/dimblock.x, 1, 1);
double iStart = 0, iElaps = 0, iElaps2 = 0, sum_iElaps2 = 0;
// Wall-clock time in seconds (microsecond resolution), via gettimeofday.
double cpuSecond() {
    struct timeval now;
    gettimeofday(&now, NULL);
    double seconds = (double)now.tv_sec;
    double micros = (double)now.tv_usec;
    return seconds + micros * 1.e-6;
}
// Compare host and device results element-wise and report every mismatch.
// The data is integral (long long), so an exact comparison is correct; the
// previous floating-point epsilon test routed the difference through abs(),
// which on a long long argument can resolve to int abs() and silently
// truncate large differences.
void checkResult(long long int *hostRef, long long int *gpuRef, long long int N) {
    long long int match = 1;
    for (long long int i = 0; i < N; i++) {
        if (hostRef[i] != gpuRef[i]) {
            match = 0;
            printf("Results do not match!\n");
            printf("host %lld gpu %lld at current%lld\n", hostRef[i], gpuRef[i], i);
            //break;
        }
    }
    if (match)
        printf("Results match.\n");
    return;
}
//===========================================CPU============================================
// Merge the two sorted runs word[h..m] and word[m+1..t] (inclusive bounds).
// ans is scratch space of at least t+1 elements; the merged result is copied
// back into word[h..t]. Stable: on ties the left run's element goes first.
void merge(long long int* word, long long int h, long long int t, long long int m, long long int* ans) {
    long long int left = h, right = m + 1, out = h;
    // Take the smaller head element while both runs are non-empty.
    while (left <= m && right <= t) {
        ans[out++] = (word[left] <= word[right]) ? word[left++] : word[right++];
    }
    // Drain whichever run still has elements.
    while (left <= m) {
        ans[out++] = word[left++];
    }
    while (right <= t) {
        ans[out++] = word[right++];
    }
    // Copy the merged segment back in place.
    for (long long int k = h; k <= t; k++) {
        word[k] = ans[k];
    }
}
// Recursive top-down merge sort of word[h..t] (inclusive), using ans as
// scratch space for the merge step.
void cpu_mergesort(long long int* word, long long int h, long long int t, long long int* ans) {
    if (h >= t) return;                    // zero or one element: already sorted
    long long int mid = h + (t - h) / 2;   // same midpoint as (h+t)/2 for h,t >= 0
    cpu_mergesort(word, h, mid, ans);      // sort left half
    cpu_mergesort(word, mid + 1, t, ans);  // sort right half
    merge(word, h, t, mid, ans);           // combine the sorted halves
}
//==========================================CPU END=============================================
//===========================================GPU============================================
__device__ void gpu_bottomUpMerge(long long int* source, long long int* dest, long long int start, long long int middle, long long int end) {
long long int i = start;
long long int j = middle;
for (long long int k = start; k < end; k++) {
if (i < middle && (j >= end || source[i] < source[j])) {
dest[k] = source[i];
i++;
} else {
dest[k] = source[j];
j++;
}
}
}
// One pass of bottom-up GPU merge sort: each thread merges one pair of
// adjacent sorted runs of length width/2 from source[] into a single sorted
// run of length width in dest[]. Threads whose run would start past the end
// of the array do nothing.
__global__ void gpu_mergesort(long long int* source, long long int* dest, long long int size, long long int width) {
// Flat thread index over a (possibly 2D) thread block.
long long int idx = blockIdx.x * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x;
long long int start = width*idx, middle, end;
if (start < size){
// Clamp both run boundaries at the tail of the array.
middle = min(start + (width >> 1), size);
end = min(start + width, size);
gpu_bottomUpMerge(source, dest, start, middle, end);
}
}
// Bottom-up GPU merge sort: repeatedly launches gpu_mergesort with a doubling
// run width, ping-ponging between two device buffers. The sorted result ends
// up in whichever buffer A points at and is copied to Ans_gpu_sorted_data.
// Prints per-pass timings and accumulates them into the global sum_iElaps2.
void mergesort(long long int *gpu_sorted_data, long long int size, long long int *Ans_gpu_sorted_data) {
    long long int* D_data;
    long long int* D_swp;
    hipMalloc((void**) &D_data, size * sizeof(long long int));
    hipMalloc((void**) &D_swp, size * sizeof(long long int));
    hipMemcpy(D_data, gpu_sorted_data, size * sizeof(long long int), hipMemcpyHostToDevice);
    long long int* A = D_data;  // current source buffer
    long long int* B = D_swp;   // current destination buffer
    for (long long int width = 2; width < (size << 1); width <<= 1) {
        iStart = cpuSecond();
        hipLaunchKernelGGL(( gpu_mergesort), dim3(dimgrid), dim3(dimblock), 0, 0, A, B, size, width);
        // Kernel launches are asynchronous: without this sync the timer below
        // would only measure launch overhead, not the merge pass itself.
        hipDeviceSynchronize();
        iElaps2 = cpuSecond() - iStart;
        sum_iElaps2 += iElaps2;
        printf("width = %lld elapsed time: %7.5f ms\n", width, iElaps2*1000);
        // Swap roles: this pass's output becomes the next pass's input.
        A = A == D_data ? D_swp : D_data;
        B = B == D_data ? D_swp : D_data;
    }
    // After the final swap, A names the buffer holding the fully sorted data.
    hipMemcpy(Ans_gpu_sorted_data, A, size * sizeof(long long int), hipMemcpyDeviceToHost);
    hipFree(D_data);
    hipFree(D_swp);
    hipDeviceReset();
}
//==========================================GPU END=============================================
// Entry point: sorts N random long long values with both a recursive CPU merge
// sort and a bottom-up GPU merge sort, times each, and verifies the results
// agree. An optional command-line argument overrides N.
int main(int argc, char **argv)
{
    iStart = 0, iElaps = 0, iElaps2 = 0;
    long long int *data, *cpu_sorted_data, *gpu_sorted_data;
    long long int *Ans_cpu_sorted_data, *Ans_gpu_sorted_data;
    if (argc == 2)
        N = atoll(argv[1]);  // atoll, not atoi: N is long long and may exceed INT_MAX
    // dimgrid was initialised at load time from the default N; recompute it so
    // a command-line N still gets one thread per element pair.
    dimgrid = dim3((N + dimblock.x - 1) / dimblock.x, 1, 1);
    printf("N = %lld\n", N);
    data = (long long int *)malloc(N*sizeof(long long int));
    cpu_sorted_data = (long long int *)malloc(N*sizeof(long long int));
    gpu_sorted_data = (long long int *)malloc(N*sizeof(long long int));
    Ans_cpu_sorted_data = (long long int *)malloc(N*sizeof(long long int));
    Ans_gpu_sorted_data = (long long int *)malloc(N*sizeof(long long int));
    if (!data || !cpu_sorted_data || !gpu_sorted_data || !Ans_cpu_sorted_data || !Ans_gpu_sorted_data) {
        fprintf(stderr, "host allocation of %lld elements failed\n", N);
        return 1;
    }
    srand(time(NULL));
    for (long long int k=0; k<N; k++)
        data[k] = rand() % (N*10);
    printf("===========================CPU====================================\n");
    memcpy(cpu_sorted_data, data, N*sizeof(long long int));
    printf("cpu Mergesort is starting...\n");
    iStart = cpuSecond();
    cpu_mergesort(cpu_sorted_data, 0, N-1, Ans_cpu_sorted_data);//cpu_mergesort
    iElaps = cpuSecond() - iStart;
    printf("cpu Mergesort is done.\n");
    printf("Mergesort on CPU elapsed time: %7.5f ms\n", iElaps*1000);
    printf("===========================GPU====================================\n");
    //printf("//////////////////=====\n");
    memcpy(gpu_sorted_data, data, N*sizeof(long long int));
    printf("GPU Mergesort is starting...\n");
    //iStart = cpuSecond();
    sum_iElaps2 = 0;
    mergesort(gpu_sorted_data, N, Ans_gpu_sorted_data);
    //iElaps2 = cpuSecond() - iStart;
    printf("GPU Mergesort is done.\n");
    printf("Mergesort on GPU elapsed time: %7.5f ms\n", sum_iElaps2*1000);
    checkResult(Ans_cpu_sorted_data, Ans_gpu_sorted_data, N);
    free(data);
    free(cpu_sorted_data);
    free(gpu_sorted_data);
    free(Ans_cpu_sorted_data);
    free(Ans_gpu_sorted_data);
    return 0;
}
| 1ac096a4f74731dba87bcbf77789eb8769d7059d.cu | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <cuda_runtime.h>
long long int N = 1 << 27;
dim3 dimblock(1024, 1);
dim3 dimgrid((N+dimblock.x-1)/dimblock.x, 1, 1);
double iStart = 0, iElaps = 0, iElaps2 = 0, sum_iElaps2 = 0;
double cpuSecond() {
struct timeval tp;
gettimeofday(&tp,NULL);
return ((double)tp.tv_sec + (double)tp.tv_usec*1.e-6);
}
void checkResult(long long int *hostRef, long long int *gpuRef, long long int N) {
double epsilon = 1.0E-8;
long long int match = 1;
for (long long int i = 0; i < N; i++) {
if (abs(hostRef[i] - gpuRef[i]) > epsilon) {
match = 0;
printf("Results do not match!\n");
printf("host %lld gpu %lld at current%lld\n", hostRef[i], gpuRef[i], i);
//break;
}
}
if (match)
printf("Results match.\n");
return;
}
//===========================================CPU============================================
// Merge the two sorted runs word[h..m] and word[m+1..t] (inclusive bounds),
// using ans as scratch space; the merged result is copied back into word.
void merge(long long int* word,long long int h,long long int t,long long int m,long long int* ans){// merge (think of it as merging two arrays)
long long int i=h,j=m+1,k=h;
while(i<=m && j<=t){
if(word[i]<=word[j]){// later element is larger, no swap needed
ans[k]=word[i];
i++;
}
else{// earlier element is larger, swap
ans[k]=word[j];
j++;
}
k++;
}
// Drain the remainder of the left run, if any.
while (i<=m) {
ans[k]=word[i];
i++;
k++;
}
// Drain the remainder of the right run, if any.
while (j<=t) {
ans[k]=word[j];
j++;
k++;
}
// Copy the merged segment back in place.
for(long long int i=h;i<=t;i++){
word[i]=ans[i];
}
}
// Recursive top-down merge sort of word[h..t] (inclusive), using ans as
// scratch space for the merge step.
void cpu_mergesort(long long int* word,long long int h,long long int t,long long int* ans){// split first (from the middle)
if(h<t){
long long int m=(h+t)/2;
cpu_mergesort(word,h,m,ans);// split into two halves and recurse
cpu_mergesort(word,m+1,t,ans);
merge(word,h,t,m,ans);// merge after splitting
}
}
//==========================================CPU END=============================================
//===========================================GPU============================================
__device__ void gpu_bottomUpMerge(long long int* source, long long int* dest, long long int start, long long int middle, long long int end) {
long long int i = start;
long long int j = middle;
for (long long int k = start; k < end; k++) {
if (i < middle && (j >= end || source[i] < source[j])) {
dest[k] = source[i];
i++;
} else {
dest[k] = source[j];
j++;
}
}
}
__global__ void gpu_mergesort(long long int* source, long long int* dest, long long int size, long long int width) {
long long int idx = blockIdx.x * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x;
long long int start = width*idx, middle, end;
if (start < size){
middle = min(start + (width >> 1), size);
end = min(start + width, size);
gpu_bottomUpMerge(source, dest, start, middle, end);
}
}
// Bottom-up GPU merge sort: repeatedly launches gpu_mergesort with a doubling
// run width, ping-ponging between two device buffers. The sorted result ends
// up in whichever buffer A points at and is copied to Ans_gpu_sorted_data.
// Prints per-pass timings and accumulates them into the global sum_iElaps2.
void mergesort(long long int *gpu_sorted_data, long long int size, long long int *Ans_gpu_sorted_data) {
    long long int* D_data;
    long long int* D_swp;
    cudaMalloc((void**) &D_data, size * sizeof(long long int));
    cudaMalloc((void**) &D_swp, size * sizeof(long long int));
    cudaMemcpy(D_data, gpu_sorted_data, size * sizeof(long long int), cudaMemcpyHostToDevice);
    long long int* A = D_data;  // current source buffer
    long long int* B = D_swp;   // current destination buffer
    for (long long int width = 2; width < (size << 1); width <<= 1) {
        iStart = cpuSecond();
        gpu_mergesort<<<dimgrid, dimblock>>>(A, B, size, width);
        // Kernel launches are asynchronous: without this sync the timer below
        // would only measure launch overhead, not the merge pass itself.
        cudaDeviceSynchronize();
        iElaps2 = cpuSecond() - iStart;
        sum_iElaps2 += iElaps2;
        printf("width = %lld elapsed time: %7.5f ms\n", width, iElaps2*1000);
        // Swap roles: this pass's output becomes the next pass's input.
        A = A == D_data ? D_swp : D_data;
        B = B == D_data ? D_swp : D_data;
    }
    // After the final swap, A names the buffer holding the fully sorted data.
    cudaMemcpy(Ans_gpu_sorted_data, A, size * sizeof(long long int), cudaMemcpyDeviceToHost);
    cudaFree(D_data);
    cudaFree(D_swp);
    cudaDeviceReset();
}
//==========================================GPU END=============================================
// Entry point: sorts N random long long values with both a recursive CPU merge
// sort and a bottom-up GPU merge sort, times each, and verifies the results
// agree. An optional command-line argument overrides N.
int main(int argc, char **argv)
{
    iStart = 0, iElaps = 0, iElaps2 = 0;
    long long int *data, *cpu_sorted_data, *gpu_sorted_data;
    long long int *Ans_cpu_sorted_data, *Ans_gpu_sorted_data;
    if (argc == 2)
        N = atoll(argv[1]);  // atoll, not atoi: N is long long and may exceed INT_MAX
    // dimgrid was initialised at load time from the default N; recompute it so
    // a command-line N still gets one thread per element pair.
    dimgrid = dim3((N + dimblock.x - 1) / dimblock.x, 1, 1);
    printf("N = %lld\n", N);
    data = (long long int *)malloc(N*sizeof(long long int));
    cpu_sorted_data = (long long int *)malloc(N*sizeof(long long int));
    gpu_sorted_data = (long long int *)malloc(N*sizeof(long long int));
    Ans_cpu_sorted_data = (long long int *)malloc(N*sizeof(long long int));
    Ans_gpu_sorted_data = (long long int *)malloc(N*sizeof(long long int));
    if (!data || !cpu_sorted_data || !gpu_sorted_data || !Ans_cpu_sorted_data || !Ans_gpu_sorted_data) {
        fprintf(stderr, "host allocation of %lld elements failed\n", N);
        return 1;
    }
    srand(time(NULL));
    for (long long int k=0; k<N; k++)
        data[k] = rand() % (N*10);
    printf("===========================CPU====================================\n");
    memcpy(cpu_sorted_data, data, N*sizeof(long long int));
    printf("cpu Mergesort is starting...\n");
    iStart = cpuSecond();
    cpu_mergesort(cpu_sorted_data, 0, N-1, Ans_cpu_sorted_data);//cpu_mergesort
    iElaps = cpuSecond() - iStart;
    printf("cpu Mergesort is done.\n");
    printf("Mergesort on CPU elapsed time: %7.5f ms\n", iElaps*1000);
    printf("===========================GPU====================================\n");
    //printf("//////////////////=====\n");
    memcpy(gpu_sorted_data, data, N*sizeof(long long int));
    printf("GPU Mergesort is starting...\n");
    //iStart = cpuSecond();
    sum_iElaps2 = 0;
    mergesort(gpu_sorted_data, N, Ans_gpu_sorted_data);
    //iElaps2 = cpuSecond() - iStart;
    printf("GPU Mergesort is done.\n");
    printf("Mergesort on GPU elapsed time: %7.5f ms\n", sum_iElaps2*1000);
    checkResult(Ans_cpu_sorted_data, Ans_gpu_sorted_data, N);
    free(data);
    free(cpu_sorted_data);
    free(gpu_sorted_data);
    free(Ans_cpu_sorted_data);
    free(Ans_gpu_sorted_data);
    return 0;
}
|
f5b43304cc00ddd2b2127fb9083a1057a0551af6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Written by Vasily Volkov.
// Copyright (c) 2008-2009, The Regents of the University of California.
// All rights reserved.
#include "codelets.h"
__global__ void IFFT8_device( float2 *work )
{
int tid = threadIdx.x;
work += (blockIdx.y * gridDim.x + blockIdx.x) * 512 + tid;
float2 a[8];
load<8>( a, work, 64 );
IFFT8( a );
store<8>( a, work, 64 );
}
// Host wrapper: launches the 512-point inverse-FFT codelet with 64 threads
// per block, batch/64 blocks laid out via grid2D.
// NOTE(review): assumes batch is a multiple of 64 (batch/64 truncates
// otherwise) — confirm callers guarantee this.
extern "C" void IFFT8( float2 *work, int batch )
{
hipLaunchKernelGGL(( IFFT8_device), dim3(grid2D(batch/64)), dim3(64) , 0, 0, work );
}
| f5b43304cc00ddd2b2127fb9083a1057a0551af6.cu | // Written by Vasily Volkov.
// Copyright (c) 2008-2009, The Regents of the University of California.
// All rights reserved.
#include "codelets.h"
__global__ void IFFT8_device( float2 *work )
{
int tid = threadIdx.x;
work += (blockIdx.y * gridDim.x + blockIdx.x) * 512 + tid;
float2 a[8];
load<8>( a, work, 64 );
IFFT8( a );
store<8>( a, work, 64 );
}
extern "C" void IFFT8( float2 *work, int batch )
{
IFFT8_device<<< grid2D(batch/64), 64 >>>( work );
}
|
7838287e4405401012027e6ad79e5d113aa75669.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <future>
#define SNAPSHOT_INCREMENT 200.0f
#define SAMPLE_BUF_SIZE 1024
#define NUMERIC_TYPE float
#define C(x) x##f
#define checkCudaErrors(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Print file/line context and abort the process if a HIP API call returned an
// error. Invoked through the checkCudaErrors() wrapper macro above.
inline void gpuAssert(hipError_t code, const char *file, int line)
{
if (code != hipSuccess)
{
fprintf(stderr, "%s %s %d\n", hipGetErrorString(code), file, line);
exit(code);
}
}
typedef struct Samples
{
NUMERIC_TYPE* time;
NUMERIC_TYPE* value;
int buf_size;
} Samples;
void allocate_D(Samples& buf, const int buf_size)
{
buf.buf_size = buf_size;
checkCudaErrors(hipMalloc(&(buf.time), buf_size*sizeof(NUMERIC_TYPE)));
checkCudaErrors(hipMalloc(&(buf.value), buf_size*sizeof(NUMERIC_TYPE)));
}
void allocate_H(Samples& buf, const int buf_size)
{
buf.buf_size = buf_size;
checkCudaErrors(hipHostMalloc(&(buf.time), buf_size*sizeof(NUMERIC_TYPE)));
checkCudaErrors(hipHostMalloc(&(buf.value), buf_size*sizeof(NUMERIC_TYPE)));
}
void copy_device_to_host(Samples& buf_H, Samples& buf_D, const int entries)
{
checkCudaErrors(hipMemcpy(buf_H.time, buf_D.time, entries*sizeof(NUMERIC_TYPE), hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(buf_H.value, buf_D.value, entries*sizeof(NUMERIC_TYPE), hipMemcpyDeviceToHost));
}
void deallocate_D(Samples& buf)
{
checkCudaErrors(hipFree(buf.time));
checkCudaErrors(hipFree(buf.value));
}
void deallocate_H(Samples& buf)
{
checkCudaErrors(hipHostFree(buf.time));
checkCudaErrors(hipHostFree(buf.value));
}
__constant__ int xsz;
__constant__ NUMERIC_TYPE end_time = C(1000.0);
__managed__ bool finished = false;
__managed__ bool write_snapshot = false;
__managed__ NUMERIC_TYPE t = C(0.0);
__device__ NUMERIC_TYPE next_snapshot_time = SNAPSHOT_INCREMENT;
Samples buf_D;
Samples buf_H;
__managed__ int sample_idx = 0;
// Dump a snapshot of the solution array to results/<t>.dat, tab-separated,
// 80 values per line. Reports and returns (instead of crashing on a NULL
// FILE*) if the file cannot be opened, e.g. when results/ does not exist.
void write(NUMERIC_TYPE* snapshot, const int xsz, NUMERIC_TYPE t)
{
    char filename[256];
    sprintf(filename, "results/%.3f.dat", t);
    FILE* file = fopen(filename, "wb");
    if (file == NULL)
    {
        fprintf(stderr, "failed to open %s for writing\n", filename);
        return;
    }
    for (int i=0; i<xsz; i++)
    {
        fprintf(file, "%.1f\t", snapshot[i]);
        if (i % 80 == 79) fprintf(file, "\n");
    }
    fclose(file);
}
void write(FILE* file, Samples& buf, const int entries)
{
for (int i=0; i<entries; i++)
{
fprintf(file, "%.2f %.2f\n", buf.time[i], buf.value[i]);
}
}
__global__ void update_flow_variables(NUMERIC_TYPE* U)
{
int global_i = blockIdx.x*blockDim.x + threadIdx.x;
for (int i=global_i; i<xsz; i+=blockDim.x*gridDim.x)
{
NUMERIC_TYPE u = U[i];
for (int l=0; l<10; l++)
{
u = u*u;
u = sqrt(u);
}
U[i] = u + C(0.1);
}
}
__global__ void simulate(NUMERIC_TYPE* U, Samples buf)
{
NUMERIC_TYPE dt = C(0.1);
write_snapshot = false;
sample_idx = 0;
while (t < end_time && t < next_snapshot_time && sample_idx < SAMPLE_BUF_SIZE)
{
hipLaunchKernelGGL(( update_flow_variables), dim3(256), dim3(256), 0, 0, U);
if (hipDeviceSynchronize() != hipSuccess) return;
t += dt;
buf.time[sample_idx] = t;
buf.value[sample_idx] = U[0];
sample_idx++;
if (t >= end_time)
{
write_snapshot = true;
finished = true;
}
}
if (t >= next_snapshot_time)
{
write_snapshot = true;
next_snapshot_time += SNAPSHOT_INCREMENT;
}
}
// Driver: repeatedly runs the persistent `simulate` kernel, overlapping device
// compute with asynchronous host-side file output of time-series samples and
// periodic solution snapshots.
int main(int argc, char *argv[])
{
    const int xsz_H = 2 << 24; // 2<<24 == 1<<25: ~33.5M elements
    const size_t U_size = xsz_H*sizeof(NUMERIC_TYPE);
    checkCudaErrors(hipMemcpyToSymbol(xsz, &xsz_H, sizeof(int)));
    NUMERIC_TYPE* snapshot_H;
    checkCudaErrors(hipHostMalloc(&snapshot_H, U_size)); // pinned for fast D2H copies
    memset(snapshot_H, 0, U_size);
    NUMERIC_TYPE* current_solution_D;
    checkCudaErrors(hipMalloc(&current_solution_D, U_size));
    checkCudaErrors(hipMemcpy(current_solution_D, snapshot_H, U_size, hipMemcpyHostToDevice));
    allocate_D(buf_D, SAMPLE_BUF_SIZE);
    allocate_H(buf_H, SAMPLE_BUF_SIZE);
    FILE* samples_file = fopen("results/samples.dat", "wb");
    if (samples_file == NULL)
    {
        fprintf(stderr, "failed to open results/samples.dat for writing\n");
        return 1;
    }
    // Start with completed no-op futures so the first wait() calls are valid.
    auto snapshot_future = std::async(std::launch::async, []{});
    auto samples_future = std::async(std::launch::async, []{});
    printf("HOST initialised\n");
    while (!finished)
    {
        hipLaunchKernelGGL(( simulate), dim3(1),dim3(1), 0, 0, current_solution_D, buf_D);
        // Finish last iteration's writers before overwriting their buffers.
        samples_future.wait();
        snapshot_future.wait();
        checkCudaErrors(hipDeviceSynchronize());
        printf("HOST memcpy from device to host\n");
        checkCudaErrors(hipMemcpy(snapshot_H, current_solution_D, U_size, hipMemcpyDeviceToHost));
        copy_device_to_host(buf_H, buf_D, sample_idx);
        printf("HOST t=%f\n", t);
        printf("HOST write samples async\n");
        const int entries = sample_idx;
        // Capture by VALUE: `entries` is an iteration-local, and a [&] capture
        // dangles once the iteration ends while the async task may still be
        // running. samples_file/snapshot_H outlive the tasks but are pointers,
        // so value capture is correct and cheap. buf_H is a file-scope global.
        samples_future = std::async(std::launch::async, [samples_file, entries]{write(samples_file, buf_H, entries);});
        if (write_snapshot)
        {
            printf("HOST write snapshot async\n");
            // NOTE(review): t is truncated to a whole number for the snapshot
            // filename — confirm this is the intended naming scheme.
            const int t_H = t;
            snapshot_future = std::async(std::launch::async, [snapshot_H, xsz_H, t_H]{write(snapshot_H, xsz_H, t_H);});
        }
    }
    samples_future.wait();
    snapshot_future.wait();
    checkCudaErrors(hipHostFree(snapshot_H));
    checkCudaErrors(hipFree(current_solution_D));
    deallocate_D(buf_D);
    deallocate_H(buf_H);
    fclose(samples_file);
    return 0;
}
| 7838287e4405401012027e6ad79e5d113aa75669.cu | #include <cstdio>
#include <future>
#define SNAPSHOT_INCREMENT 200.0f
#define SAMPLE_BUF_SIZE 1024
#define NUMERIC_TYPE float
#define C(x) x##f
#define checkCudaErrors(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line)
{
if (code != cudaSuccess)
{
fprintf(stderr, "%s %s %d\n", cudaGetErrorString(code), file, line);
exit(code);
}
}
typedef struct Samples
{
NUMERIC_TYPE* time;
NUMERIC_TYPE* value;
int buf_size;
} Samples;
void allocate_D(Samples& buf, const int buf_size)
{
buf.buf_size = buf_size;
checkCudaErrors(cudaMalloc(&(buf.time), buf_size*sizeof(NUMERIC_TYPE)));
checkCudaErrors(cudaMalloc(&(buf.value), buf_size*sizeof(NUMERIC_TYPE)));
}
void allocate_H(Samples& buf, const int buf_size)
{
buf.buf_size = buf_size;
checkCudaErrors(cudaMallocHost(&(buf.time), buf_size*sizeof(NUMERIC_TYPE)));
checkCudaErrors(cudaMallocHost(&(buf.value), buf_size*sizeof(NUMERIC_TYPE)));
}
void copy_device_to_host(Samples& buf_H, Samples& buf_D, const int entries)
{
checkCudaErrors(cudaMemcpy(buf_H.time, buf_D.time, entries*sizeof(NUMERIC_TYPE), cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(buf_H.value, buf_D.value, entries*sizeof(NUMERIC_TYPE), cudaMemcpyDeviceToHost));
}
void deallocate_D(Samples& buf)
{
checkCudaErrors(cudaFree(buf.time));
checkCudaErrors(cudaFree(buf.value));
}
void deallocate_H(Samples& buf)
{
checkCudaErrors(cudaFreeHost(buf.time));
checkCudaErrors(cudaFreeHost(buf.value));
}
__constant__ int xsz;
__constant__ NUMERIC_TYPE end_time = C(1000.0);
__managed__ bool finished = false;
__managed__ bool write_snapshot = false;
__managed__ NUMERIC_TYPE t = C(0.0);
__device__ NUMERIC_TYPE next_snapshot_time = SNAPSHOT_INCREMENT;
Samples buf_D;
Samples buf_H;
__managed__ int sample_idx = 0;
// Dump a snapshot of the solution array to results/<t>.dat, tab-separated,
// 80 values per line. Reports and returns (instead of crashing on a NULL
// FILE*) if the file cannot be opened, e.g. when results/ does not exist.
void write(NUMERIC_TYPE* snapshot, const int xsz, NUMERIC_TYPE t)
{
    char filename[256];
    sprintf(filename, "results/%.3f.dat", t);
    FILE* file = fopen(filename, "wb");
    if (file == NULL)
    {
        fprintf(stderr, "failed to open %s for writing\n", filename);
        return;
    }
    for (int i=0; i<xsz; i++)
    {
        fprintf(file, "%.1f\t", snapshot[i]);
        if (i % 80 == 79) fprintf(file, "\n");
    }
    fclose(file);
}
void write(FILE* file, Samples& buf, const int entries)
{
for (int i=0; i<entries; i++)
{
fprintf(file, "%.2f %.2f\n", buf.time[i], buf.value[i]);
}
}
__global__ void update_flow_variables(NUMERIC_TYPE* U)
{
int global_i = blockIdx.x*blockDim.x + threadIdx.x;
for (int i=global_i; i<xsz; i+=blockDim.x*gridDim.x)
{
NUMERIC_TYPE u = U[i];
for (int l=0; l<10; l++)
{
u = u*u;
u = sqrt(u);
}
U[i] = u + C(0.1);
}
}
__global__ void simulate(NUMERIC_TYPE* U, Samples buf)
{
NUMERIC_TYPE dt = C(0.1);
write_snapshot = false;
sample_idx = 0;
while (t < end_time && t < next_snapshot_time && sample_idx < SAMPLE_BUF_SIZE)
{
update_flow_variables<<<256, 256>>>(U);
if (cudaDeviceSynchronize() != cudaSuccess) return;
t += dt;
buf.time[sample_idx] = t;
buf.value[sample_idx] = U[0];
sample_idx++;
if (t >= end_time)
{
write_snapshot = true;
finished = true;
}
}
if (t >= next_snapshot_time)
{
write_snapshot = true;
next_snapshot_time += SNAPSHOT_INCREMENT;
}
}
int main(int argc, char *argv[])
{
const int xsz_H = 2 << 24; // ~16M elements
const size_t U_size = xsz_H*sizeof(NUMERIC_TYPE);
checkCudaErrors(cudaMemcpyToSymbol(xsz, &xsz_H, sizeof(int)));
NUMERIC_TYPE* snapshot_H;
checkCudaErrors(cudaMallocHost(&snapshot_H, U_size));
memset(snapshot_H, 0, U_size);
NUMERIC_TYPE* current_solution_D;
checkCudaErrors(cudaMalloc(¤t_solution_D, U_size));
checkCudaErrors(cudaMemcpy(current_solution_D, snapshot_H, U_size, cudaMemcpyHostToDevice));
allocate_D(buf_D, SAMPLE_BUF_SIZE);
allocate_H(buf_H, SAMPLE_BUF_SIZE);
FILE* samples_file = fopen("results/samples.dat", "wb");
auto snapshot_future = std::async(std::launch::async, []{});
auto samples_future = std::async(std::launch::async, []{});
printf("HOST initialised\n");
while (!finished)
{
simulate<<<1,1>>>(current_solution_D, buf_D);
samples_future.wait();
snapshot_future.wait();
checkCudaErrors(cudaDeviceSynchronize());
printf("HOST memcpy from device to host\n");
checkCudaErrors(cudaMemcpy(snapshot_H, current_solution_D, U_size, cudaMemcpyDeviceToHost));
copy_device_to_host(buf_H, buf_D, sample_idx);
printf("HOST t=%f\n", t);
printf("HOST write samples async\n");
const int entries = sample_idx;
samples_future = std::async(std::launch::async, [&]{write(samples_file, buf_H, entries);});
if (write_snapshot)
{
printf("HOST write snapshot async\n");
const int t_H = t;
snapshot_future = std::async(std::launch::async, [&]{write(snapshot_H, xsz_H, t_H);});
}
}
samples_future.wait();
snapshot_future.wait();
checkCudaErrors(cudaFreeHost(snapshot_H));
checkCudaErrors(cudaFree(current_solution_D));
deallocate_D(buf_D);
deallocate_H(buf_H);
fclose(samples_file);
return 0;
}
|
1b47e76aa5b1d14dc6c8fb9a34d34b8bb060386c.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2014 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <algorithm>
#include "../include/util.cuh"
#include "../include/worker.cuh"
#include "../include/timer.cuh"
using namespace std;
/*
* ====================
* WorkResult
* ====================
*/
// Result message posted to ConvNet's result queue. Takes ownership of the
// Cost passed at construction and deletes it in the destructor.
WorkResult::WorkResult(WorkResult::RESULTS resultType, Cost& results) : _resultType(resultType), _results(&results) {
}
// Result with no attached Cost (e.g. a plain SYNC_DONE notification).
WorkResult::WorkResult(WorkResult::RESULTS resultType) : _resultType(resultType), _results(NULL) {
}
WorkResult::~WorkResult() {
delete _results; // delete NULL is ok
}
// The returned reference is owned by this WorkResult; callers must use it
// before the WorkResult is destroyed.
Cost& WorkResult::getResults() const {
return *_results;
}
WorkResult::RESULTS WorkResult::getResultType() const {
return _resultType;
}
/*
* ====================
* Worker
* ====================
*/
Worker::Worker(ConvNet& convNet) : _convNet(&convNet) {
}
Worker::~Worker() {
}
/*
* ====================
* DataWorker
* ====================
*/
DataWorker::DataWorker(ConvNet& convNet, CPUData& data) : Worker(convNet), _data(&data), _dp(NULL) {
assert(_data != NULL);
}
bool DataWorker::run() {
_dp = &_convNet->getDataProvider();
_dp->setData(*_data);
_run();
_dp->clearData();
return false;
}
DataWorker::~DataWorker() {
}
/*
* ====================
* TrainingWorker
* ====================
*/
TrainingWorker::TrainingWorker(ConvNet& convNet, CPUData& data, double progress, bool test)
: DataWorker(convNet, data), _progress(progress), _test(test) {
}
// Run one epoch's worth of work over every minibatch currently held by the
// data provider. In training mode each fprop pass is followed by bprop and a
// weight update; in test mode (_test) only the cost is accumulated.
void TrainingWorker::_run() {
_convNet->setTrainingProgress(_progress);
// Ownership of batchCost passes to the WorkResult enqueued at the end.
Cost& batchCost = *new Cost();
int numMinibatches = _dp->getNumMinibatches();
for (int i = 0; i < numMinibatches; i++) {
for (int p = 0; p < _convNet->getNumPasses(); p++) {
_convNet->fprop(i, p, _test ? PASS_TEST : PASS_TRAIN);
_convNet->getCost(batchCost);
if (!_test) {
_convNet->bprop(p, PASS_TRAIN);
_convNet->updateWeights(p);
}
}
}
_convNet->getResultQueue().enqueue(new WorkResult(WorkResult::BATCH_DONE, batchCost));
}
/*
* ====================
* SyncWorker
* ====================
*/
SyncWorker::SyncWorker(ConvNet& convNet) : Worker(convNet) {
}
bool SyncWorker::run() {
_convNet->copyToCPU();
_convNet->getResultQueue().enqueue(new WorkResult(WorkResult::SYNC_DONE));
return false;
}
/*
* ====================
* ExitWorker
* ====================
*/
ExitWorker::ExitWorker(ConvNet& convNet) : Worker(convNet) {
}
bool ExitWorker::run() {
return true;
}
/*
* ====================
* GradCheckWorker
* ====================
*/
GradCheckWorker::GradCheckWorker(ConvNet& convNet, CPUData& data)
: DataWorker(convNet, data) {
}
void GradCheckWorker::_run() {
_convNet->checkGradients();
exit(0); // eh
}
/*
* ====================
* MultiviewTestWorker
* ====================
*/
MultiviewTestWorker::MultiviewTestWorker(ConvNet& convNet, CPUData& data, int numViews, Matrix& cpuProbs, const char* logregName)
: DataWorker(convNet, data), _numViews(numViews), _cpuProbs(&cpuProbs), _logregName(logregName) {
// assert(_data->getNumCases() % _numViews == 0);
// assert(convNet.getNumReplicas() == 1); // For now?
}
MultiviewTestWorker::MultiviewTestWorker(ConvNet& convNet, CPUData& data, int numViews)
: DataWorker(convNet, data), _numViews(numViews), _cpuProbs(NULL), _logregName("") {
// assert(_data->getNumCases() % _numViews == 0);
}
MultiviewTestWorker::~MultiviewTestWorker() {
// delete _cpuProbs;
}
CPUData& MultiviewTestWorker::getMinibatch(int v, int i) {
int numCasesPerView = _dp->getNumCases() / _numViews;
int miniStart = v * numCasesPerView + i * _dp->getMinibatchSize();
int miniEnd = v * numCasesPerView + min(numCasesPerView, (i + 1) * _dp->getMinibatchSize());
CPUData& mini = _dp->getDataSlice(miniStart, miniEnd);
return mini;
}
void MultiviewTestWorker::_run() {
int numCasesPerView = _dp->getNumCases() / _numViews;
int numMiniPerView = DIVUP(numCasesPerView, _dp->getMinibatchSize());
Cost& batchCost = *new Cost();
for (int i = 0; i < numMiniPerView; i++) {
for (int v = 0; v < _numViews - 1; v++) {
for (int p = 0; p < _convNet->getNumPasses(); p++) {
_convNet->fprop(getMinibatch(v, i), p, v == 0 ? PASS_MULTIVIEW_TEST_START : PASS_MULTIVIEW_TEST);
}
}
for (int p = 0; p < _convNet->getNumPasses(); p++) {
_convNet->fprop(getMinibatch(_numViews - 1, i), p, PASS_MULTIVIEW_TEST_END);
_convNet->getCost(batchCost);
}
if (_cpuProbs != NULL) {
LogregCostLayer& logregLayer = *dynamic_cast<LogregCostLayer*>(&_convNet->getLayer(_logregName, 0));
NVMatrix::setDeviceID(logregLayer.getDeviceID());
Matrix& miniProbs = _cpuProbs->sliceRows(i * _dp->getMinibatchSize(),
min(numCasesPerView, (i + 1) * _dp->getMinibatchSize()));
NVMatrix& acts = logregLayer.getProbsAccum(0);
NVMatrix acts_T;
acts.transpose(acts_T);
acts_T.copyToHost(miniProbs);
delete &miniProbs;
}
}
_convNet->getResultQueue().enqueue(new WorkResult(WorkResult::BATCH_DONE, batchCost));
}
/*
* ====================
* FeatureWorker
* ====================
*/
FeatureWorker::FeatureWorker(ConvNet& convNet, CPUData& data, MatrixV& ftrs, stringv& layerNames, bool deleteFeatures)
: DataWorker(convNet, data), _ftrs(&ftrs), _layerNames(&layerNames), _deleteFeatures(deleteFeatures) {
assert(layerNames.size() == ftrs.size());
for (int i = 0; i < layerNames.size(); i++) {
assert(ftrs[i]->getNumRows() == data.getNumCases());
assert(!ftrs[i]->isTrans());
}
}
FeatureWorker::~FeatureWorker() {
if (_deleteFeatures) {
for (int i = 0; i < _ftrs->size(); i++) {
delete _ftrs->at(i);
}
delete _ftrs;
}
delete _layerNames;
}
// Forward-propagate every minibatch and copy the activations of each requested
// layer — across all of its replicas — into the caller-supplied host feature
// matrices (_ftrs), one row per case.
void FeatureWorker::_run() {
Cost& batchCost = *new Cost();
map<int,int> repStart; // Feature write start offsets within minibatch
for (int i = 0; i < _dp->getNumMinibatches(); i++) {
for (int f = 0; f < _layerNames->size(); f++) {
repStart[f] = 0;
}
for (int p = 0; p < _convNet->getNumPasses(); p++) {
_convNet->fprop(i, p, PASS_FEATURE_GEN);
_convNet->getCost(batchCost);
for (int f = 0; f < _layerNames->size(); f++) {
// Only copy on passes in which this layer's replica actually ran fprop.
if (_convNet->getLayer(_layerNames->at(f), 0).getFwdActiveInputReplicaIdx(p) >= 0) {
// Rows of the output matrix corresponding to this minibatch.
Matrix& miniFtrs = _ftrs->at(f)->sliceRows(i * _dp->getMinibatchSize(),
min(_dp->getNumCases(), (i + 1) * _dp->getMinibatchSize()));
for (int r = 0; r < _convNet->getLayer(_layerNames->at(f), 0).getNumReplicas(); ++r) {
Layer& ftrLayer = _convNet->getLayer(_layerNames->at(f), r);
int d = ftrLayer.getDeviceID();
NVMatrix::setDeviceID(d);
NVMatrix& acts = ftrLayer.getActs();
// Each replica fills its own consecutive row slice within the
// minibatch block, tracked per-layer by repStart.
Matrix& repMiniFtrs = miniFtrs.sliceRows(repStart[f],
min(int(miniFtrs.getNumRows()), repStart[f] + acts.getLeadingDim()));
NVMatrix acts_T;
acts.transpose(false);
acts.transpose(acts_T);
acts_T.copyToHost(repMiniFtrs);
NVMatrix::syncStream(); // eh why not
delete &repMiniFtrs;
repStart[f] += acts.getLeadingDim();
}
delete &miniFtrs;
}
}
}
}
_convNet->getResultQueue().enqueue(new WorkResult(WorkResult::BATCH_DONE, batchCost));
}
/*
* ====================
* DataGradWorker
* ====================
*/
/*
 * Worker intended to compute gradients of the cost with respect to the
 * input data (see the commented-out _run body).  Its sanity checks are
 * currently disabled along with the rest of the implementation.
 */
DataGradWorker::DataGradWorker(ConvNet& convNet, CPUData& data, Matrix& dataGrads, int dataLayerIdx, int softmaxLayerIdx)
: DataWorker(convNet, data), _dataGrads(&dataGrads), _dataLayerIdx(dataLayerIdx), _softmaxLayerIdx(softmaxLayerIdx) {
// assert(dataGrads.getNumRows() == data.getNumCases());
// assert(!dataGrads.isTrans());
}
/*
 * The delete of _dataGrads is commented out, so the caller keeps ownership
 * of the gradient matrix.
 */
DataGradWorker::~DataGradWorker() {
// delete _dataGrads;
}
/*
 * Disabled: the entire body is commented out, so this worker currently does
 * nothing and enqueues no result.  The commented code sketches the intended
 * behaviour: fprop each minibatch, seed the softmax gradient with
 * -(log(acts) + 1), bprop down to the data layer, and copy the data
 * gradients back to the host matrix.
 */
void DataGradWorker::_run() {
// DataLayer& dataLayer = *dynamic_cast<DataLayer*>(&_convNet->getLayer(_dataLayerIdx));
// SoftmaxLayer& softmaxLayer = *dynamic_cast<SoftmaxLayer*>(&_convNet->getLayer(_softmaxLayerIdx));
// softmaxLayer.setDoLogregGrad(false);
// Cost& batchCost = *new Cost(0);
// for (int i = 0; i < _dp->getNumMinibatches(); i++) {
// _convNet->fprop(i, PASS_TEST);
// _convNet->getCost(batchCost);
// softmaxLayer.getActs().apply(NVMatrixOps::Log(), softmaxLayer.getActsGrad());
//
// softmaxLayer.getActsGrad().addScalar(1);
// softmaxLayer.getActsGrad().scale(-1);
// softmaxLayer.incRcvdBInputs();
// softmaxLayer.bprop(PASS_TEST);
//
// Matrix& miniDataGrads = _dataGrads->sliceRows(i * _dp->getMinibatchSize(),
// min(_dp->getNumCases(), (i + 1) * _dp->getMinibatchSize()));
// NVMatrix& grads = dataLayer.getActsGrad();
// NVMatrix grads_T;
// if (grads.isTrans()) {
// NVMatrix& soft_T = grads.getTranspose();
// soft_T.transpose(grads_T);
// delete &soft_T;
// } else {
// grads.transpose(grads_T);
// }
// grads_T.copyToHost(miniDataGrads);
// delete &miniDataGrads;
//
// _convNet->reset();
// }
// hipDeviceSynchronize();
// _convNet->getResultQueue().enqueue(new WorkResult(WorkResult::BATCH_DONE, batchCost));
}
| 1b47e76aa5b1d14dc6c8fb9a34d34b8bb060386c.cu | /*
* Copyright 2014 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <algorithm>
#include "../include/util.cuh"
#include "../include/worker.cuh"
#include "../include/timer.cuh"
using namespace std;
/*
* ====================
* WorkResult
* ====================
*/
WorkResult::WorkResult(WorkResult::RESULTS resultType, Cost& results) : _resultType(resultType), _results(&results) {
}
WorkResult::WorkResult(WorkResult::RESULTS resultType) : _resultType(resultType), _results(NULL) {
}
WorkResult::~WorkResult() {
delete _results; // delete NULL is ok
}
Cost& WorkResult::getResults() const {
return *_results;
}
WorkResult::RESULTS WorkResult::getResultType() const {
return _resultType;
}
/*
* ====================
* Worker
* ====================
*/
Worker::Worker(ConvNet& convNet) : _convNet(&convNet) {
}
Worker::~Worker() {
}
/*
* ====================
* DataWorker
* ====================
*/
DataWorker::DataWorker(ConvNet& convNet, CPUData& data) : Worker(convNet), _data(&data), _dp(NULL) {
assert(_data != NULL);
}
bool DataWorker::run() {
_dp = &_convNet->getDataProvider();
_dp->setData(*_data);
_run();
_dp->clearData();
return false;
}
DataWorker::~DataWorker() {
}
/*
* ====================
* TrainingWorker
* ====================
*/
TrainingWorker::TrainingWorker(ConvNet& convNet, CPUData& data, double progress, bool test)
: DataWorker(convNet, data), _progress(progress), _test(test) {
}
void TrainingWorker::_run() {
_convNet->setTrainingProgress(_progress);
Cost& batchCost = *new Cost();
int numMinibatches = _dp->getNumMinibatches();
for (int i = 0; i < numMinibatches; i++) {
for (int p = 0; p < _convNet->getNumPasses(); p++) {
_convNet->fprop(i, p, _test ? PASS_TEST : PASS_TRAIN);
_convNet->getCost(batchCost);
if (!_test) {
_convNet->bprop(p, PASS_TRAIN);
_convNet->updateWeights(p);
}
}
}
_convNet->getResultQueue().enqueue(new WorkResult(WorkResult::BATCH_DONE, batchCost));
}
/*
* ====================
* SyncWorker
* ====================
*/
SyncWorker::SyncWorker(ConvNet& convNet) : Worker(convNet) {
}
bool SyncWorker::run() {
_convNet->copyToCPU();
_convNet->getResultQueue().enqueue(new WorkResult(WorkResult::SYNC_DONE));
return false;
}
/*
* ====================
* ExitWorker
* ====================
*/
ExitWorker::ExitWorker(ConvNet& convNet) : Worker(convNet) {
}
bool ExitWorker::run() {
return true;
}
/*
* ====================
* GradCheckWorker
* ====================
*/
GradCheckWorker::GradCheckWorker(ConvNet& convNet, CPUData& data)
: DataWorker(convNet, data) {
}
void GradCheckWorker::_run() {
_convNet->checkGradients();
exit(0); // eh
}
/*
* ====================
* MultiviewTestWorker
* ====================
*/
MultiviewTestWorker::MultiviewTestWorker(ConvNet& convNet, CPUData& data, int numViews, Matrix& cpuProbs, const char* logregName)
: DataWorker(convNet, data), _numViews(numViews), _cpuProbs(&cpuProbs), _logregName(logregName) {
// assert(_data->getNumCases() % _numViews == 0);
// assert(convNet.getNumReplicas() == 1); // For now?
}
MultiviewTestWorker::MultiviewTestWorker(ConvNet& convNet, CPUData& data, int numViews)
: DataWorker(convNet, data), _numViews(numViews), _cpuProbs(NULL), _logregName("") {
// assert(_data->getNumCases() % _numViews == 0);
}
MultiviewTestWorker::~MultiviewTestWorker() {
// delete _cpuProbs;
}
CPUData& MultiviewTestWorker::getMinibatch(int v, int i) {
int numCasesPerView = _dp->getNumCases() / _numViews;
int miniStart = v * numCasesPerView + i * _dp->getMinibatchSize();
int miniEnd = v * numCasesPerView + min(numCasesPerView, (i + 1) * _dp->getMinibatchSize());
CPUData& mini = _dp->getDataSlice(miniStart, miniEnd);
return mini;
}
void MultiviewTestWorker::_run() {
int numCasesPerView = _dp->getNumCases() / _numViews;
int numMiniPerView = DIVUP(numCasesPerView, _dp->getMinibatchSize());
Cost& batchCost = *new Cost();
for (int i = 0; i < numMiniPerView; i++) {
for (int v = 0; v < _numViews - 1; v++) {
for (int p = 0; p < _convNet->getNumPasses(); p++) {
_convNet->fprop(getMinibatch(v, i), p, v == 0 ? PASS_MULTIVIEW_TEST_START : PASS_MULTIVIEW_TEST);
}
}
for (int p = 0; p < _convNet->getNumPasses(); p++) {
_convNet->fprop(getMinibatch(_numViews - 1, i), p, PASS_MULTIVIEW_TEST_END);
_convNet->getCost(batchCost);
}
if (_cpuProbs != NULL) {
LogregCostLayer& logregLayer = *dynamic_cast<LogregCostLayer*>(&_convNet->getLayer(_logregName, 0));
NVMatrix::setDeviceID(logregLayer.getDeviceID());
Matrix& miniProbs = _cpuProbs->sliceRows(i * _dp->getMinibatchSize(),
min(numCasesPerView, (i + 1) * _dp->getMinibatchSize()));
NVMatrix& acts = logregLayer.getProbsAccum(0);
NVMatrix acts_T;
acts.transpose(acts_T);
acts_T.copyToHost(miniProbs);
delete &miniProbs;
}
}
_convNet->getResultQueue().enqueue(new WorkResult(WorkResult::BATCH_DONE, batchCost));
}
/*
* ====================
* FeatureWorker
* ====================
*/
FeatureWorker::FeatureWorker(ConvNet& convNet, CPUData& data, MatrixV& ftrs, stringv& layerNames, bool deleteFeatures)
: DataWorker(convNet, data), _ftrs(&ftrs), _layerNames(&layerNames), _deleteFeatures(deleteFeatures) {
assert(layerNames.size() == ftrs.size());
for (int i = 0; i < layerNames.size(); i++) {
assert(ftrs[i]->getNumRows() == data.getNumCases());
assert(!ftrs[i]->isTrans());
}
}
FeatureWorker::~FeatureWorker() {
if (_deleteFeatures) {
for (int i = 0; i < _ftrs->size(); i++) {
delete _ftrs->at(i);
}
delete _ftrs;
}
delete _layerNames;
}
void FeatureWorker::_run() {
Cost& batchCost = *new Cost();
map<int,int> repStart; // Feature write start offsets within minibatch
for (int i = 0; i < _dp->getNumMinibatches(); i++) {
for (int f = 0; f < _layerNames->size(); f++) {
repStart[f] = 0;
}
for (int p = 0; p < _convNet->getNumPasses(); p++) {
_convNet->fprop(i, p, PASS_FEATURE_GEN);
_convNet->getCost(batchCost);
for (int f = 0; f < _layerNames->size(); f++) {
if (_convNet->getLayer(_layerNames->at(f), 0).getFwdActiveInputReplicaIdx(p) >= 0) {
Matrix& miniFtrs = _ftrs->at(f)->sliceRows(i * _dp->getMinibatchSize(),
min(_dp->getNumCases(), (i + 1) * _dp->getMinibatchSize()));
for (int r = 0; r < _convNet->getLayer(_layerNames->at(f), 0).getNumReplicas(); ++r) {
Layer& ftrLayer = _convNet->getLayer(_layerNames->at(f), r);
int d = ftrLayer.getDeviceID();
NVMatrix::setDeviceID(d);
NVMatrix& acts = ftrLayer.getActs();
Matrix& repMiniFtrs = miniFtrs.sliceRows(repStart[f],
min(int(miniFtrs.getNumRows()), repStart[f] + acts.getLeadingDim()));
NVMatrix acts_T;
acts.transpose(false);
acts.transpose(acts_T);
acts_T.copyToHost(repMiniFtrs);
NVMatrix::syncStream(); // eh why not
delete &repMiniFtrs;
repStart[f] += acts.getLeadingDim();
}
delete &miniFtrs;
}
}
}
}
_convNet->getResultQueue().enqueue(new WorkResult(WorkResult::BATCH_DONE, batchCost));
}
/*
* ====================
* DataGradWorker
* ====================
*/
DataGradWorker::DataGradWorker(ConvNet& convNet, CPUData& data, Matrix& dataGrads, int dataLayerIdx, int softmaxLayerIdx)
: DataWorker(convNet, data), _dataGrads(&dataGrads), _dataLayerIdx(dataLayerIdx), _softmaxLayerIdx(softmaxLayerIdx) {
// assert(dataGrads.getNumRows() == data.getNumCases());
// assert(!dataGrads.isTrans());
}
DataGradWorker::~DataGradWorker() {
// delete _dataGrads;
}
void DataGradWorker::_run() {
// DataLayer& dataLayer = *dynamic_cast<DataLayer*>(&_convNet->getLayer(_dataLayerIdx));
// SoftmaxLayer& softmaxLayer = *dynamic_cast<SoftmaxLayer*>(&_convNet->getLayer(_softmaxLayerIdx));
// softmaxLayer.setDoLogregGrad(false);
// Cost& batchCost = *new Cost(0);
// for (int i = 0; i < _dp->getNumMinibatches(); i++) {
// _convNet->fprop(i, PASS_TEST);
// _convNet->getCost(batchCost);
// softmaxLayer.getActs().apply(NVMatrixOps::Log(), softmaxLayer.getActsGrad());
//
// softmaxLayer.getActsGrad().addScalar(1);
// softmaxLayer.getActsGrad().scale(-1);
// softmaxLayer.incRcvdBInputs();
// softmaxLayer.bprop(PASS_TEST);
//
// Matrix& miniDataGrads = _dataGrads->sliceRows(i * _dp->getMinibatchSize(),
// min(_dp->getNumCases(), (i + 1) * _dp->getMinibatchSize()));
// NVMatrix& grads = dataLayer.getActsGrad();
// NVMatrix grads_T;
// if (grads.isTrans()) {
// NVMatrix& soft_T = grads.getTranspose();
// soft_T.transpose(grads_T);
// delete &soft_T;
// } else {
// grads.transpose(grads_T);
// }
// grads_T.copyToHost(miniDataGrads);
// delete &miniDataGrads;
//
// _convNet->reset();
// }
// cudaThreadSynchronize();
// _convNet->getResultQueue().enqueue(new WorkResult(WorkResult::BATCH_DONE, batchCost));
}
|
7a5ae130206ea498c9c2697244d4d005809ecd36.hip | // !!! This is a file automatically generated by hipify!!!
#include "cudatest.h"
static Keying keyParamet = {};
static int softness = 10;
static float eps = 0.000001f;
extern "C" void test10() {
keyParamet.chromaColor = {0.15f, 0.6f, 0.0f};
keyParamet.alphaScale = 20.0f;
keyParamet.alphaExponent = 0.1f;
keyParamet.alphaCutoffMin = 0.2f;
keyParamet.lumaMask = 2.0f;
keyParamet.ambientColor = {0.1f, 0.1f, 0.9f};
keyParamet.despillScale = 0.0f;
keyParamet.despillExponent = 0.1f;
keyParamet.ambientScale = 1.0f;
std::string windowNameResult = "result";
std::string windowNameI = "I";
std::string windowNameP = "P";
std::string uiName = "uivvvvvvvvvvvv";
namedWindow(windowNameResult);
namedWindow(windowNameI);
namedWindow(windowNameP);
// crateUI1(uiName.c_str());
//StreamAccessor::getStream/wrapStream
Stream curentStream = {};
hipStream_t cudaStream = StreamAccessor::getStream(curentStream);
//nppSetStream(cudaStream);
VideoCapture cap = {};
int scale = 16;
int width = 1280;
int height = 720;
int scaleWidth = width / scale;
int scaleHeight = height / scale;
Mat frame(height, width, CV_8UC3);
Mat cpuResult;
Mat cpuGpuKeying;
Mat cpuI(height, width, CV_8UC4);
Mat cpuP(height, width, CV_8UC1);
cv::cuda::GpuMat gpuI(height, width, CV_8UC4);;
cv::cuda::GpuMat gpuP(height, width, CV_8UC1);;
cv::cuda::GpuMat gpuFrame;// (height, width, CV_8UC3);
cv::cuda::GpuMat sourceFrame(height, width, CV_8UC4);
cv::cuda::GpuMat gpuKeying(height, width, CV_32FC4);
cv::cuda::GpuMat gpuCvting;
//I_sub+p_sub
cv::cuda::GpuMat gpuResize(scaleHeight, scaleWidth, CV_32FC4);//I_sub+p_sub
//box I_sub+p_sub mean_Irgb+mean_p
cv::cuda::GpuMat mean_I(scaleHeight, scaleWidth, CV_32FC4);
cv::cuda::GpuMat mean_Ipv(scaleHeight, scaleWidth, CV_32FC3);
cv::cuda::GpuMat var_I_rxv(scaleHeight, scaleWidth, CV_32FC3);
cv::cuda::GpuMat var_I_gbxfv(scaleHeight, scaleWidth, CV_32FC3);
cv::cuda::GpuMat mean_Ip(scaleHeight, scaleWidth, CV_32FC3);
cv::cuda::GpuMat var_I_rx(scaleHeight, scaleWidth, CV_32FC3);
cv::cuda::GpuMat var_I_gbxf(scaleHeight, scaleWidth, CV_32FC3);
cv::cuda::GpuMat meanv(scaleHeight, scaleWidth, CV_32FC4);
cv::cuda::GpuMat means(scaleHeight, scaleWidth, CV_32FC4);
cv::cuda::GpuMat mean(scaleHeight, scaleWidth, CV_32FC4);
//cv::cuda::GpuMat result(height, width, CV_8UC4);
cv::cuda::GpuMat resultIP(height, width, CV_8UC4);
cv::cuda::GpuMat resultP(height, width, CV_8UC1);
dim3 block(32, 4);
dim3 grid(divUp(width, block.x), divUp(height, block.y));
dim3 grid2(divUp(scaleWidth, block.x), divUp(scaleHeight, block.y));
NppiSize oSizeROI; //NPPI blue
oSizeROI.width = scaleWidth;
oSizeROI.height = scaleHeight;
NppiSize oMaskSize = {};
oMaskSize.height = softness;
oMaskSize.width = softness;
NppiPoint oAnchor = {};
oAnchor.x = oMaskSize.width / 2;
oAnchor.y = oMaskSize.height / 2;
NppiPoint oSrcOffset = { 0, 0 };
cap.open(0, CAP_DSHOW);
cap.set(CAP_PROP_FRAME_WIDTH, width);
cap.set(CAP_PROP_FRAME_HEIGHT, height);
cap.set(CAP_PROP_FPS, 30);
auto ctime = clock();
auto pretime = clock();
int softness = 1;
while (int key = cv::waitKey(1)) {
if (curentStream.queryIfComplete()) {
// updateUI1(uiName.c_str());
}
cap >> frame;
ctime = clock();
gpuFrame.upload(frame, curentStream);
cv::cuda::cvtColor(gpuFrame, sourceFrame, COLOR_BGR2RGBA, 0, curentStream);
simpleKeyingUE4 << <grid, block, 0, cudaStream >> > (sourceFrame, gpuKeying, keyParamet);
gpuKeying.download(cpuGpuKeying);
cv::cvtColor(cpuGpuKeying, cpuGpuKeying, COLOR_BGR2RGBA);
//
cv::cuda::resize(gpuKeying, gpuResize, cv::Size(scaleWidth, scaleHeight), 0, 0, cv::INTER_NEAREST, curentStream);
findMatrix << <grid2, block, 0, cudaStream >> > (gpuResize, mean_Ipv, var_I_rxv, var_I_gbxfv);
//,,,,.
//,,,
nppiFilterBoxBorder_32f_C4R((Npp32f*)gpuResize.ptr<float4>(), gpuResize.step, oSizeROI, oSrcOffset, (Npp32f*)mean_I.ptr<float4>(), mean_I.step, oSizeROI, oMaskSize, oAnchor, NPP_BORDER_REPLICATE);
nppiFilterBoxBorder_32f_C3R((Npp32f*)mean_Ipv.ptr<float3>(), mean_Ipv.step, oSizeROI, oSrcOffset, (Npp32f*)mean_Ip.ptr<float3>(), mean_Ip.step, oSizeROI, oMaskSize, oAnchor, NPP_BORDER_REPLICATE);
nppiFilterBoxBorder_32f_C3R((Npp32f*)var_I_rxv.ptr<float3>(), var_I_rxv.step, oSizeROI, oSrcOffset, (Npp32f*)var_I_rx.ptr<float3>(), var_I_rx.step, oSizeROI, oMaskSize, oAnchor, NPP_BORDER_REPLICATE);
nppiFilterBoxBorder_32f_C3R((Npp32f*)var_I_gbxfv.ptr<float3>(), var_I_gbxfv.step, oSizeROI, oSrcOffset, (Npp32f*)var_I_gbxf.ptr<float3>(), var_I_gbxf.step, oSizeROI, oMaskSize, oAnchor, NPP_BORDER_REPLICATE);
//
guidedFilter << <grid2, block, 0, cudaStream >> > (mean_I, mean_Ip, var_I_rx, var_I_gbxf, meanv, eps);
nppiFilterBoxBorder_32f_C4R((Npp32f*)meanv.ptr<float4>(), meanv.step, oSizeROI, oSrcOffset, (Npp32f*)means.ptr<float4>(), means.step, oSizeROI, oMaskSize, oAnchor, NPP_BORDER_REPLICATE);
meanv.download(cpuI);
means.download(cpuP);
cv::cvtColor(cpuI, cpuI, COLOR_BGR2RGBA);
cv::cvtColor(cpuP, cpuP, COLOR_BGR2RGBA);
cv::cuda::resize(means, mean, cv::Size(width, height), 0, 0, cv::INTER_LINEAR, curentStream);
//
guidedFilterResult << <grid, block, 0, cudaStream >> > (gpuKeying, mean, resultIP, resultP);
//gpuKeying
//split << <grid, block, 0, cudaStream >> > (gpuKeying, gpuI, gpuP);
cv::cuda::cvtColor(resultIP, gpuCvting, COLOR_BGRA2RGBA, 0, curentStream);
gpuCvting.download(cpuResult);
cv::imshow(windowNameResult, cpuResult);
cv::imshow(windowNameI, cpuI);
cv::imshow(windowNameP, cpuP);
}
//hipFree((void*)guideData);
//hipFree((void*)guideDataCopy);
} | 7a5ae130206ea498c9c2697244d4d005809ecd36.cu | #include "cudatest.h"
static Keying keyParamet = {};
static int softness = 10;
static float eps = 0.000001f;
extern "C" void test10() {
keyParamet.chromaColor = {0.15f, 0.6f, 0.0f};
keyParamet.alphaScale = 20.0f;
keyParamet.alphaExponent = 0.1f;
keyParamet.alphaCutoffMin = 0.2f;
keyParamet.lumaMask = 2.0f;
keyParamet.ambientColor = {0.1f, 0.1f, 0.9f};
keyParamet.despillScale = 0.0f;
keyParamet.despillExponent = 0.1f;
keyParamet.ambientScale = 1.0f;
std::string windowNameResult = "result";
std::string windowNameI = "I";
std::string windowNameP = "P";
std::string uiName = "uivvvvvvvvvvvv";
namedWindow(windowNameResult);
namedWindow(windowNameI);
namedWindow(windowNameP);
// crateUI1(uiName.c_str());
//StreamAccessor::getStream/wrapStream
Stream curentStream = {};
cudaStream_t cudaStream = StreamAccessor::getStream(curentStream);
//nppSetStream(cudaStream);
VideoCapture cap = {};
int scale = 16;
int width = 1280;
int height = 720;
int scaleWidth = width / scale;
int scaleHeight = height / scale;
Mat frame(height, width, CV_8UC3);
Mat cpuResult;
Mat cpuGpuKeying;
Mat cpuI(height, width, CV_8UC4);
Mat cpuP(height, width, CV_8UC1);
cv::cuda::GpuMat gpuI(height, width, CV_8UC4);;
cv::cuda::GpuMat gpuP(height, width, CV_8UC1);;
cv::cuda::GpuMat gpuFrame;// (height, width, CV_8UC3);
cv::cuda::GpuMat sourceFrame(height, width, CV_8UC4);
cv::cuda::GpuMat gpuKeying(height, width, CV_32FC4);
cv::cuda::GpuMat gpuCvting;
//I_sub+p_sub
cv::cuda::GpuMat gpuResize(scaleHeight, scaleWidth, CV_32FC4);//I_sub+p_sub
//box I_sub+p_sub mean_Irgb+mean_p
cv::cuda::GpuMat mean_I(scaleHeight, scaleWidth, CV_32FC4);
cv::cuda::GpuMat mean_Ipv(scaleHeight, scaleWidth, CV_32FC3);
cv::cuda::GpuMat var_I_rxv(scaleHeight, scaleWidth, CV_32FC3);
cv::cuda::GpuMat var_I_gbxfv(scaleHeight, scaleWidth, CV_32FC3);
cv::cuda::GpuMat mean_Ip(scaleHeight, scaleWidth, CV_32FC3);
cv::cuda::GpuMat var_I_rx(scaleHeight, scaleWidth, CV_32FC3);
cv::cuda::GpuMat var_I_gbxf(scaleHeight, scaleWidth, CV_32FC3);
cv::cuda::GpuMat meanv(scaleHeight, scaleWidth, CV_32FC4);
cv::cuda::GpuMat means(scaleHeight, scaleWidth, CV_32FC4);
cv::cuda::GpuMat mean(scaleHeight, scaleWidth, CV_32FC4);
//cv::cuda::GpuMat result(height, width, CV_8UC4);
cv::cuda::GpuMat resultIP(height, width, CV_8UC4);
cv::cuda::GpuMat resultP(height, width, CV_8UC1);
dim3 block(32, 4);
dim3 grid(divUp(width, block.x), divUp(height, block.y));
dim3 grid2(divUp(scaleWidth, block.x), divUp(scaleHeight, block.y));
NppiSize oSizeROI; //NPPI blue
oSizeROI.width = scaleWidth;
oSizeROI.height = scaleHeight;
NppiSize oMaskSize = {};
oMaskSize.height = softness;
oMaskSize.width = softness;
NppiPoint oAnchor = {};
oAnchor.x = oMaskSize.width / 2;
oAnchor.y = oMaskSize.height / 2;
NppiPoint oSrcOffset = { 0, 0 };
cap.open(0, CAP_DSHOW);
cap.set(CAP_PROP_FRAME_WIDTH, width);
cap.set(CAP_PROP_FRAME_HEIGHT, height);
cap.set(CAP_PROP_FPS, 30);
auto ctime = clock();
auto pretime = clock();
int softness = 1;
while (int key = cv::waitKey(1)) {
if (curentStream.queryIfComplete()) {
// updateUI1(uiName.c_str());
}
cap >> frame;
ctime = clock();
gpuFrame.upload(frame, curentStream);
cv::cuda::cvtColor(gpuFrame, sourceFrame, COLOR_BGR2RGBA, 0, curentStream);
simpleKeyingUE4 << <grid, block, 0, cudaStream >> > (sourceFrame, gpuKeying, keyParamet);
gpuKeying.download(cpuGpuKeying);
cv::cvtColor(cpuGpuKeying, cpuGpuKeying, COLOR_BGR2RGBA);
//缩放大小
cv::cuda::resize(gpuKeying, gpuResize, cv::Size(scaleWidth, scaleHeight), 0, 0, cv::INTER_NEAREST, curentStream);
findMatrix << <grid2, block, 0, cudaStream >> > (gpuResize, mean_Ipv, var_I_rxv, var_I_gbxfv);
//此外,在计算每个窗口的线性系数时,我们可以发现一个像素会被多个窗口包含,也就是说,每个像素都由多个线性函数所描述.
//因此,如之前所说,要具体求某一点的输出值时,只需将所有包含该点的线性函数值平均即可
nppiFilterBoxBorder_32f_C4R((Npp32f*)gpuResize.ptr<float4>(), gpuResize.step, oSizeROI, oSrcOffset, (Npp32f*)mean_I.ptr<float4>(), mean_I.step, oSizeROI, oMaskSize, oAnchor, NPP_BORDER_REPLICATE);
nppiFilterBoxBorder_32f_C3R((Npp32f*)mean_Ipv.ptr<float3>(), mean_Ipv.step, oSizeROI, oSrcOffset, (Npp32f*)mean_Ip.ptr<float3>(), mean_Ip.step, oSizeROI, oMaskSize, oAnchor, NPP_BORDER_REPLICATE);
nppiFilterBoxBorder_32f_C3R((Npp32f*)var_I_rxv.ptr<float3>(), var_I_rxv.step, oSizeROI, oSrcOffset, (Npp32f*)var_I_rx.ptr<float3>(), var_I_rx.step, oSizeROI, oMaskSize, oAnchor, NPP_BORDER_REPLICATE);
nppiFilterBoxBorder_32f_C3R((Npp32f*)var_I_gbxfv.ptr<float3>(), var_I_gbxfv.step, oSizeROI, oSrcOffset, (Npp32f*)var_I_gbxf.ptr<float3>(), var_I_gbxf.step, oSizeROI, oMaskSize, oAnchor, NPP_BORDER_REPLICATE);
//求导
guidedFilter << <grid2, block, 0, cudaStream >> > (mean_I, mean_Ip, var_I_rx, var_I_gbxf, meanv, eps);
nppiFilterBoxBorder_32f_C4R((Npp32f*)meanv.ptr<float4>(), meanv.step, oSizeROI, oSrcOffset, (Npp32f*)means.ptr<float4>(), means.step, oSizeROI, oMaskSize, oAnchor, NPP_BORDER_REPLICATE);
meanv.download(cpuI);
means.download(cpuP);
cv::cvtColor(cpuI, cpuI, COLOR_BGR2RGBA);
cv::cvtColor(cpuP, cpuP, COLOR_BGR2RGBA);
cv::cuda::resize(means, mean, cv::Size(width, height), 0, 0, cv::INTER_LINEAR, curentStream);
//求结果
guidedFilterResult << <grid, block, 0, cudaStream >> > (gpuKeying, mean, resultIP, resultP);
//gpuKeying分别显示三通与一通
//split << <grid, block, 0, cudaStream >> > (gpuKeying, gpuI, gpuP);
cv::cuda::cvtColor(resultIP, gpuCvting, COLOR_BGRA2RGBA, 0, curentStream);
gpuCvting.download(cpuResult);
cv::imshow(windowNameResult, cpuResult);
cv::imshow(windowNameI, cpuI);
cv::imshow(windowNameP, cpuP);
}
//cudaFree((void*)guideData);
//cudaFree((void*)guideDataCopy);
} |
c94fd31f7d06219d48b31338af499e6b4983a2e0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ###
// ###
// ### Practical Course: GPU Programming in Computer Vision
// ###
// ###
// ### Technical University Munich, Computer Vision Group
// ### Summer Semester 2014, September 8 - October 10
// ###
// ###
// ### Maria Klodt, Jan Stuehmer, Mohamed Souiai, Thomas Moellenhoff
// ###
// ###
// ###
// ###
// ### Dennis Mack, dennis.mack@tum.de, p060
// ### Adrian Haarbach, haarbach@in.tum.de, p077
// ### Markus Schlaffer, markus.schlaffer@in.tum.de, p070
// USAGE: ./ex15/main -length 100000 -repeats 1000
// createArray ndern um input zu ndern
/*
bsp ausgabe:
./ex15/main -length 100000 -repeats 1000
repeats: 1000
length: 100000
cpu result: 100000
gpu result: 100000
cublas result: 100000
avg time cpu: 0.61 ms
avg time gpu: 3.46 ms
avg time gpu allocfree: 0.49 ms
avg time cublas: 0.29 ms
*/
#include <helper.h>
#include <iostream>
#include <math.h>
#include "rocblas.h"
using namespace std;
// Translate a hipBLAS status code into its symbolic name so failures can
// be reported in a human-readable way.
string get_cublas_error(hipblasStatus_t status) {
    switch (status) {
        case HIPBLAS_STATUS_SUCCESS:          return "HIPBLAS_STATUS_SUCCESS";
        case HIPBLAS_STATUS_NOT_INITIALIZED:  return "HIPBLAS_STATUS_NOT_INITIALIZED";
        case HIPBLAS_STATUS_ALLOC_FAILED:     return "HIPBLAS_STATUS_ALLOC_FAILED";
        case HIPBLAS_STATUS_INVALID_VALUE:    return "HIPBLAS_STATUS_INVALID_VALUE";
        case HIPBLAS_STATUS_ARCH_MISMATCH:    return "HIPBLAS_STATUS_ARCH_MISMATCH";
        case HIPBLAS_STATUS_MAPPING_ERROR:    return "HIPBLAS_STATUS_MAPPING_ERROR";
        case HIPBLAS_STATUS_EXECUTION_FAILED: return "HIPBLAS_STATUS_EXECUTION_FAILED";
        case HIPBLAS_STATUS_INTERNAL_ERROR:   return "HIPBLAS_STATUS_INTERNAL_ERROR";
        default:                              return "Unknown error";
    }
}
// Report any non-success hipBLAS status on stderr; success is silent.
void cublas_check(hipblasStatus_t status) {
    if (status == HIPBLAS_STATUS_SUCCESS)
        return;
    cerr << "Received error: " << get_cublas_error(status) << endl;
}
// Sequential reference sum of the first n elements of `in`.  Used both for
// the CPU timing baseline and to collapse the per-block partial sums the
// GPU reduction produces.
__host__ float sumArrayCPU(float *in, size_t n){
    float total = 0.0f;
    for (const float *p = in, *end = in + n; p != end; ++p) {
        total += *p;
    }
    return total;
}
// Block-wise sum reduction: each block writes the sum of its slice of
// `input` into results[blockIdx.x].  The caller finishes the reduction with
// a second launch over the partial sums.
//
// Requirements: blockDim.x must be a power of two (the halving loop assumes
// it) and blockDim.x * sizeof(float) bytes of dynamic shared memory must be
// passed at launch.
__global__ void sumArray(float *input, float *results, size_t n){
    extern __shared__ float sdata[];
    // size_t index avoids overflow of blockDim.x * blockIdx.x for large n.
    size_t i = threadIdx.x + (size_t)blockDim.x * blockIdx.x;
    int tx = threadIdx.x;
    // Load into shared memory, padding out-of-range lanes with 0 so the
    // whole block can participate in the reduction below.
    sdata[tx] = (i < n) ? input[i] : 0.0f;
    __syncthreads();
    // BUGFIX: the original `if (i >= n) return;` made tail threads skip the
    // __syncthreads() inside the loop -- a barrier in divergent control
    // flow, which is undefined behaviour whenever the last block is only
    // partially covered by n.  All threads now stay; the zero padding keeps
    // the sum correct.
    // Tree reduction in shared memory.
    for(int offset = blockDim.x/2; offset > 0; offset >>= 1){
        if(tx < offset){
            // Fold the upper half of the active range into the lower half.
            sdata[tx] += sdata[tx + offset];
        }
        __syncthreads();
    }
    if(tx == 0){
        results[blockIdx.x] = sdata[0];
    }
}
// Fill every element of `array` with 1.0f; used to build the test input so
// the expected sum equals `length`.
void createArray(float* array, size_t length){
    size_t idx = 0;
    while (idx < length) {
        array[idx++] = 1.0f;
    }
}
// Arithmetic mean of the first iSize entries of dArray.
// Returns 0.0f for iSize <= 0 instead of reading out of bounds (the
// original unconditionally read dArray[0]).
float GetAverage(float dArray[], int iSize) {
    if (iSize <= 0) {
        return 0.0f;
    }
    float dSum = 0.0f;
    for (int i = 0; i < iSize; ++i) {
        dSum += dArray[i];
    }
    return dSum / iSize;
}
int main(int argc, char **argv)
{
// Before the GPU can process your kernels, a so called "CUDA context" must be initialized
// This happens on the very first call to a CUDA function, and takes some time (around half a second)
// We will do it right here, so that the run time measurements are accurate
hipDeviceSynchronize(); CUDA_CHECK;
// Reading command line parameters:
// getParam("param", var, argc, argv) looks whether "-param xyz" is specified, and if so stores the value "xyz" in "var"
// If "-param" is not specified, the value of "var" remains unchanged
//
// return value: getParam("param", ...) returns true if "-param" is specified, and false otherwise
// number of computation repetitions to get a better run time measurement
int repeats = 1;
getParam("repeats", repeats, argc, argv);
cout << "repeats: " << repeats << endl;
// ### Define your own parameters here as needed
float length=1000;
getParam("length", length, argc, argv);
cout << "length: " << length << endl;
float *input, *output, *middle, *cublasout, cpuout;
input=(float*)malloc(length*sizeof(float));
output=(float*)malloc(length*sizeof(float));
middle=(float*)malloc(length*sizeof(float));
cublasout=(float*)malloc(length*sizeof(float));
createArray(input, length);
float *tc, *tg, *tg2, *tcu;
tc=(float*)malloc(repeats*sizeof(float));
tg=(float*)malloc(repeats*sizeof(float));
tg2=(float*)malloc(repeats*sizeof(float));
tcu=(float*)malloc(repeats*sizeof(float));
for(int i=0;i<repeats;i++){
//CPU:
Timer timercpu, timergpu, timergpu2, timercublas;
timercpu.start();
cpuout=sumArrayCPU(input,length);
timercpu.end();
tc[i] = timercpu.get();
//GPU:
timergpu.start();
float *d_input, *d_output, *d_middle, *d_cublas;
hipMalloc(&d_input, length * sizeof(float) ); CUDA_CHECK;
hipMemcpy(d_input, input, length * sizeof(float), hipMemcpyHostToDevice); CUDA_CHECK;
hipMalloc(&d_output, length * sizeof(float) ); CUDA_CHECK;
hipMemset(d_output, 0, length * sizeof(float)); CUDA_CHECK;
hipMalloc(&d_middle, length * sizeof(float) ); CUDA_CHECK;
hipMemset(d_middle, 0, length * sizeof(float)); CUDA_CHECK;
hipMalloc(&d_cublas, length * sizeof(float) ); CUDA_CHECK;
hipMemset(d_cublas, 0, length * sizeof(float)); CUDA_CHECK;
dim3 block = dim3(128,1,1);
dim3 grid = dim3((length + block.x - 1 ) / block.x, 1, 1);
size_t smBytes = block.x * sizeof(float);
timergpu2.start();
hipLaunchKernelGGL(( sumArray) , dim3(grid),dim3(block), smBytes, 0, d_input, d_middle, length); CUDA_CHECK;
hipDeviceSynchronize(); CUDA_CHECK;
hipLaunchKernelGGL(( sumArray) , dim3(grid),dim3(block), smBytes, 0, d_middle, d_output, length); CUDA_CHECK;
hipDeviceSynchronize(); CUDA_CHECK;
timergpu2.end();
tg2[i] = timergpu2.get();
//CUBLAS:
hipblasStatus_t stat;
hipblasHandle_t handle;
stat = hipblasCreate(&handle);
cublas_check(stat);
timercublas.start();
stat=hipblasSetVector(length, sizeof(*input), input, 1, d_cublas, 1);cublas_check(stat);
stat = hipblasSasum(handle, length, d_cublas, 1, cublasout);cublas_check(stat);
timercublas.end();
tcu[i] = timercublas.get();
hipblasDestroy(handle);
hipMemcpy(middle, d_middle, length * sizeof(float), hipMemcpyDeviceToHost); CUDA_CHECK;
hipMemcpy(output, d_output, length * sizeof(float), hipMemcpyDeviceToHost); CUDA_CHECK;
hipFree(d_output); CUDA_CHECK;
hipFree(d_input); CUDA_CHECK;
hipFree(d_middle); CUDA_CHECK;
hipFree(d_cublas); CUDA_CHECK;
timergpu.end();
tg[i] = timergpu.get();
}
cout << "cpu result: " << cpuout << endl;
// //print blockwise addition
// for(int i=0;i<length;i++){
// if(middle[i]==0) break;
// cout << middle[i] << ", ";
// }
// cout << endl;
cout << "gpu result: " << sumArrayCPU(output,length) << endl;
cout << "cublas result: " << cublasout[0] << endl;
cout << "avg time cpu: " << GetAverage(tc, repeats)*1000 << " ms" << endl;
cout << "avg time gpu: " << GetAverage(tg, repeats)*1000 << " ms" << endl;
cout << "avg time gpu allocfree: " << GetAverage(tg2, repeats)*1000 << " ms" << endl;
cout << "avg time cublas: " << GetAverage(tcu, repeats)*1000 << " ms" << endl;
// close all opencv windows
cvDestroyAllWindows();
return 0;
}
| c94fd31f7d06219d48b31338af499e6b4983a2e0.cu | // ###
// ###
// ### Practical Course: GPU Programming in Computer Vision
// ###
// ###
// ### Technical University Munich, Computer Vision Group
// ### Summer Semester 2014, September 8 - October 10
// ###
// ###
// ### Maria Klodt, Jan Stuehmer, Mohamed Souiai, Thomas Moellenhoff
// ###
// ###
// ###
// ###
// ### Dennis Mack, dennis.mack@tum.de, p060
// ### Adrian Haarbach, haarbach@in.tum.de, p077
// ### Markus Schlaffer, markus.schlaffer@in.tum.de, p070
// USAGE: ./ex15/main -length 100000 -repeats 1000
// createArray ändern um input zu ändern
/*
bsp ausgabe:
./ex15/main -length 100000 -repeats 1000
repeats: 1000
length: 100000
cpu result: 100000
gpu result: 100000
cublas result: 100000
avg time cpu: 0.61 ms
avg time gpu: 3.46 ms
avg time gpu allocfree: 0.49 ms
avg time cublas: 0.29 ms
*/
#include <helper.h>
#include <iostream>
#include <math.h>
#include "cublas_v2.h"
using namespace std;
// Translate a cuBLAS status code into its symbolic name so failures can
// be reported in a human-readable way.
string get_cublas_error(cublasStatus_t status) {
    switch (status) {
        case CUBLAS_STATUS_SUCCESS:          return "CUBLAS_STATUS_SUCCESS";
        case CUBLAS_STATUS_NOT_INITIALIZED:  return "CUBLAS_STATUS_NOT_INITIALIZED";
        case CUBLAS_STATUS_ALLOC_FAILED:     return "CUBLAS_STATUS_ALLOC_FAILED";
        case CUBLAS_STATUS_INVALID_VALUE:    return "CUBLAS_STATUS_INVALID_VALUE";
        case CUBLAS_STATUS_ARCH_MISMATCH:    return "CUBLAS_STATUS_ARCH_MISMATCH";
        case CUBLAS_STATUS_MAPPING_ERROR:    return "CUBLAS_STATUS_MAPPING_ERROR";
        case CUBLAS_STATUS_EXECUTION_FAILED: return "CUBLAS_STATUS_EXECUTION_FAILED";
        case CUBLAS_STATUS_INTERNAL_ERROR:   return "CUBLAS_STATUS_INTERNAL_ERROR";
        default:                             return "Unknown error";
    }
}
// Report any non-success cuBLAS status on stderr; success is silent.
void cublas_check(cublasStatus_t status) {
    if (status == CUBLAS_STATUS_SUCCESS)
        return;
    cerr << "Received error: " << get_cublas_error(status) << endl;
}
// Sequential reference sum of the first n elements of `in`.  Used both for
// the CPU timing baseline and to collapse the per-block partial sums the
// GPU reduction produces.
__host__ float sumArrayCPU(float *in, size_t n){
    float total = 0.0f;
    for (const float *p = in, *end = in + n; p != end; ++p) {
        total += *p;
    }
    return total;
}
// Block-wise sum reduction: each block writes the sum of its slice of
// `input` into results[blockIdx.x].  The caller finishes the reduction with
// a second launch over the partial sums.
//
// Requirements: blockDim.x must be a power of two (the halving loop assumes
// it) and blockDim.x * sizeof(float) bytes of dynamic shared memory must be
// passed at launch.
__global__ void sumArray(float *input, float *results, size_t n){
    extern __shared__ float sdata[];
    // size_t index avoids overflow of blockDim.x * blockIdx.x for large n.
    size_t i = threadIdx.x + (size_t)blockDim.x * blockIdx.x;
    int tx = threadIdx.x;
    // Load into shared memory, padding out-of-range lanes with 0 so the
    // whole block can participate in the reduction below.
    sdata[tx] = (i < n) ? input[i] : 0.0f;
    __syncthreads();
    // BUGFIX: the original `if (i >= n) return;` made tail threads skip the
    // __syncthreads() inside the loop -- a barrier in divergent control
    // flow, which is undefined behaviour whenever the last block is only
    // partially covered by n.  All threads now stay; the zero padding keeps
    // the sum correct.
    // Tree reduction in shared memory.
    for(int offset = blockDim.x/2; offset > 0; offset >>= 1){
        if(tx < offset){
            // Fold the upper half of the active range into the lower half.
            sdata[tx] += sdata[tx + offset];
        }
        __syncthreads();
    }
    if(tx == 0){
        results[blockIdx.x] = sdata[0];
    }
}
// Fill every element of `array` with 1.0f; used to build the test input so
// the expected sum equals `length`.
void createArray(float* array, size_t length){
    size_t idx = 0;
    while (idx < length) {
        array[idx++] = 1.0f;
    }
}
// Arithmetic mean of the first iSize entries of dArray.
// Returns 0.0f for iSize <= 0 instead of reading out of bounds (the
// original unconditionally read dArray[0]).
float GetAverage(float dArray[], int iSize) {
    if (iSize <= 0) {
        return 0.0f;
    }
    float dSum = 0.0f;
    for (int i = 0; i < iSize; ++i) {
        dSum += dArray[i];
    }
    return dSum / iSize;
}
// Benchmark driver: sums an array of `length` ones on the CPU, on the GPU
// (two-pass shared-memory reduction), and with cuBLAS (Sasum), printing the
// three results and the average run time of each path over `repeats` runs.
int main(int argc, char **argv)
{
    // The first CUDA call initializes the CUDA context (can take ~0.5 s);
    // do it up front so it is not charged to the timed sections below.
    cudaDeviceSynchronize(); CUDA_CHECK;
    // getParam("param", var, argc, argv) stores the value of "-param xyz"
    // in var (unchanged if the flag is absent) and returns whether the
    // flag was present.
    int repeats = 1;
    getParam("repeats", repeats, argc, argv);
    cout << "repeats: " << repeats << endl;
    // NOTE(review): length is a float used as an element count everywhere
    // below — presumably so the shared getParam overload applies; confirm,
    // and beware of float precision for very large lengths.
    float length=1000;
    getParam("length", length, argc, argv);
    cout << "length: " << length << endl;
    // Host buffers: input data, GPU block partials (middle/output), cuBLAS
    // result slot, and the scalar CPU result.
    float *input, *output, *middle, *cublasout, cpuout;
    input=(float*)malloc(length*sizeof(float));
    output=(float*)malloc(length*sizeof(float));
    middle=(float*)malloc(length*sizeof(float));
    cublasout=(float*)malloc(length*sizeof(float));
    createArray(input, length);
    // Per-iteration timings: CPU, GPU including alloc/copy/free (tg), GPU
    // kernels only (tg2), and cuBLAS (tcu).
    float *tc, *tg, *tg2, *tcu;
    tc=(float*)malloc(repeats*sizeof(float));
    tg=(float*)malloc(repeats*sizeof(float));
    tg2=(float*)malloc(repeats*sizeof(float));
    tcu=(float*)malloc(repeats*sizeof(float));
    for(int i=0;i<repeats;i++){
        //CPU:
        Timer timercpu, timergpu, timergpu2, timercublas;
        timercpu.start();
        cpuout=sumArrayCPU(input,length);
        timercpu.end();
        tc[i] = timercpu.get();
        //GPU: timergpu covers allocation + copies + kernels + frees.
        timergpu.start();
        float *d_input, *d_output, *d_middle, *d_cublas;
        cudaMalloc(&d_input, length * sizeof(float) ); CUDA_CHECK;
        cudaMemcpy(d_input, input, length * sizeof(float), cudaMemcpyHostToDevice); CUDA_CHECK;
        cudaMalloc(&d_output, length * sizeof(float) ); CUDA_CHECK;
        cudaMemset(d_output, 0, length * sizeof(float)); CUDA_CHECK;
        cudaMalloc(&d_middle, length * sizeof(float) ); CUDA_CHECK;
        cudaMemset(d_middle, 0, length * sizeof(float)); CUDA_CHECK;
        cudaMalloc(&d_cublas, length * sizeof(float) ); CUDA_CHECK;
        cudaMemset(d_cublas, 0, length * sizeof(float)); CUDA_CHECK;
        dim3 block = dim3(128,1,1);
        dim3 grid = dim3((length + block.x - 1 ) / block.x, 1, 1);
        size_t smBytes = block.x * sizeof(float);
        // Two-pass reduction: pass 1 writes one partial per block into
        // d_middle; pass 2 reduces those partials into d_output.
        // NOTE(review): pass 2 reuses the same grid over `length` elements,
        // so d_output still holds per-block partials — they are summed on
        // the CPU when printed below, not reduced to a single scalar here.
        timergpu2.start();
        sumArray <<<grid,block, smBytes>>> (d_input, d_middle, length); CUDA_CHECK;
        cudaDeviceSynchronize(); CUDA_CHECK;
        sumArray <<<grid,block, smBytes>>> (d_middle, d_output, length); CUDA_CHECK;
        cudaDeviceSynchronize(); CUDA_CHECK;
        timergpu2.end();
        tg2[i] = timergpu2.get();
        //CUBLAS: sum of |x_i| — equals the plain sum here since inputs are 1.
        cublasStatus_t stat;
        cublasHandle_t handle;
        stat = cublasCreate(&handle);
        cublas_check(stat);
        timercublas.start();
        stat=cublasSetVector(length, sizeof(*input), input, 1, d_cublas, 1);cublas_check(stat);
        stat = cublasSasum(handle, length, d_cublas, 1, cublasout);cublas_check(stat);
        timercublas.end();
        tcu[i] = timercublas.get();
        cublasDestroy(handle);
        cudaMemcpy(middle, d_middle, length * sizeof(float), cudaMemcpyDeviceToHost); CUDA_CHECK;
        cudaMemcpy(output, d_output, length * sizeof(float), cudaMemcpyDeviceToHost); CUDA_CHECK;
        cudaFree(d_output); CUDA_CHECK;
        cudaFree(d_input); CUDA_CHECK;
        cudaFree(d_middle); CUDA_CHECK;
        cudaFree(d_cublas); CUDA_CHECK;
        timergpu.end();
        tg[i] = timergpu.get();
    }
    cout << "cpu result: " << cpuout << endl;
    // //print blockwise addition
    // for(int i=0;i<length;i++){
    //     if(middle[i]==0) break;
    //     cout << middle[i] << ", ";
    // }
    // cout << endl;
    cout << "gpu result: " << sumArrayCPU(output,length) << endl;
    cout << "cublas result: " << cublasout[0] << endl;
    cout << "avg time cpu: " << GetAverage(tc, repeats)*1000 << " ms" << endl;
    cout << "avg time gpu: " << GetAverage(tg, repeats)*1000 << " ms" << endl;
    cout << "avg time gpu allocfree: " << GetAverage(tg2, repeats)*1000 << " ms" << endl;
    cout << "avg time cublas: " << GetAverage(tcu, repeats)*1000 << " ms" << endl;
    // NOTE(review): host buffers are never freed; acceptable for a one-shot
    // benchmark that exits immediately afterwards.
    // close all opencv windows
    cvDestroyAllWindows();
    return 0;
}
|
2b8a643a582012d99a6e7d14f1eceaa7d638fc6f.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <fstream>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <cmath>
#include "SSIM.h"
#include "main.h"
#include "psnr.h"
#include "stvssim.h"
#include "stvssim.cuh"
#include "ssim.cuh"
#include <omp.h>
#include <ctime>
#include <chrono>
using namespace std;
string FF_PATH = "";
int CHUNK_SIZE = 4;
int THREADS=256 ;
// qsort comparator for doubles (used to find the median metric score).
// Fix: the original returned the truncated integer of (a - b), so any
// difference inside (-1, 1) — i.e. every pair of SSIM scores — compared
// as equal and the sort was effectively a no-op.  Return the sign instead.
int compare(const void * a, const void * b)
{
    double lhs = *(const double*)a;
    double rhs = *(const double*)b;
    return (lhs > rhs) - (lhs < rhs);
}
// Probe `path` with ffprobe and return a heap-allocated PictureData with
// its width, height, frame count and luma plane size.  Relies on ffprobe's
// nokey/noprint_wrappers output arriving as one value per line, in order:
// width, height, r_frame_rate ("num/den"), nb_read_frames, duration.
// NOTE(review): the caller owns the returned pointer; fps and len are
// computed but currently unused, and the popen stream is never pclose()d.
PictureData *getVideoInfo(string path) {
    PictureData * data = new PictureData;
    string cmd = FF_PATH+"ffprobe -v error -count_frames -of flat=s=_ -select_streams v:0 -show_entries stream=width,height,r_frame_rate,nb_read_frames -show_entries format=duration,nb_frames -of default=noprint_wrappers=1:nokey=1 " + path;
    //cout << cmd.c_str() << endl;
    //string cmd="ffprobe -v error -of flat=s=_ -select_streams v:0 -show_entries stream=width,height,nb_frames -of default=noprint_wrappers=1:nokey=1 "+path;
    // NOTE(review): cmd2 is built (with malformed option spacing) but never
    // executed; dead code kept for reference.
    string cmd2 = FF_PATH + "ffprobe - select_streams v - show_streams" + path + " 2> NUL";
#ifdef __linux__
    FILE *stream = popen(cmd.c_str(), "r");
#else
    FILE *stream = _popen(cmd.c_str(), "r");
#endif
    char buffer[50];
    fgets(buffer, 10, stream);          // line 1: width
    data->width = atoi(buffer);
    fgets(buffer, 10, stream);          // line 2: height
    data->height = atoi(buffer);
    fgets(buffer, 20, stream);          // line 3: r_frame_rate as "num/den"
    string tmp = buffer;
    int pos = tmp.find('/');
    int fps1 = atoi(buffer);
    double fps2 = atoi(tmp.substr(pos + 1).c_str());
    double fps = fps1 / fps2;           // integer division quirk preserved
    //cout << fps << endl;
    fgets(buffer, 10, stream);          // line 4: nb_read_frames
    data->frame_count = atoi(buffer);
    fgets(buffer, 20, stream);          // line 5: container duration (seconds)
    //cout << buffer << endl;
    double len = atof(buffer);
    //cout<<len<<endl;
    //cout << data->frame_count << endl;
    //data->frame_count = len*fps;
    data->size = data->width*data->height;  // luma plane size in bytes
    //else data->frame_count = 3121;//181250; // 7100;//3121;//1359;//7192;
    return data;
}
// Spawn ffmpeg decoding `path` into raw yuv420p frames and return the pipe
// the caller reads the frames from.  Uses popen on Linux and _popen (binary
// mode) on Windows; ffmpeg's own chatter is discarded.
FILE * startFFmpeg(string path) {
    cout << path.c_str() << endl;
#ifdef __linux__
    string cmd = FF_PATH + "ffmpeg -i " + path + " -f image2pipe -pix_fmt yuv420p -vcodec rawvideo - 2>/dev/null";
    //cout << cmd << endl;
    return popen(cmd.c_str(), "r");
#else
    string cmd = FF_PATH + "ffmpeg -i " + path + " -f image2pipe -threads 3 -pix_fmt yuv420p -vcodec rawvideo - 2>NUL";
    //-c:v h264_qsv
    return _popen(cmd.c_str(), "rb");
#endif
}
// Score every distorted stream in `streams` against the raw reference `ref`
// frame by frame, CHUNK_SIZE frames at a time (the frames of a chunk are
// scored in parallel with OpenMP).  `type` selects SSIM; anything else
// means PSNR.  Each stream delivers raw yuv420p frames: width*height luma
// bytes followed by width*height/2 chroma bytes, which are read into
// dataTrash and discarded.  results[k][frame] receives the score for file
// k; returns `results`, or NULL on any short read.
// NOTE(review): the data/dataRef/dataTrash buffers are never freed.
double ** countMetric(FILE ** streams, FILE * ref, int files_count, PictureData * frame, string type, double ** results) {
    //double ** results = new double*[files_count];
    unsigned char *** data = new unsigned char **[files_count];   // [file][chunk slot] -> luma frame
    unsigned char ** dataRef = new unsigned char *[CHUNK_SIZE];   // reference luma frames for one chunk
    unsigned char * dataTrash = new unsigned char[frame->width*frame->height / 2];  // chroma sink
    int frames = frame->frame_count;
    int rec;
    for (int k = 0; k < files_count; k++) {
        data[k] = new unsigned char *[CHUNK_SIZE];
        //results[k] = new double [frame->frame_count];
        for (int j = 0; j < CHUNK_SIZE; j++) {
            data[k][j] = new unsigned char[frame->width*frame->height];
        }
    }
    for (int j = 0; j < CHUNK_SIZE; j++) {
        dataRef[j] = new unsigned char[frame->width*frame->height];
    }
    // Full chunks: read CHUNK_SIZE reference + distorted frames, then score.
    for (int i = 0; i < frame->frame_count / CHUNK_SIZE; i++) {
        for (int j = 0; j < CHUNK_SIZE; j++) {
            rec = fread(dataRef[j], 1, frame->width*frame->height, ref);
            if (rec != frame->width*frame->height) {
                cout << "error" << endl;
                return NULL;
            }
            rec = fread(dataTrash, 1, frame->width*frame->height / 2, ref); //when using yuv, first 2/3 of the picture are Lumma, others are UV which we do not evaluate
            if (rec != frame->width*frame->height / 2) {
                cout << "error" << endl;
                return NULL;
            }
            for (int k = 0; k < files_count; k++) {
                int rec = fread(data[k][j], 1, frame->width*frame->height, streams[k]);
                if (rec != frame->width*frame->height) {
                    cout << "error" << endl;
                    return NULL;
                }
                rec = fread(dataTrash, 1, frame->width*frame->height / 2, streams[k]); //when using yuv, first 2/3 of the picture are Lumma, others are UV which we do not evaluate
                if (rec != frame->width*frame->height / 2) {
                    cout << "error" << endl;
                    return NULL;
                }
            }
        }
        // Score the chunk: one OpenMP thread per frame slot.
        omp_set_num_threads(CHUNK_SIZE);
        for (int k = 0; k < files_count; k++) {
#pragma omp parallel for
            //cout<<"size: "<<CHUNK_SIZE<<"Threads real: "<<omp_get_num_threads()<<endl;
            for (int j = 0; j < CHUNK_SIZE; j++) {
                if (string(type) == string("SSIM")) results[k][j + i*CHUNK_SIZE] = countSSIM(dataRef[j], data[k][j], frame->width*frame->height, frame->width);
                else results[k][j + i*CHUNK_SIZE] = countPSNR(dataRef[j], data[k][j], frame->width*frame->height);
                //cout << j+i * CHUNK_SIZE << " " << results[k][j+i*CHUNK_SIZE] << endl;
            }
        }
    }
    // Tail: the remaining frame_count % CHUNK_SIZE frames, scored serially.
    for (int j = 0; j < frame->frame_count % CHUNK_SIZE; j++) {
        rec = fread(dataRef[j], 1, frame->width*frame->height, ref);
        rec = fread(dataTrash, 1, frame->width*frame->height/2, ref);
        //fseek(ref, frame->width*frame->height / 2, SEEK_CUR); //skip others except Y channel
        // NOTE(review): only the chroma read is validated here — the luma
        // fread above overwrites rec before it is checked; confirm intent.
        if (rec != frame->width*frame->height/2) {
            cout << "error" << endl;
            return NULL;
        }
    }
    for (int k = 0; k < files_count; k++) {
        for (int j = 0; j < frame->frame_count % CHUNK_SIZE; j++) {
            rec = fread(data[k][j], 1, frame->width*frame->height, streams[k]);
            fread(dataTrash, 1, frame->width*frame->height/2, streams[k]);
            //fseek(streams[k], frame->width*frame->height / 2, SEEK_CUR); //skip others except Y channel
            if (rec != frame->width*frame->height) {
                cout << "error" << endl;
                return NULL;
            }
            if (string(type) == string("SSIM")) results[k][frame->frame_count - frame->frame_count % CHUNK_SIZE + j] = countSSIM(dataRef[j], data[k][j], frame->width*frame->height, frame->width);
            else results[k][frame->frame_count - frame->frame_count % CHUNK_SIZE + j] = countPSNR(dataRef[j], data[k][j], frame->width*frame->height);
            //cout << frame2->frame_count - frame2->frame_count % CHUNK_SIZE + j << " " << results[frame2->frame_count - frame2->frame_count % CHUNK_SIZE + j] << endl;
        }
    }
    //delete frame->data;
    //delete frame;
    return results;
}
// Read exactly `count` bytes from `file` into `data`.
// Returns the byte count on success; aborts the whole program on a short read.
int readFromFile(unsigned char *& data, int count, FILE * file) {
    const int got = fread(data, 1, count, file);
    if (got == count)
        return got;
    cout << "Error while reading from file" << endl;
    exit(-1);
}
// Entry point: parses -r/-in/-type/-threads/-CUDA_threads/-ffpath/CUDA
// arguments, decodes the reference and every input through ffmpeg pipes,
// runs the selected metric (STVSSIM/SSIM/PSNR) on CPU or GPU, and prints
// per-file average and median scores plus total run time.
int main(int argc, char ** argv) {
    /*int deviceCount;
    hipGetDeviceCount(&deviceCount);
    int device;
    for (device = 0; device < deviceCount; ++device) {
        hipDeviceProp_t deviceProp;
        hipGetDeviceProperties(&deviceProp, device);
        printf("Device %s has compute capability %d.%d.\n", deviceProp.name, deviceProp.major, deviceProp.minor);
    }*/
    string reference;
    string file1, file2;
    string type;
    int gpu = 0;                 // 1 when the bare "CUDA" flag selects the GPU paths
    string * files = new string[MAX_FILES];
    int files_count = 0;
    //cout<<argv<<endl;
    if (argc < 6) { // not enough parameters: print usage and exit
        cout << argc << endl;
        cout << "Usage is -r <reference file> -in <first video to compare> -in <second video to compare> [-type <STVSSIM, SSIM or PSNR>] [-ffpath <path to folder with ffmpeg and ffprobe executables>] [CUDA] \n"; // Inform the user of how to use the program
        //std::cin.get();
        exit(0);
    }
    else { // if we got enough parameters...
        //std::cout << argv[0];
        for (int i = 1; i < argc; i++) { /* Iterate over argv[] starting at 1
                                          * (argv[0] is the program path). */
            if (i + 1 != argc) { // flags that consume a value need a following argument
                if (string(argv[i]) == string("-r")) {
                    // next argument is the reference filename
                    reference = argv[i + 1];
                    //std::cout << reference << endl;
                }
                else if (string(argv[i]) == string("-in")) {
                    files[files_count] = string(argv[i + 1]);
                    //cout << files[files_count].c_str() << endl;
                    files_count++;
                }
                /*else if (string(argv[i]) == string("-in2")) {
                    file2 = string(argv[i + 1]);
                }*/
                else if (string(argv[i]) == string("-type")) {
                    type = string(argv[i + 1]);
                }
                else if (string(argv[i]) == string("-threads")) {
                    CHUNK_SIZE = atoi(argv[i + 1]);
                    cout<<"Threads: "<<CHUNK_SIZE<<endl;
                }
                else if (string(argv[i]) == string("-CUDA_threads")) {
                    THREADS = atoi(argv[i + 1]);
                    cout<<"CUDA threads: "<<THREADS<<endl;
                }
                else if (string(argv[i]) == string("-ffpath")) {
                    FF_PATH = string(argv[i + 1]);
                }
                else if (string(argv[i]) == string("CUDA")) { //we will use CUDA computation
                    gpu = 1;
                }
                else {
                    // unknown flags are silently ignored
                    //cout << "Not enough or invalid arguments, please try again.\n";
                    //Sleep(2000);
                    //exit(0);
                }
                //std::cout << argv[i] << " ";
            }
            else if (i != argc){
                // last argument: only the bare "CUDA" switch is meaningful here
                if (string(argv[i]) == string("CUDA")) { //we will use CUDA computation
                    gpu = 1;
                }
            }
        }
    }
    const int MAX_BUFFER = 2048000;
    PictureData * frame;
    FILE ** streams;
    FILE * ref;
    streams = new FILE *[files_count];
    double ** results = new double *[files_count];
    // Frame geometry and count are probed from the reference only; all
    // inputs are assumed to match it.
    frame = getVideoInfo(reference);
    ref = startFFmpeg(reference);
    for (int i = 0; i < files_count; i++) {
        //frame = getVideoInfo(files[i]);
        streams[i] = startFFmpeg(files[i]);
        results[i] = new double[frame->frame_count];
    }
    int rec;
    double *sum = new double[files_count];
    int * frames = new int[files_count];   // per-file count of finite scores
    for (int i = 0; i < files_count; i++) {
        frames[i] = frame->frame_count;
        sum[i] = 0;
    }
    chrono::high_resolution_clock::time_point t1 = chrono::high_resolution_clock::now();
    if (gpu == 1) {
        if (string(type) == string("STVSSIM")) {
            cout << "stvssim CUDA" << endl;
            countMetricSTVSSIM_CUDA(streams, ref, files_count, frame, results, frames);
        }
        else {
            cout << "SSIM CUDA" << endl;
            countCUDA(streams, ref, files_count, frame, type, results);
        }
    }
    else if (string(type) == string("STVSSIM")) {
        cout << "stvssim CPU" << endl;
        countMetricSTVSSIM(streams, ref, files_count, frame, results, frames);
        delete streams; //?
    }
    else {
        cout << "SSIM/PSNR CPU" << endl;
        countMetric(streams, ref, files_count, frame, type, results); //SSIM, PSNR
    }
    chrono::high_resolution_clock::time_point t2 = chrono::high_resolution_clock::now();
    chrono::duration<double> time_span = chrono::duration_cast<chrono::duration<double>>(t2 - t1);
    std::cout << "It took me " << time_span.count() << " seconds.";
    std::cout << std::endl;
    for (int j = 0; j < files_count; j++) {
        cout << "file number: " << j << "\t";
    }
    cout<<endl;
    // Accumulate finite scores; non-finite frames shrink the divisor instead.
    for (int i = 0; i < frames[0]; i++) {
        //cout << i<<"\t";
        for (int j = 0; j < files_count; j++) {
            //cout <<results[j][i] << "\t";
            if (std::isfinite(results[j][i]))
                sum[j] += results[j][i];
            else frames[j]--;
        }
        //cout<<endl;
    }
    cout << "File" << "\t" << "AVG" << "\t" << "Median" << endl;
    for (int i = 0; i < files_count; i++) {
        cout <<i<<"\t" << sum[i] / frames[i];
        // median: sort the (finite-prefixed) scores and take the middle one
        qsort(results[i], frames[i], sizeof(double), compare);
        cout << "\t" << results[i][frames[i] / 2] << endl;
    }
}
| 2b8a643a582012d99a6e7d14f1eceaa7d638fc6f.cu | #include <iostream>
#include <fstream>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <cmath>
#include "SSIM.h"
#include "main.h"
#include "psnr.h"
#include "stvssim.h"
#include "stvssim.cuh"
#include "ssim.cuh"
#include <omp.h>
#include <ctime>
#include <chrono>
using namespace std;
string FF_PATH = "";
int CHUNK_SIZE = 4;
int THREADS=256 ;
// qsort comparator for doubles (used to find the median metric score).
// Fix: the original returned the truncated integer of (a - b), so any
// difference inside (-1, 1) — i.e. every pair of SSIM scores — compared
// as equal and the sort was effectively a no-op.  Return the sign instead.
int compare(const void * a, const void * b)
{
    double lhs = *(const double*)a;
    double rhs = *(const double*)b;
    return (lhs > rhs) - (lhs < rhs);
}
// Probe `path` with ffprobe and return a heap-allocated PictureData with
// its width, height, frame count and luma plane size.  Relies on ffprobe's
// nokey/noprint_wrappers output arriving as one value per line, in order:
// width, height, r_frame_rate ("num/den"), nb_read_frames, duration.
// NOTE(review): the caller owns the returned pointer; fps and len are
// computed but currently unused, and the popen stream is never pclose()d.
PictureData *getVideoInfo(string path) {
    PictureData * data = new PictureData;
    string cmd = FF_PATH+"ffprobe -v error -count_frames -of flat=s=_ -select_streams v:0 -show_entries stream=width,height,r_frame_rate,nb_read_frames -show_entries format=duration,nb_frames -of default=noprint_wrappers=1:nokey=1 " + path;
    //cout << cmd.c_str() << endl;
    //string cmd="ffprobe -v error -of flat=s=_ -select_streams v:0 -show_entries stream=width,height,nb_frames -of default=noprint_wrappers=1:nokey=1 "+path;
    // NOTE(review): cmd2 is built (with malformed option spacing) but never
    // executed; dead code kept for reference.
    string cmd2 = FF_PATH + "ffprobe - select_streams v - show_streams" + path + " 2> NUL";
#ifdef __linux__
    FILE *stream = popen(cmd.c_str(), "r");
#else
    FILE *stream = _popen(cmd.c_str(), "r");
#endif
    char buffer[50];
    fgets(buffer, 10, stream);          // line 1: width
    data->width = atoi(buffer);
    fgets(buffer, 10, stream);          // line 2: height
    data->height = atoi(buffer);
    fgets(buffer, 20, stream);          // line 3: r_frame_rate as "num/den"
    string tmp = buffer;
    int pos = tmp.find('/');
    int fps1 = atoi(buffer);
    double fps2 = atoi(tmp.substr(pos + 1).c_str());
    double fps = fps1 / fps2;           // integer division quirk preserved
    //cout << fps << endl;
    fgets(buffer, 10, stream);          // line 4: nb_read_frames
    data->frame_count = atoi(buffer);
    fgets(buffer, 20, stream);          // line 5: container duration (seconds)
    //cout << buffer << endl;
    double len = atof(buffer);
    //cout<<len<<endl;
    //cout << data->frame_count << endl;
    //data->frame_count = len*fps;
    data->size = data->width*data->height;  // luma plane size in bytes
    //else data->frame_count = 3121;//181250; // 7100;//3121;//1359;//7192;
    return data;
}
// Spawn ffmpeg decoding `path` into raw yuv420p frames and return the pipe
// the caller reads the frames from.  Uses popen on Linux and _popen (binary
// mode) on Windows; ffmpeg's own chatter is discarded.
FILE * startFFmpeg(string path) {
    cout << path.c_str() << endl;
#ifdef __linux__
    string cmd = FF_PATH + "ffmpeg -i " + path + " -f image2pipe -pix_fmt yuv420p -vcodec rawvideo - 2>/dev/null";
    //cout << cmd << endl;
    return popen(cmd.c_str(), "r");
#else
    string cmd = FF_PATH + "ffmpeg -i " + path + " -f image2pipe -threads 3 -pix_fmt yuv420p -vcodec rawvideo - 2>NUL";
    //-c:v h264_qsv
    return _popen(cmd.c_str(), "rb");
#endif
}
// Score every distorted stream in `streams` against the raw reference `ref`
// frame by frame, CHUNK_SIZE frames at a time (the frames of a chunk are
// scored in parallel with OpenMP).  `type` selects SSIM; anything else
// means PSNR.  Each stream delivers raw yuv420p frames: width*height luma
// bytes followed by width*height/2 chroma bytes, which are read into
// dataTrash and discarded.  results[k][frame] receives the score for file
// k; returns `results`, or NULL on any short read.
// NOTE(review): the data/dataRef/dataTrash buffers are never freed.
double ** countMetric(FILE ** streams, FILE * ref, int files_count, PictureData * frame, string type, double ** results) {
    //double ** results = new double*[files_count];
    unsigned char *** data = new unsigned char **[files_count];   // [file][chunk slot] -> luma frame
    unsigned char ** dataRef = new unsigned char *[CHUNK_SIZE];   // reference luma frames for one chunk
    unsigned char * dataTrash = new unsigned char[frame->width*frame->height / 2];  // chroma sink
    int frames = frame->frame_count;
    int rec;
    for (int k = 0; k < files_count; k++) {
        data[k] = new unsigned char *[CHUNK_SIZE];
        //results[k] = new double [frame->frame_count];
        for (int j = 0; j < CHUNK_SIZE; j++) {
            data[k][j] = new unsigned char[frame->width*frame->height];
        }
    }
    for (int j = 0; j < CHUNK_SIZE; j++) {
        dataRef[j] = new unsigned char[frame->width*frame->height];
    }
    // Full chunks: read CHUNK_SIZE reference + distorted frames, then score.
    for (int i = 0; i < frame->frame_count / CHUNK_SIZE; i++) {
        for (int j = 0; j < CHUNK_SIZE; j++) {
            rec = fread(dataRef[j], 1, frame->width*frame->height, ref);
            if (rec != frame->width*frame->height) {
                cout << "error" << endl;
                return NULL;
            }
            rec = fread(dataTrash, 1, frame->width*frame->height / 2, ref); //when using yuv, first 2/3 of the picture are Lumma, others are UV which we do not evaluate
            if (rec != frame->width*frame->height / 2) {
                cout << "error" << endl;
                return NULL;
            }
            for (int k = 0; k < files_count; k++) {
                int rec = fread(data[k][j], 1, frame->width*frame->height, streams[k]);
                if (rec != frame->width*frame->height) {
                    cout << "error" << endl;
                    return NULL;
                }
                rec = fread(dataTrash, 1, frame->width*frame->height / 2, streams[k]); //when using yuv, first 2/3 of the picture are Lumma, others are UV which we do not evaluate
                if (rec != frame->width*frame->height / 2) {
                    cout << "error" << endl;
                    return NULL;
                }
            }
        }
        // Score the chunk: one OpenMP thread per frame slot.
        omp_set_num_threads(CHUNK_SIZE);
        for (int k = 0; k < files_count; k++) {
#pragma omp parallel for
            //cout<<"size: "<<CHUNK_SIZE<<"Threads real: "<<omp_get_num_threads()<<endl;
            for (int j = 0; j < CHUNK_SIZE; j++) {
                if (string(type) == string("SSIM")) results[k][j + i*CHUNK_SIZE] = countSSIM(dataRef[j], data[k][j], frame->width*frame->height, frame->width);
                else results[k][j + i*CHUNK_SIZE] = countPSNR(dataRef[j], data[k][j], frame->width*frame->height);
                //cout << j+i * CHUNK_SIZE << " " << results[k][j+i*CHUNK_SIZE] << endl;
            }
        }
    }
    // Tail: the remaining frame_count % CHUNK_SIZE frames, scored serially.
    for (int j = 0; j < frame->frame_count % CHUNK_SIZE; j++) {
        rec = fread(dataRef[j], 1, frame->width*frame->height, ref);
        rec = fread(dataTrash, 1, frame->width*frame->height/2, ref);
        //fseek(ref, frame->width*frame->height / 2, SEEK_CUR); //skip others except Y channel
        // NOTE(review): only the chroma read is validated here — the luma
        // fread above overwrites rec before it is checked; confirm intent.
        if (rec != frame->width*frame->height/2) {
            cout << "error" << endl;
            return NULL;
        }
    }
    for (int k = 0; k < files_count; k++) {
        for (int j = 0; j < frame->frame_count % CHUNK_SIZE; j++) {
            rec = fread(data[k][j], 1, frame->width*frame->height, streams[k]);
            fread(dataTrash, 1, frame->width*frame->height/2, streams[k]);
            //fseek(streams[k], frame->width*frame->height / 2, SEEK_CUR); //skip others except Y channel
            if (rec != frame->width*frame->height) {
                cout << "error" << endl;
                return NULL;
            }
            if (string(type) == string("SSIM")) results[k][frame->frame_count - frame->frame_count % CHUNK_SIZE + j] = countSSIM(dataRef[j], data[k][j], frame->width*frame->height, frame->width);
            else results[k][frame->frame_count - frame->frame_count % CHUNK_SIZE + j] = countPSNR(dataRef[j], data[k][j], frame->width*frame->height);
            //cout << frame2->frame_count - frame2->frame_count % CHUNK_SIZE + j << " " << results[frame2->frame_count - frame2->frame_count % CHUNK_SIZE + j] << endl;
        }
    }
    //delete frame->data;
    //delete frame;
    return results;
}
// Read exactly `count` bytes from `file` into `data`.
// Returns the byte count on success; aborts the whole program on a short read.
int readFromFile(unsigned char *& data, int count, FILE * file) {
    const int got = fread(data, 1, count, file);
    if (got == count)
        return got;
    cout << "Error while reading from file" << endl;
    exit(-1);
}
// Entry point: parses -r/-in/-type/-threads/-CUDA_threads/-ffpath/CUDA
// arguments, decodes the reference and every input through ffmpeg pipes,
// runs the selected metric (STVSSIM/SSIM/PSNR) on CPU or GPU, and prints
// per-file average and median scores plus total run time.
int main(int argc, char ** argv) {
    /*int deviceCount;
    cudaGetDeviceCount(&deviceCount);
    int device;
    for (device = 0; device < deviceCount; ++device) {
        cudaDeviceProp deviceProp;
        cudaGetDeviceProperties(&deviceProp, device);
        printf("Device %s has compute capability %d.%d.\n", deviceProp.name, deviceProp.major, deviceProp.minor);
    }*/
    string reference;
    string file1, file2;
    string type;
    int gpu = 0;                 // 1 when the bare "CUDA" flag selects the GPU paths
    string * files = new string[MAX_FILES];
    int files_count = 0;
    //cout<<argv<<endl;
    if (argc < 6) { // not enough parameters: print usage and exit
        cout << argc << endl;
        cout << "Usage is -r <reference file> -in <first video to compare> -in <second video to compare> [-type <STVSSIM, SSIM or PSNR>] [-ffpath <path to folder with ffmpeg and ffprobe executables>] [CUDA] \n"; // Inform the user of how to use the program
        //std::cin.get();
        exit(0);
    }
    else { // if we got enough parameters...
        //std::cout << argv[0];
        for (int i = 1; i < argc; i++) { /* Iterate over argv[] starting at 1
                                          * (argv[0] is the program path). */
            if (i + 1 != argc) { // flags that consume a value need a following argument
                if (string(argv[i]) == string("-r")) {
                    // next argument is the reference filename
                    reference = argv[i + 1];
                    //std::cout << reference << endl;
                }
                else if (string(argv[i]) == string("-in")) {
                    files[files_count] = string(argv[i + 1]);
                    //cout << files[files_count].c_str() << endl;
                    files_count++;
                }
                /*else if (string(argv[i]) == string("-in2")) {
                    file2 = string(argv[i + 1]);
                }*/
                else if (string(argv[i]) == string("-type")) {
                    type = string(argv[i + 1]);
                }
                else if (string(argv[i]) == string("-threads")) {
                    CHUNK_SIZE = atoi(argv[i + 1]);
                    cout<<"Threads: "<<CHUNK_SIZE<<endl;
                }
                else if (string(argv[i]) == string("-CUDA_threads")) {
                    THREADS = atoi(argv[i + 1]);
                    cout<<"CUDA threads: "<<THREADS<<endl;
                }
                else if (string(argv[i]) == string("-ffpath")) {
                    FF_PATH = string(argv[i + 1]);
                }
                else if (string(argv[i]) == string("CUDA")) { //we will use CUDA computation
                    gpu = 1;
                }
                else {
                    // unknown flags are silently ignored
                    //cout << "Not enough or invalid arguments, please try again.\n";
                    //Sleep(2000);
                    //exit(0);
                }
                //std::cout << argv[i] << " ";
            }
            else if (i != argc){
                // last argument: only the bare "CUDA" switch is meaningful here
                if (string(argv[i]) == string("CUDA")) { //we will use CUDA computation
                    gpu = 1;
                }
            }
        }
    }
    const int MAX_BUFFER = 2048000;
    PictureData * frame;
    FILE ** streams;
    FILE * ref;
    streams = new FILE *[files_count];
    double ** results = new double *[files_count];
    // Frame geometry and count are probed from the reference only; all
    // inputs are assumed to match it.
    frame = getVideoInfo(reference);
    ref = startFFmpeg(reference);
    for (int i = 0; i < files_count; i++) {
        //frame = getVideoInfo(files[i]);
        streams[i] = startFFmpeg(files[i]);
        results[i] = new double[frame->frame_count];
    }
    int rec;
    double *sum = new double[files_count];
    int * frames = new int[files_count];   // per-file count of finite scores
    for (int i = 0; i < files_count; i++) {
        frames[i] = frame->frame_count;
        sum[i] = 0;
    }
    chrono::high_resolution_clock::time_point t1 = chrono::high_resolution_clock::now();
    if (gpu == 1) {
        if (string(type) == string("STVSSIM")) {
            cout << "stvssim CUDA" << endl;
            countMetricSTVSSIM_CUDA(streams, ref, files_count, frame, results, frames);
        }
        else {
            cout << "SSIM CUDA" << endl;
            countCUDA(streams, ref, files_count, frame, type, results);
        }
    }
    else if (string(type) == string("STVSSIM")) {
        cout << "stvssim CPU" << endl;
        countMetricSTVSSIM(streams, ref, files_count, frame, results, frames);
        delete streams; //?
    }
    else {
        cout << "SSIM/PSNR CPU" << endl;
        countMetric(streams, ref, files_count, frame, type, results); //SSIM, PSNR
    }
    chrono::high_resolution_clock::time_point t2 = chrono::high_resolution_clock::now();
    chrono::duration<double> time_span = chrono::duration_cast<chrono::duration<double>>(t2 - t1);
    std::cout << "It took me " << time_span.count() << " seconds.";
    std::cout << std::endl;
    for (int j = 0; j < files_count; j++) {
        cout << "file number: " << j << "\t";
    }
    cout<<endl;
    // Accumulate finite scores; non-finite frames shrink the divisor instead.
    for (int i = 0; i < frames[0]; i++) {
        //cout << i<<"\t";
        for (int j = 0; j < files_count; j++) {
            //cout <<results[j][i] << "\t";
            if (std::isfinite(results[j][i]))
                sum[j] += results[j][i];
            else frames[j]--;
        }
        //cout<<endl;
    }
    cout << "File" << "\t" << "AVG" << "\t" << "Median" << endl;
    for (int i = 0; i < files_count; i++) {
        cout <<i<<"\t" << sum[i] / frames[i];
        // median: sort the (finite-prefixed) scores and take the middle one
        qsort(results[i], frames[i], sizeof(double), compare);
        cout << "\t" << results[i][frames[i] / 2] << endl;
    }
}
|
4945406585af1ea84bcfb105a9540abfa42a8682.hip | // !!! This is a file automatically generated by hipify!!!
#include <cudnn.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <malloc.h>
#include <cstdlib>
#include <time.h>
#include <iostream>
#include <sys/types.h>
#include <errno.h>
#include <vector>
#include <fstream>
#include <string>
#include <omp.h>
#define TH 1
#define TW 8
#define TC 16
#define C 64
#define N 32
#define H 56
#define W 56
#define TCS ((C-1)/TC + 1)
#define THS ((H-1)/TH + 1)
#define TWS ((W-1)/TW+1)
#define WPAD (TWS*TW + 2)
#define R 3
#define S 3
using namespace std;
#define checkCUDNN(expression) \
{ \
cudnnStatus_t status = (expression); \
if (status != CUDNN_STATUS_SUCCESS) { \
std::cerr << "Error on line " << __LINE__ << ": " \
<< cudnnGetErrorString(status) << std::endl; \
std::exit(EXIT_FAILURE); \
} \
}
// Terminate the program with a readable message when a HIP runtime call
// returned anything other than hipSuccess.
inline void chkerr(hipError_t code)
{
    if (code == hipSuccess)
        return;
    std::cerr << "ERROR!!!:" << hipGetErrorString(code) <<endl;
    exit(-1);
}
extern "C" __global__ void default_function_kernel0(float* __restrict__ data, float* __restrict__ kernel, float* __restrict__ compute) {
float compute_local[32];
__shared__ float pad_temp_shared[384];
__shared__ float kernel_shared[768];
float pad_temp_shared_local[32];
float kernel_shared_local[4];
compute_local[(0)] = 0.000000e+00f;
compute_local[(16)] = 0.000000e+00f;
compute_local[(4)] = 0.000000e+00f;
compute_local[(20)] = 0.000000e+00f;
compute_local[(8)] = 0.000000e+00f;
compute_local[(24)] = 0.000000e+00f;
compute_local[(12)] = 0.000000e+00f;
compute_local[(28)] = 0.000000e+00f;
compute_local[(1)] = 0.000000e+00f;
compute_local[(17)] = 0.000000e+00f;
compute_local[(5)] = 0.000000e+00f;
compute_local[(21)] = 0.000000e+00f;
compute_local[(9)] = 0.000000e+00f;
compute_local[(25)] = 0.000000e+00f;
compute_local[(13)] = 0.000000e+00f;
compute_local[(29)] = 0.000000e+00f;
compute_local[(2)] = 0.000000e+00f;
compute_local[(18)] = 0.000000e+00f;
compute_local[(6)] = 0.000000e+00f;
compute_local[(22)] = 0.000000e+00f;
compute_local[(10)] = 0.000000e+00f;
compute_local[(26)] = 0.000000e+00f;
compute_local[(14)] = 0.000000e+00f;
compute_local[(30)] = 0.000000e+00f;
compute_local[(3)] = 0.000000e+00f;
compute_local[(19)] = 0.000000e+00f;
compute_local[(7)] = 0.000000e+00f;
compute_local[(23)] = 0.000000e+00f;
compute_local[(11)] = 0.000000e+00f;
compute_local[(27)] = 0.000000e+00f;
compute_local[(15)] = 0.000000e+00f;
compute_local[(31)] = 0.000000e+00f;
for (int rc_outer = 0; rc_outer < 4; ++rc_outer) {
for (int ry_outer = 0; ry_outer < 3; ++ry_outer) {
__syncthreads();
pad_temp_shared[((((int)threadIdx.z) * 48))] = (((1 <= ((((int)blockIdx.y) * 4) + ry_outer)) && (1 <= ((int)blockIdx.x))) ? data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) - 57))] : 0.000000e+00f);
pad_temp_shared[(((((int)threadIdx.z) * 48) + 1))] = ((1 <= ((((int)blockIdx.y) * 4) + ry_outer)) ? data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) - 56))] : 0.000000e+00f);
pad_temp_shared[(((((int)threadIdx.z) * 48) + 2))] = ((1 <= ((((int)blockIdx.y) * 4) + ry_outer)) ? data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) - 55))] : 0.000000e+00f);
pad_temp_shared[(((((int)threadIdx.z) * 48) + 3))] = ((1 <= ((((int)blockIdx.y) * 4) + ry_outer)) ? data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) - 54))] : 0.000000e+00f);
pad_temp_shared[(((((int)threadIdx.z) * 48) + 4))] = ((1 <= ((((int)blockIdx.y) * 4) + ry_outer)) ? data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) - 53))] : 0.000000e+00f);
pad_temp_shared[(((((int)threadIdx.z) * 48) + 5))] = (((1 <= ((((int)blockIdx.y) * 4) + ry_outer)) && (((int)blockIdx.x) < 13)) ? data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) - 52))] : 0.000000e+00f);
pad_temp_shared[(((((int)threadIdx.z) * 48) + 6))] = ((1 <= ((int)blockIdx.x)) ? data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) - 1))] : 0.000000e+00f);
pad_temp_shared[(((((int)threadIdx.z) * 48) + 7))] = data[((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)))];
pad_temp_shared[(((((int)threadIdx.z) * 48) + 8))] = data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) + 1))];
pad_temp_shared[(((((int)threadIdx.z) * 48) + 9))] = data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) + 2))];
pad_temp_shared[(((((int)threadIdx.z) * 48) + 10))] = data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) + 3))];
pad_temp_shared[(((((int)threadIdx.z) * 48) + 11))] = ((((int)blockIdx.x) < 13) ? data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) + 4))] : 0.000000e+00f);
pad_temp_shared[(((((int)threadIdx.z) * 48) + 12))] = ((1 <= ((int)blockIdx.x)) ? data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) + 55))] : 0.000000e+00f);
pad_temp_shared[(((((int)threadIdx.z) * 48) + 13))] = data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) + 56))];
pad_temp_shared[(((((int)threadIdx.z) * 48) + 14))] = data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) + 57))];
pad_temp_shared[(((((int)threadIdx.z) * 48) + 15))] = data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) + 58))];
pad_temp_shared[(((((int)threadIdx.z) * 48) + 16))] = data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) + 59))];
pad_temp_shared[(((((int)threadIdx.z) * 48) + 17))] = ((((int)blockIdx.x) < 13) ? data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) + 60))] : 0.000000e+00f);
pad_temp_shared[(((((int)threadIdx.z) * 48) + 18))] = (((((((int)blockIdx.y) * 4) + ry_outer) < 54) && (1 <= ((int)blockIdx.x))) ? data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) + 111))] : 0.000000e+00f);
pad_temp_shared[(((((int)threadIdx.z) * 48) + 19))] = ((((((int)blockIdx.y) * 4) + ry_outer) < 54) ? data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) + 112))] : 0.000000e+00f);
pad_temp_shared[(((((int)threadIdx.z) * 48) + 20))] = ((((((int)blockIdx.y) * 4) + ry_outer) < 54) ? data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) + 113))] : 0.000000e+00f);
pad_temp_shared[(((((int)threadIdx.z) * 48) + 21))] = ((((((int)blockIdx.y) * 4) + ry_outer) < 54) ? data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) + 114))] : 0.000000e+00f);
pad_temp_shared[(((((int)threadIdx.z) * 48) + 22))] = ((((((int)blockIdx.y) * 4) + ry_outer) < 54) ? data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) + 115))] : 0.000000e+00f);
pad_temp_shared[(((((int)threadIdx.z) * 48) + 23))] = (((((((int)blockIdx.y) * 4) + ry_outer) < 54) && (((int)blockIdx.x) < 13)) ? data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) + 116))] : 0.000000e+00f);
pad_temp_shared[(((((int)threadIdx.z) * 48) + 24))] = (((1 <= ((((int)blockIdx.y) * 4) + ry_outer)) && (1 <= ((int)blockIdx.x))) ? data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) + 3079))] : 0.000000e+00f);
pad_temp_shared[(((((int)threadIdx.z) * 48) + 25))] = ((1 <= ((((int)blockIdx.y) * 4) + ry_outer)) ? data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) + 3080))] : 0.000000e+00f);
pad_temp_shared[(((((int)threadIdx.z) * 48) + 26))] = ((1 <= ((((int)blockIdx.y) * 4) + ry_outer)) ? data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) + 3081))] : 0.000000e+00f);
pad_temp_shared[(((((int)threadIdx.z) * 48) + 27))] = ((1 <= ((((int)blockIdx.y) * 4) + ry_outer)) ? data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) + 3082))] : 0.000000e+00f);
pad_temp_shared[(((((int)threadIdx.z) * 48) + 28))] = ((1 <= ((((int)blockIdx.y) * 4) + ry_outer)) ? data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) + 3083))] : 0.000000e+00f);
pad_temp_shared[(((((int)threadIdx.z) * 48) + 29))] = (((1 <= ((((int)blockIdx.y) * 4) + ry_outer)) && (((int)blockIdx.x) < 13)) ? data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) + 3084))] : 0.000000e+00f);
pad_temp_shared[(((((int)threadIdx.z) * 48) + 30))] = ((1 <= ((int)blockIdx.x)) ? data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) + 3135))] : 0.000000e+00f);
pad_temp_shared[(((((int)threadIdx.z) * 48) + 31))] = data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) + 3136))];
pad_temp_shared[(((((int)threadIdx.z) * 48) + 32))] = data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) + 3137))];
pad_temp_shared[(((((int)threadIdx.z) * 48) + 33))] = data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) + 3138))];
pad_temp_shared[(((((int)threadIdx.z) * 48) + 34))] = data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) + 3139))];
pad_temp_shared[(((((int)threadIdx.z) * 48) + 35))] = ((((int)blockIdx.x) < 13) ? data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) + 3140))] : 0.000000e+00f);
pad_temp_shared[(((((int)threadIdx.z) * 48) + 36))] = ((1 <= ((int)blockIdx.x)) ? data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) + 3191))] : 0.000000e+00f);
pad_temp_shared[(((((int)threadIdx.z) * 48) + 37))] = data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) + 3192))];
pad_temp_shared[(((((int)threadIdx.z) * 48) + 38))] = data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) + 3193))];
pad_temp_shared[(((((int)threadIdx.z) * 48) + 39))] = data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) + 3194))];
pad_temp_shared[(((((int)threadIdx.z) * 48) + 40))] = data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) + 3195))];
pad_temp_shared[(((((int)threadIdx.z) * 48) + 41))] = ((((int)blockIdx.x) < 13) ? data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) + 3196))] : 0.000000e+00f);
pad_temp_shared[(((((int)threadIdx.z) * 48) + 42))] = (((((((int)blockIdx.y) * 4) + ry_outer) < 54) && (1 <= ((int)blockIdx.x))) ? data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) + 3247))] : 0.000000e+00f);
pad_temp_shared[(((((int)threadIdx.z) * 48) + 43))] = ((((((int)blockIdx.y) * 4) + ry_outer) < 54) ? data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) + 3248))] : 0.000000e+00f);
pad_temp_shared[(((((int)threadIdx.z) * 48) + 44))] = ((((((int)blockIdx.y) * 4) + ry_outer) < 54) ? data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) + 3249))] : 0.000000e+00f);
pad_temp_shared[(((((int)threadIdx.z) * 48) + 45))] = ((((((int)blockIdx.y) * 4) + ry_outer) < 54) ? data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) + 3250))] : 0.000000e+00f);
pad_temp_shared[(((((int)threadIdx.z) * 48) + 46))] = ((((((int)blockIdx.y) * 4) + ry_outer) < 54) ? data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) + 3251))] : 0.000000e+00f);
pad_temp_shared[(((((int)threadIdx.z) * 48) + 47))] = (((((((int)blockIdx.y) * 4) + ry_outer) < 54) && (((int)blockIdx.x) < 13)) ? data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) + 3252))] : 0.000000e+00f);
kernel_shared[((((int)threadIdx.z) * 96))] = kernel[(((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)))];
kernel_shared[(((((int)threadIdx.z) * 96) + 1))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 1))];
kernel_shared[(((((int)threadIdx.z) * 96) + 2))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 2))];
kernel_shared[(((((int)threadIdx.z) * 96) + 3))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 9))];
kernel_shared[(((((int)threadIdx.z) * 96) + 4))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 10))];
kernel_shared[(((((int)threadIdx.z) * 96) + 5))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 11))];
kernel_shared[(((((int)threadIdx.z) * 96) + 6))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 18))];
kernel_shared[(((((int)threadIdx.z) * 96) + 7))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 19))];
kernel_shared[(((((int)threadIdx.z) * 96) + 8))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 20))];
kernel_shared[(((((int)threadIdx.z) * 96) + 9))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 27))];
kernel_shared[(((((int)threadIdx.z) * 96) + 10))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 28))];
kernel_shared[(((((int)threadIdx.z) * 96) + 11))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 29))];
kernel_shared[(((((int)threadIdx.z) * 96) + 12))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 36))];
kernel_shared[(((((int)threadIdx.z) * 96) + 13))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 37))];
kernel_shared[(((((int)threadIdx.z) * 96) + 14))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 38))];
kernel_shared[(((((int)threadIdx.z) * 96) + 15))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 45))];
kernel_shared[(((((int)threadIdx.z) * 96) + 16))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 46))];
kernel_shared[(((((int)threadIdx.z) * 96) + 17))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 47))];
kernel_shared[(((((int)threadIdx.z) * 96) + 18))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 54))];
kernel_shared[(((((int)threadIdx.z) * 96) + 19))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 55))];
kernel_shared[(((((int)threadIdx.z) * 96) + 20))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 56))];
kernel_shared[(((((int)threadIdx.z) * 96) + 21))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 63))];
kernel_shared[(((((int)threadIdx.z) * 96) + 22))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 64))];
kernel_shared[(((((int)threadIdx.z) * 96) + 23))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 65))];
kernel_shared[(((((int)threadIdx.z) * 96) + 24))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 72))];
kernel_shared[(((((int)threadIdx.z) * 96) + 25))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 73))];
kernel_shared[(((((int)threadIdx.z) * 96) + 26))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 74))];
kernel_shared[(((((int)threadIdx.z) * 96) + 27))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 81))];
kernel_shared[(((((int)threadIdx.z) * 96) + 28))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 82))];
kernel_shared[(((((int)threadIdx.z) * 96) + 29))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 83))];
kernel_shared[(((((int)threadIdx.z) * 96) + 30))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 90))];
kernel_shared[(((((int)threadIdx.z) * 96) + 31))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 91))];
kernel_shared[(((((int)threadIdx.z) * 96) + 32))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 92))];
kernel_shared[(((((int)threadIdx.z) * 96) + 33))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 99))];
kernel_shared[(((((int)threadIdx.z) * 96) + 34))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 100))];
kernel_shared[(((((int)threadIdx.z) * 96) + 35))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 101))];
kernel_shared[(((((int)threadIdx.z) * 96) + 36))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 108))];
kernel_shared[(((((int)threadIdx.z) * 96) + 37))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 109))];
kernel_shared[(((((int)threadIdx.z) * 96) + 38))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 110))];
kernel_shared[(((((int)threadIdx.z) * 96) + 39))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 117))];
kernel_shared[(((((int)threadIdx.z) * 96) + 40))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 118))];
kernel_shared[(((((int)threadIdx.z) * 96) + 41))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 119))];
kernel_shared[(((((int)threadIdx.z) * 96) + 42))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 126))];
kernel_shared[(((((int)threadIdx.z) * 96) + 43))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 127))];
kernel_shared[(((((int)threadIdx.z) * 96) + 44))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 128))];
kernel_shared[(((((int)threadIdx.z) * 96) + 45))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 135))];
kernel_shared[(((((int)threadIdx.z) * 96) + 46))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 136))];
kernel_shared[(((((int)threadIdx.z) * 96) + 47))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 137))];
kernel_shared[(((((int)threadIdx.z) * 96) + 48))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 576))];
kernel_shared[(((((int)threadIdx.z) * 96) + 49))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 577))];
kernel_shared[(((((int)threadIdx.z) * 96) + 50))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 578))];
kernel_shared[(((((int)threadIdx.z) * 96) + 51))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 585))];
kernel_shared[(((((int)threadIdx.z) * 96) + 52))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 586))];
kernel_shared[(((((int)threadIdx.z) * 96) + 53))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 587))];
kernel_shared[(((((int)threadIdx.z) * 96) + 54))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 594))];
kernel_shared[(((((int)threadIdx.z) * 96) + 55))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 595))];
kernel_shared[(((((int)threadIdx.z) * 96) + 56))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 596))];
kernel_shared[(((((int)threadIdx.z) * 96) + 57))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 603))];
kernel_shared[(((((int)threadIdx.z) * 96) + 58))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 604))];
kernel_shared[(((((int)threadIdx.z) * 96) + 59))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 605))];
kernel_shared[(((((int)threadIdx.z) * 96) + 60))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 612))];
kernel_shared[(((((int)threadIdx.z) * 96) + 61))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 613))];
kernel_shared[(((((int)threadIdx.z) * 96) + 62))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 614))];
kernel_shared[(((((int)threadIdx.z) * 96) + 63))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 621))];
kernel_shared[(((((int)threadIdx.z) * 96) + 64))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 622))];
kernel_shared[(((((int)threadIdx.z) * 96) + 65))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 623))];
kernel_shared[(((((int)threadIdx.z) * 96) + 66))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 630))];
kernel_shared[(((((int)threadIdx.z) * 96) + 67))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 631))];
kernel_shared[(((((int)threadIdx.z) * 96) + 68))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 632))];
kernel_shared[(((((int)threadIdx.z) * 96) + 69))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 639))];
kernel_shared[(((((int)threadIdx.z) * 96) + 70))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 640))];
kernel_shared[(((((int)threadIdx.z) * 96) + 71))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 641))];
kernel_shared[(((((int)threadIdx.z) * 96) + 72))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 648))];
kernel_shared[(((((int)threadIdx.z) * 96) + 73))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 649))];
kernel_shared[(((((int)threadIdx.z) * 96) + 74))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 650))];
kernel_shared[(((((int)threadIdx.z) * 96) + 75))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 657))];
kernel_shared[(((((int)threadIdx.z) * 96) + 76))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 658))];
kernel_shared[(((((int)threadIdx.z) * 96) + 77))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 659))];
kernel_shared[(((((int)threadIdx.z) * 96) + 78))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 666))];
kernel_shared[(((((int)threadIdx.z) * 96) + 79))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 667))];
kernel_shared[(((((int)threadIdx.z) * 96) + 80))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 668))];
kernel_shared[(((((int)threadIdx.z) * 96) + 81))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 675))];
kernel_shared[(((((int)threadIdx.z) * 96) + 82))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 676))];
kernel_shared[(((((int)threadIdx.z) * 96) + 83))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 677))];
kernel_shared[(((((int)threadIdx.z) * 96) + 84))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 684))];
kernel_shared[(((((int)threadIdx.z) * 96) + 85))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 685))];
kernel_shared[(((((int)threadIdx.z) * 96) + 86))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 686))];
kernel_shared[(((((int)threadIdx.z) * 96) + 87))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 693))];
kernel_shared[(((((int)threadIdx.z) * 96) + 88))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 694))];
kernel_shared[(((((int)threadIdx.z) * 96) + 89))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 695))];
kernel_shared[(((((int)threadIdx.z) * 96) + 90))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 702))];
kernel_shared[(((((int)threadIdx.z) * 96) + 91))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 703))];
kernel_shared[(((((int)threadIdx.z) * 96) + 92))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 704))];
kernel_shared[(((((int)threadIdx.z) * 96) + 93))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 711))];
kernel_shared[(((((int)threadIdx.z) * 96) + 94))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 712))];
kernel_shared[(((((int)threadIdx.z) * 96) + 95))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 713))];
__syncthreads();
for (int rc_inner_outer = 0; rc_inner_outer < 8; ++rc_inner_outer) {
pad_temp_shared_local[(0)] = pad_temp_shared[((rc_inner_outer * 48))];
pad_temp_shared_local[(8)] = pad_temp_shared[(((rc_inner_outer * 48) + 1))];
pad_temp_shared_local[(16)] = pad_temp_shared[(((rc_inner_outer * 48) + 2))];
pad_temp_shared_local[(24)] = pad_temp_shared[(((rc_inner_outer * 48) + 3))];
pad_temp_shared_local[(1)] = pad_temp_shared[(((rc_inner_outer * 48) + 6))];
pad_temp_shared_local[(9)] = pad_temp_shared[(((rc_inner_outer * 48) + 7))];
pad_temp_shared_local[(17)] = pad_temp_shared[(((rc_inner_outer * 48) + 8))];
pad_temp_shared_local[(25)] = pad_temp_shared[(((rc_inner_outer * 48) + 9))];
pad_temp_shared_local[(2)] = pad_temp_shared[(((rc_inner_outer * 48) + 12))];
pad_temp_shared_local[(10)] = pad_temp_shared[(((rc_inner_outer * 48) + 13))];
pad_temp_shared_local[(18)] = pad_temp_shared[(((rc_inner_outer * 48) + 14))];
pad_temp_shared_local[(26)] = pad_temp_shared[(((rc_inner_outer * 48) + 15))];
pad_temp_shared_local[(3)] = pad_temp_shared[(((rc_inner_outer * 48) + 18))];
pad_temp_shared_local[(11)] = pad_temp_shared[(((rc_inner_outer * 48) + 19))];
pad_temp_shared_local[(19)] = pad_temp_shared[(((rc_inner_outer * 48) + 20))];
pad_temp_shared_local[(27)] = pad_temp_shared[(((rc_inner_outer * 48) + 21))];
pad_temp_shared_local[(4)] = pad_temp_shared[(((rc_inner_outer * 48) + 24))];
pad_temp_shared_local[(12)] = pad_temp_shared[(((rc_inner_outer * 48) + 25))];
pad_temp_shared_local[(20)] = pad_temp_shared[(((rc_inner_outer * 48) + 26))];
pad_temp_shared_local[(28)] = pad_temp_shared[(((rc_inner_outer * 48) + 27))];
pad_temp_shared_local[(5)] = pad_temp_shared[(((rc_inner_outer * 48) + 30))];
pad_temp_shared_local[(13)] = pad_temp_shared[(((rc_inner_outer * 48) + 31))];
pad_temp_shared_local[(21)] = pad_temp_shared[(((rc_inner_outer * 48) + 32))];
pad_temp_shared_local[(29)] = pad_temp_shared[(((rc_inner_outer * 48) + 33))];
pad_temp_shared_local[(6)] = pad_temp_shared[(((rc_inner_outer * 48) + 36))];
pad_temp_shared_local[(14)] = pad_temp_shared[(((rc_inner_outer * 48) + 37))];
pad_temp_shared_local[(22)] = pad_temp_shared[(((rc_inner_outer * 48) + 38))];
pad_temp_shared_local[(30)] = pad_temp_shared[(((rc_inner_outer * 48) + 39))];
pad_temp_shared_local[(7)] = pad_temp_shared[(((rc_inner_outer * 48) + 42))];
pad_temp_shared_local[(15)] = pad_temp_shared[(((rc_inner_outer * 48) + 43))];
pad_temp_shared_local[(23)] = pad_temp_shared[(((rc_inner_outer * 48) + 44))];
pad_temp_shared_local[(31)] = pad_temp_shared[(((rc_inner_outer * 48) + 45))];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)))];
kernel_shared_local[(2)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 384))];
kernel_shared_local[(1)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 3))];
kernel_shared_local[(3)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 387))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(16)] = (compute_local[(16)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(2)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(0)]));
compute_local[(20)] = (compute_local[(20)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(2)]));
compute_local[(8)] = (compute_local[(8)] + (pad_temp_shared_local[(16)] * kernel_shared_local[(0)]));
compute_local[(24)] = (compute_local[(24)] + (pad_temp_shared_local[(16)] * kernel_shared_local[(2)]));
compute_local[(12)] = (compute_local[(12)] + (pad_temp_shared_local[(24)] * kernel_shared_local[(0)]));
compute_local[(28)] = (compute_local[(28)] + (pad_temp_shared_local[(24)] * kernel_shared_local[(2)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)]));
compute_local[(17)] = (compute_local[(17)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(2)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(0)]));
compute_local[(21)] = (compute_local[(21)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(2)]));
compute_local[(9)] = (compute_local[(9)] + (pad_temp_shared_local[(17)] * kernel_shared_local[(0)]));
compute_local[(25)] = (compute_local[(25)] + (pad_temp_shared_local[(17)] * kernel_shared_local[(2)]));
compute_local[(13)] = (compute_local[(13)] + (pad_temp_shared_local[(25)] * kernel_shared_local[(0)]));
compute_local[(29)] = (compute_local[(29)] + (pad_temp_shared_local[(25)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(0)]));
compute_local[(18)] = (compute_local[(18)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(10)] * kernel_shared_local[(0)]));
compute_local[(22)] = (compute_local[(22)] + (pad_temp_shared_local[(10)] * kernel_shared_local[(2)]));
compute_local[(10)] = (compute_local[(10)] + (pad_temp_shared_local[(18)] * kernel_shared_local[(0)]));
compute_local[(26)] = (compute_local[(26)] + (pad_temp_shared_local[(18)] * kernel_shared_local[(2)]));
compute_local[(14)] = (compute_local[(14)] + (pad_temp_shared_local[(26)] * kernel_shared_local[(0)]));
compute_local[(30)] = (compute_local[(30)] + (pad_temp_shared_local[(26)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(0)]));
compute_local[(19)] = (compute_local[(19)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(2)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(11)] * kernel_shared_local[(0)]));
compute_local[(23)] = (compute_local[(23)] + (pad_temp_shared_local[(11)] * kernel_shared_local[(2)]));
compute_local[(11)] = (compute_local[(11)] + (pad_temp_shared_local[(19)] * kernel_shared_local[(0)]));
compute_local[(27)] = (compute_local[(27)] + (pad_temp_shared_local[(19)] * kernel_shared_local[(2)]));
compute_local[(15)] = (compute_local[(15)] + (pad_temp_shared_local[(27)] * kernel_shared_local[(0)]));
compute_local[(31)] = (compute_local[(31)] + (pad_temp_shared_local[(27)] * kernel_shared_local[(2)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(1)]));
compute_local[(16)] = (compute_local[(16)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(3)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(12)] * kernel_shared_local[(1)]));
compute_local[(20)] = (compute_local[(20)] + (pad_temp_shared_local[(12)] * kernel_shared_local[(3)]));
compute_local[(8)] = (compute_local[(8)] + (pad_temp_shared_local[(20)] * kernel_shared_local[(1)]));
compute_local[(24)] = (compute_local[(24)] + (pad_temp_shared_local[(20)] * kernel_shared_local[(3)]));
compute_local[(12)] = (compute_local[(12)] + (pad_temp_shared_local[(28)] * kernel_shared_local[(1)]));
compute_local[(28)] = (compute_local[(28)] + (pad_temp_shared_local[(28)] * kernel_shared_local[(3)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(1)]));
compute_local[(17)] = (compute_local[(17)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(3)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(13)] * kernel_shared_local[(1)]));
compute_local[(21)] = (compute_local[(21)] + (pad_temp_shared_local[(13)] * kernel_shared_local[(3)]));
compute_local[(9)] = (compute_local[(9)] + (pad_temp_shared_local[(21)] * kernel_shared_local[(1)]));
compute_local[(25)] = (compute_local[(25)] + (pad_temp_shared_local[(21)] * kernel_shared_local[(3)]));
compute_local[(13)] = (compute_local[(13)] + (pad_temp_shared_local[(29)] * kernel_shared_local[(1)]));
compute_local[(29)] = (compute_local[(29)] + (pad_temp_shared_local[(29)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(18)] = (compute_local[(18)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(3)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(14)] * kernel_shared_local[(1)]));
compute_local[(22)] = (compute_local[(22)] + (pad_temp_shared_local[(14)] * kernel_shared_local[(3)]));
compute_local[(10)] = (compute_local[(10)] + (pad_temp_shared_local[(22)] * kernel_shared_local[(1)]));
compute_local[(26)] = (compute_local[(26)] + (pad_temp_shared_local[(22)] * kernel_shared_local[(3)]));
compute_local[(14)] = (compute_local[(14)] + (pad_temp_shared_local[(30)] * kernel_shared_local[(1)]));
compute_local[(30)] = (compute_local[(30)] + (pad_temp_shared_local[(30)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(1)]));
compute_local[(19)] = (compute_local[(19)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(3)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(15)] * kernel_shared_local[(1)]));
compute_local[(23)] = (compute_local[(23)] + (pad_temp_shared_local[(15)] * kernel_shared_local[(3)]));
compute_local[(11)] = (compute_local[(11)] + (pad_temp_shared_local[(23)] * kernel_shared_local[(1)]));
compute_local[(27)] = (compute_local[(27)] + (pad_temp_shared_local[(23)] * kernel_shared_local[(3)]));
compute_local[(15)] = (compute_local[(15)] + (pad_temp_shared_local[(31)] * kernel_shared_local[(1)]));
compute_local[(31)] = (compute_local[(31)] + (pad_temp_shared_local[(31)] * kernel_shared_local[(3)]));
pad_temp_shared_local[(0)] = pad_temp_shared[(((rc_inner_outer * 48) + 1))];
pad_temp_shared_local[(8)] = pad_temp_shared[(((rc_inner_outer * 48) + 2))];
pad_temp_shared_local[(16)] = pad_temp_shared[(((rc_inner_outer * 48) + 3))];
pad_temp_shared_local[(24)] = pad_temp_shared[(((rc_inner_outer * 48) + 4))];
pad_temp_shared_local[(1)] = pad_temp_shared[(((rc_inner_outer * 48) + 7))];
pad_temp_shared_local[(9)] = pad_temp_shared[(((rc_inner_outer * 48) + 8))];
pad_temp_shared_local[(17)] = pad_temp_shared[(((rc_inner_outer * 48) + 9))];
pad_temp_shared_local[(25)] = pad_temp_shared[(((rc_inner_outer * 48) + 10))];
pad_temp_shared_local[(2)] = pad_temp_shared[(((rc_inner_outer * 48) + 13))];
pad_temp_shared_local[(10)] = pad_temp_shared[(((rc_inner_outer * 48) + 14))];
pad_temp_shared_local[(18)] = pad_temp_shared[(((rc_inner_outer * 48) + 15))];
pad_temp_shared_local[(26)] = pad_temp_shared[(((rc_inner_outer * 48) + 16))];
pad_temp_shared_local[(3)] = pad_temp_shared[(((rc_inner_outer * 48) + 19))];
pad_temp_shared_local[(11)] = pad_temp_shared[(((rc_inner_outer * 48) + 20))];
pad_temp_shared_local[(19)] = pad_temp_shared[(((rc_inner_outer * 48) + 21))];
pad_temp_shared_local[(27)] = pad_temp_shared[(((rc_inner_outer * 48) + 22))];
pad_temp_shared_local[(4)] = pad_temp_shared[(((rc_inner_outer * 48) + 25))];
pad_temp_shared_local[(12)] = pad_temp_shared[(((rc_inner_outer * 48) + 26))];
pad_temp_shared_local[(20)] = pad_temp_shared[(((rc_inner_outer * 48) + 27))];
pad_temp_shared_local[(28)] = pad_temp_shared[(((rc_inner_outer * 48) + 28))];
pad_temp_shared_local[(5)] = pad_temp_shared[(((rc_inner_outer * 48) + 31))];
pad_temp_shared_local[(13)] = pad_temp_shared[(((rc_inner_outer * 48) + 32))];
pad_temp_shared_local[(21)] = pad_temp_shared[(((rc_inner_outer * 48) + 33))];
pad_temp_shared_local[(29)] = pad_temp_shared[(((rc_inner_outer * 48) + 34))];
pad_temp_shared_local[(6)] = pad_temp_shared[(((rc_inner_outer * 48) + 37))];
pad_temp_shared_local[(14)] = pad_temp_shared[(((rc_inner_outer * 48) + 38))];
pad_temp_shared_local[(22)] = pad_temp_shared[(((rc_inner_outer * 48) + 39))];
pad_temp_shared_local[(30)] = pad_temp_shared[(((rc_inner_outer * 48) + 40))];
pad_temp_shared_local[(7)] = pad_temp_shared[(((rc_inner_outer * 48) + 43))];
pad_temp_shared_local[(15)] = pad_temp_shared[(((rc_inner_outer * 48) + 44))];
pad_temp_shared_local[(23)] = pad_temp_shared[(((rc_inner_outer * 48) + 45))];
pad_temp_shared_local[(31)] = pad_temp_shared[(((rc_inner_outer * 48) + 46))];
kernel_shared_local[(0)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 1))];
kernel_shared_local[(2)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 385))];
kernel_shared_local[(1)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 4))];
kernel_shared_local[(3)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 388))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(16)] = (compute_local[(16)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(2)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(0)]));
compute_local[(20)] = (compute_local[(20)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(2)]));
compute_local[(8)] = (compute_local[(8)] + (pad_temp_shared_local[(16)] * kernel_shared_local[(0)]));
compute_local[(24)] = (compute_local[(24)] + (pad_temp_shared_local[(16)] * kernel_shared_local[(2)]));
compute_local[(12)] = (compute_local[(12)] + (pad_temp_shared_local[(24)] * kernel_shared_local[(0)]));
compute_local[(28)] = (compute_local[(28)] + (pad_temp_shared_local[(24)] * kernel_shared_local[(2)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)]));
compute_local[(17)] = (compute_local[(17)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(2)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(0)]));
compute_local[(21)] = (compute_local[(21)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(2)]));
compute_local[(9)] = (compute_local[(9)] + (pad_temp_shared_local[(17)] * kernel_shared_local[(0)]));
compute_local[(25)] = (compute_local[(25)] + (pad_temp_shared_local[(17)] * kernel_shared_local[(2)]));
compute_local[(13)] = (compute_local[(13)] + (pad_temp_shared_local[(25)] * kernel_shared_local[(0)]));
compute_local[(29)] = (compute_local[(29)] + (pad_temp_shared_local[(25)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(0)]));
compute_local[(18)] = (compute_local[(18)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(10)] * kernel_shared_local[(0)]));
compute_local[(22)] = (compute_local[(22)] + (pad_temp_shared_local[(10)] * kernel_shared_local[(2)]));
compute_local[(10)] = (compute_local[(10)] + (pad_temp_shared_local[(18)] * kernel_shared_local[(0)]));
compute_local[(26)] = (compute_local[(26)] + (pad_temp_shared_local[(18)] * kernel_shared_local[(2)]));
compute_local[(14)] = (compute_local[(14)] + (pad_temp_shared_local[(26)] * kernel_shared_local[(0)]));
compute_local[(30)] = (compute_local[(30)] + (pad_temp_shared_local[(26)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(0)]));
compute_local[(19)] = (compute_local[(19)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(2)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(11)] * kernel_shared_local[(0)]));
compute_local[(23)] = (compute_local[(23)] + (pad_temp_shared_local[(11)] * kernel_shared_local[(2)]));
compute_local[(11)] = (compute_local[(11)] + (pad_temp_shared_local[(19)] * kernel_shared_local[(0)]));
compute_local[(27)] = (compute_local[(27)] + (pad_temp_shared_local[(19)] * kernel_shared_local[(2)]));
compute_local[(15)] = (compute_local[(15)] + (pad_temp_shared_local[(27)] * kernel_shared_local[(0)]));
compute_local[(31)] = (compute_local[(31)] + (pad_temp_shared_local[(27)] * kernel_shared_local[(2)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(1)]));
compute_local[(16)] = (compute_local[(16)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(3)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(12)] * kernel_shared_local[(1)]));
compute_local[(20)] = (compute_local[(20)] + (pad_temp_shared_local[(12)] * kernel_shared_local[(3)]));
compute_local[(8)] = (compute_local[(8)] + (pad_temp_shared_local[(20)] * kernel_shared_local[(1)]));
compute_local[(24)] = (compute_local[(24)] + (pad_temp_shared_local[(20)] * kernel_shared_local[(3)]));
compute_local[(12)] = (compute_local[(12)] + (pad_temp_shared_local[(28)] * kernel_shared_local[(1)]));
compute_local[(28)] = (compute_local[(28)] + (pad_temp_shared_local[(28)] * kernel_shared_local[(3)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(1)]));
compute_local[(17)] = (compute_local[(17)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(3)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(13)] * kernel_shared_local[(1)]));
compute_local[(21)] = (compute_local[(21)] + (pad_temp_shared_local[(13)] * kernel_shared_local[(3)]));
compute_local[(9)] = (compute_local[(9)] + (pad_temp_shared_local[(21)] * kernel_shared_local[(1)]));
compute_local[(25)] = (compute_local[(25)] + (pad_temp_shared_local[(21)] * kernel_shared_local[(3)]));
compute_local[(13)] = (compute_local[(13)] + (pad_temp_shared_local[(29)] * kernel_shared_local[(1)]));
compute_local[(29)] = (compute_local[(29)] + (pad_temp_shared_local[(29)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(18)] = (compute_local[(18)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(3)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(14)] * kernel_shared_local[(1)]));
compute_local[(22)] = (compute_local[(22)] + (pad_temp_shared_local[(14)] * kernel_shared_local[(3)]));
compute_local[(10)] = (compute_local[(10)] + (pad_temp_shared_local[(22)] * kernel_shared_local[(1)]));
compute_local[(26)] = (compute_local[(26)] + (pad_temp_shared_local[(22)] * kernel_shared_local[(3)]));
compute_local[(14)] = (compute_local[(14)] + (pad_temp_shared_local[(30)] * kernel_shared_local[(1)]));
compute_local[(30)] = (compute_local[(30)] + (pad_temp_shared_local[(30)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(1)]));
compute_local[(19)] = (compute_local[(19)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(3)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(15)] * kernel_shared_local[(1)]));
compute_local[(23)] = (compute_local[(23)] + (pad_temp_shared_local[(15)] * kernel_shared_local[(3)]));
compute_local[(11)] = (compute_local[(11)] + (pad_temp_shared_local[(23)] * kernel_shared_local[(1)]));
compute_local[(27)] = (compute_local[(27)] + (pad_temp_shared_local[(23)] * kernel_shared_local[(3)]));
compute_local[(15)] = (compute_local[(15)] + (pad_temp_shared_local[(31)] * kernel_shared_local[(1)]));
compute_local[(31)] = (compute_local[(31)] + (pad_temp_shared_local[(31)] * kernel_shared_local[(3)]));
pad_temp_shared_local[(0)] = pad_temp_shared[(((rc_inner_outer * 48) + 2))];
pad_temp_shared_local[(8)] = pad_temp_shared[(((rc_inner_outer * 48) + 3))];
pad_temp_shared_local[(16)] = pad_temp_shared[(((rc_inner_outer * 48) + 4))];
pad_temp_shared_local[(24)] = pad_temp_shared[(((rc_inner_outer * 48) + 5))];
pad_temp_shared_local[(1)] = pad_temp_shared[(((rc_inner_outer * 48) + 8))];
pad_temp_shared_local[(9)] = pad_temp_shared[(((rc_inner_outer * 48) + 9))];
pad_temp_shared_local[(17)] = pad_temp_shared[(((rc_inner_outer * 48) + 10))];
pad_temp_shared_local[(25)] = pad_temp_shared[(((rc_inner_outer * 48) + 11))];
pad_temp_shared_local[(2)] = pad_temp_shared[(((rc_inner_outer * 48) + 14))];
pad_temp_shared_local[(10)] = pad_temp_shared[(((rc_inner_outer * 48) + 15))];
pad_temp_shared_local[(18)] = pad_temp_shared[(((rc_inner_outer * 48) + 16))];
pad_temp_shared_local[(26)] = pad_temp_shared[(((rc_inner_outer * 48) + 17))];
pad_temp_shared_local[(3)] = pad_temp_shared[(((rc_inner_outer * 48) + 20))];
pad_temp_shared_local[(11)] = pad_temp_shared[(((rc_inner_outer * 48) + 21))];
pad_temp_shared_local[(19)] = pad_temp_shared[(((rc_inner_outer * 48) + 22))];
pad_temp_shared_local[(27)] = pad_temp_shared[(((rc_inner_outer * 48) + 23))];
pad_temp_shared_local[(4)] = pad_temp_shared[(((rc_inner_outer * 48) + 26))];
pad_temp_shared_local[(12)] = pad_temp_shared[(((rc_inner_outer * 48) + 27))];
pad_temp_shared_local[(20)] = pad_temp_shared[(((rc_inner_outer * 48) + 28))];
pad_temp_shared_local[(28)] = pad_temp_shared[(((rc_inner_outer * 48) + 29))];
pad_temp_shared_local[(5)] = pad_temp_shared[(((rc_inner_outer * 48) + 32))];
pad_temp_shared_local[(13)] = pad_temp_shared[(((rc_inner_outer * 48) + 33))];
pad_temp_shared_local[(21)] = pad_temp_shared[(((rc_inner_outer * 48) + 34))];
pad_temp_shared_local[(29)] = pad_temp_shared[(((rc_inner_outer * 48) + 35))];
pad_temp_shared_local[(6)] = pad_temp_shared[(((rc_inner_outer * 48) + 38))];
pad_temp_shared_local[(14)] = pad_temp_shared[(((rc_inner_outer * 48) + 39))];
pad_temp_shared_local[(22)] = pad_temp_shared[(((rc_inner_outer * 48) + 40))];
pad_temp_shared_local[(30)] = pad_temp_shared[(((rc_inner_outer * 48) + 41))];
pad_temp_shared_local[(7)] = pad_temp_shared[(((rc_inner_outer * 48) + 44))];
pad_temp_shared_local[(15)] = pad_temp_shared[(((rc_inner_outer * 48) + 45))];
pad_temp_shared_local[(23)] = pad_temp_shared[(((rc_inner_outer * 48) + 46))];
pad_temp_shared_local[(31)] = pad_temp_shared[(((rc_inner_outer * 48) + 47))];
kernel_shared_local[(0)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 2))];
kernel_shared_local[(2)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 386))];
kernel_shared_local[(1)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 5))];
kernel_shared_local[(3)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 389))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(16)] = (compute_local[(16)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(2)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(0)]));
compute_local[(20)] = (compute_local[(20)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(2)]));
compute_local[(8)] = (compute_local[(8)] + (pad_temp_shared_local[(16)] * kernel_shared_local[(0)]));
compute_local[(24)] = (compute_local[(24)] + (pad_temp_shared_local[(16)] * kernel_shared_local[(2)]));
compute_local[(12)] = (compute_local[(12)] + (pad_temp_shared_local[(24)] * kernel_shared_local[(0)]));
compute_local[(28)] = (compute_local[(28)] + (pad_temp_shared_local[(24)] * kernel_shared_local[(2)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)]));
compute_local[(17)] = (compute_local[(17)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(2)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(0)]));
compute_local[(21)] = (compute_local[(21)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(2)]));
compute_local[(9)] = (compute_local[(9)] + (pad_temp_shared_local[(17)] * kernel_shared_local[(0)]));
compute_local[(25)] = (compute_local[(25)] + (pad_temp_shared_local[(17)] * kernel_shared_local[(2)]));
compute_local[(13)] = (compute_local[(13)] + (pad_temp_shared_local[(25)] * kernel_shared_local[(0)]));
compute_local[(29)] = (compute_local[(29)] + (pad_temp_shared_local[(25)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(0)]));
compute_local[(18)] = (compute_local[(18)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(10)] * kernel_shared_local[(0)]));
compute_local[(22)] = (compute_local[(22)] + (pad_temp_shared_local[(10)] * kernel_shared_local[(2)]));
compute_local[(10)] = (compute_local[(10)] + (pad_temp_shared_local[(18)] * kernel_shared_local[(0)]));
compute_local[(26)] = (compute_local[(26)] + (pad_temp_shared_local[(18)] * kernel_shared_local[(2)]));
compute_local[(14)] = (compute_local[(14)] + (pad_temp_shared_local[(26)] * kernel_shared_local[(0)]));
compute_local[(30)] = (compute_local[(30)] + (pad_temp_shared_local[(26)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(0)]));
compute_local[(19)] = (compute_local[(19)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(2)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(11)] * kernel_shared_local[(0)]));
compute_local[(23)] = (compute_local[(23)] + (pad_temp_shared_local[(11)] * kernel_shared_local[(2)]));
compute_local[(11)] = (compute_local[(11)] + (pad_temp_shared_local[(19)] * kernel_shared_local[(0)]));
compute_local[(27)] = (compute_local[(27)] + (pad_temp_shared_local[(19)] * kernel_shared_local[(2)]));
compute_local[(15)] = (compute_local[(15)] + (pad_temp_shared_local[(27)] * kernel_shared_local[(0)]));
compute_local[(31)] = (compute_local[(31)] + (pad_temp_shared_local[(27)] * kernel_shared_local[(2)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(1)]));
compute_local[(16)] = (compute_local[(16)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(3)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(12)] * kernel_shared_local[(1)]));
compute_local[(20)] = (compute_local[(20)] + (pad_temp_shared_local[(12)] * kernel_shared_local[(3)]));
compute_local[(8)] = (compute_local[(8)] + (pad_temp_shared_local[(20)] * kernel_shared_local[(1)]));
compute_local[(24)] = (compute_local[(24)] + (pad_temp_shared_local[(20)] * kernel_shared_local[(3)]));
compute_local[(12)] = (compute_local[(12)] + (pad_temp_shared_local[(28)] * kernel_shared_local[(1)]));
compute_local[(28)] = (compute_local[(28)] + (pad_temp_shared_local[(28)] * kernel_shared_local[(3)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(1)]));
compute_local[(17)] = (compute_local[(17)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(3)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(13)] * kernel_shared_local[(1)]));
compute_local[(21)] = (compute_local[(21)] + (pad_temp_shared_local[(13)] * kernel_shared_local[(3)]));
compute_local[(9)] = (compute_local[(9)] + (pad_temp_shared_local[(21)] * kernel_shared_local[(1)]));
compute_local[(25)] = (compute_local[(25)] + (pad_temp_shared_local[(21)] * kernel_shared_local[(3)]));
compute_local[(13)] = (compute_local[(13)] + (pad_temp_shared_local[(29)] * kernel_shared_local[(1)]));
compute_local[(29)] = (compute_local[(29)] + (pad_temp_shared_local[(29)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(18)] = (compute_local[(18)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(3)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(14)] * kernel_shared_local[(1)]));
compute_local[(22)] = (compute_local[(22)] + (pad_temp_shared_local[(14)] * kernel_shared_local[(3)]));
compute_local[(10)] = (compute_local[(10)] + (pad_temp_shared_local[(22)] * kernel_shared_local[(1)]));
compute_local[(26)] = (compute_local[(26)] + (pad_temp_shared_local[(22)] * kernel_shared_local[(3)]));
compute_local[(14)] = (compute_local[(14)] + (pad_temp_shared_local[(30)] * kernel_shared_local[(1)]));
compute_local[(30)] = (compute_local[(30)] + (pad_temp_shared_local[(30)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(1)]));
compute_local[(19)] = (compute_local[(19)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(3)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(15)] * kernel_shared_local[(1)]));
compute_local[(23)] = (compute_local[(23)] + (pad_temp_shared_local[(15)] * kernel_shared_local[(3)]));
compute_local[(11)] = (compute_local[(11)] + (pad_temp_shared_local[(23)] * kernel_shared_local[(1)]));
compute_local[(27)] = (compute_local[(27)] + (pad_temp_shared_local[(23)] * kernel_shared_local[(3)]));
compute_local[(15)] = (compute_local[(15)] + (pad_temp_shared_local[(31)] * kernel_shared_local[(1)]));
compute_local[(31)] = (compute_local[(31)] + (pad_temp_shared_local[(31)] * kernel_shared_local[(3)]));
}
}
}
compute[(((((((int)blockIdx.z) * 50176) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 224)) + (((int)blockIdx.x) * 4)))] = compute_local[(0)];
compute[((((((((int)blockIdx.z) * 50176) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 224)) + (((int)blockIdx.x) * 4)) + 25088))] = compute_local[(16)];
compute[((((((((int)blockIdx.z) * 50176) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 224)) + (((int)blockIdx.x) * 4)) + 1))] = compute_local[(4)];
compute[((((((((int)blockIdx.z) * 50176) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 224)) + (((int)blockIdx.x) * 4)) + 25089))] = compute_local[(20)];
compute[((((((((int)blockIdx.z) * 50176) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 224)) + (((int)blockIdx.x) * 4)) + 2))] = compute_local[(8)];
compute[((((((((int)blockIdx.z) * 50176) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 224)) + (((int)blockIdx.x) * 4)) + 25090))] = compute_local[(24)];
compute[((((((((int)blockIdx.z) * 50176) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 224)) + (((int)blockIdx.x) * 4)) + 3))] = compute_local[(12)];
compute[((((((((int)blockIdx.z) * 50176) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 224)) + (((int)blockIdx.x) * 4)) + 25091))] = compute_local[(28)];
compute[((((((((int)blockIdx.z) * 50176) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 224)) + (((int)blockIdx.x) * 4)) + 56))] = compute_local[(1)];
compute[((((((((int)blockIdx.z) * 50176) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 224)) + (((int)blockIdx.x) * 4)) + 25144))] = compute_local[(17)];
compute[((((((((int)blockIdx.z) * 50176) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 224)) + (((int)blockIdx.x) * 4)) + 57))] = compute_local[(5)];
compute[((((((((int)blockIdx.z) * 50176) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 224)) + (((int)blockIdx.x) * 4)) + 25145))] = compute_local[(21)];
compute[((((((((int)blockIdx.z) * 50176) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 224)) + (((int)blockIdx.x) * 4)) + 58))] = compute_local[(9)];
compute[((((((((int)blockIdx.z) * 50176) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 224)) + (((int)blockIdx.x) * 4)) + 25146))] = compute_local[(25)];
compute[((((((((int)blockIdx.z) * 50176) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 224)) + (((int)blockIdx.x) * 4)) + 59))] = compute_local[(13)];
compute[((((((((int)blockIdx.z) * 50176) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 224)) + (((int)blockIdx.x) * 4)) + 25147))] = compute_local[(29)];
compute[((((((((int)blockIdx.z) * 50176) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 224)) + (((int)blockIdx.x) * 4)) + 112))] = compute_local[(2)];
compute[((((((((int)blockIdx.z) * 50176) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 224)) + (((int)blockIdx.x) * 4)) + 25200))] = compute_local[(18)];
compute[((((((((int)blockIdx.z) * 50176) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 224)) + (((int)blockIdx.x) * 4)) + 113))] = compute_local[(6)];
compute[((((((((int)blockIdx.z) * 50176) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 224)) + (((int)blockIdx.x) * 4)) + 25201))] = compute_local[(22)];
compute[((((((((int)blockIdx.z) * 50176) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 224)) + (((int)blockIdx.x) * 4)) + 114))] = compute_local[(10)];
compute[((((((((int)blockIdx.z) * 50176) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 224)) + (((int)blockIdx.x) * 4)) + 25202))] = compute_local[(26)];
compute[((((((((int)blockIdx.z) * 50176) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 224)) + (((int)blockIdx.x) * 4)) + 115))] = compute_local[(14)];
compute[((((((((int)blockIdx.z) * 50176) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 224)) + (((int)blockIdx.x) * 4)) + 25203))] = compute_local[(30)];
compute[((((((((int)blockIdx.z) * 50176) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 224)) + (((int)blockIdx.x) * 4)) + 168))] = compute_local[(3)];
compute[((((((((int)blockIdx.z) * 50176) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 224)) + (((int)blockIdx.x) * 4)) + 25256))] = compute_local[(19)];
compute[((((((((int)blockIdx.z) * 50176) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 224)) + (((int)blockIdx.x) * 4)) + 169))] = compute_local[(7)];
compute[((((((((int)blockIdx.z) * 50176) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 224)) + (((int)blockIdx.x) * 4)) + 25257))] = compute_local[(23)];
compute[((((((((int)blockIdx.z) * 50176) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 224)) + (((int)blockIdx.x) * 4)) + 170))] = compute_local[(11)];
compute[((((((((int)blockIdx.z) * 50176) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 224)) + (((int)blockIdx.x) * 4)) + 25258))] = compute_local[(27)];
compute[((((((((int)blockIdx.z) * 50176) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 224)) + (((int)blockIdx.x) * 4)) + 171))] = compute_local[(15)];
compute[((((((((int)blockIdx.z) * 50176) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 224)) + (((int)blockIdx.x) * 4)) + 25259))] = compute_local[(31)];
}
// Wrapper around a cuDNN forward convolution using the IMPLICIT_GEMM
// algorithm. Call initialize() once (allocates device buffers, builds the
// cuDNN descriptors, uploads an all-ones filter), then forward() per input.
// NOTE(review): no destructor — device allocations, descriptors, and the
// cuDNN handle are never released. Fine for a one-shot benchmark harness,
// a leak in long-lived use.
class ConvGemm{
public:
// Host-side staging buffer for the filter weights (freed at the end of
// initialize(); do not use afterwards).
float *cpuKernel;
// cudnnConvolutionForward blend factors: alpha scales the conv result,
// beta scales the pre-existing output (0 => overwrite).
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
// Scratch workspace required by the chosen cuDNN algorithm.
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
// Device output buffer (N*H*W floats) returned by forward().
float *output;
// Device filter buffer.
float *kernel;
void initialize();
float *forward(float *input);
};
// One-time setup: allocate device buffers, build cuDNN descriptors for a
// padded (pad=1, stride=1) NCHW float convolution, size the workspace for
// CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM, and upload an all-ones filter.
// Relies on the file-level shape constants C (in channels), N (out
// channels), H, W (spatial), R, S (filter height/width).
// NOTE(review): API return codes are not checked here; failures surface
// later at the cudnnConvolutionForward call.
void ConvGemm::initialize(){
    // Filter buffer sized R*S*C*N to match the hipMemcpy below.
    // (The original hard-coded 9 here — a 3x3 assumption — which
    // under-allocates whenever R*S != 9.)
    hipMalloc(&kernel, sizeof(float)*C*N*R*S);
    hipMalloc(&this->output, sizeof(float)*N*H*W);
    cudnnCreate(&convCudnn);
    // Input tensor: 1 x C x H x W, NCHW, float.
    cudnnCreateTensorDescriptor(&convInputDescriptor);
    cudnnSetTensor4dDescriptor(convInputDescriptor,
                               /*format=*/CUDNN_TENSOR_NCHW,
                               /*dataType=*/CUDNN_DATA_FLOAT,
                               /*batch_size=*/1,
                               /*channels=*/C,
                               /*image_height=*/H,
                               /*image_width=*/W);
    // Filter: N output channels, C input channels, R x S spatial extent.
    cudnnCreateFilterDescriptor(&convKernelDescriptor);
    cudnnSetFilter4dDescriptor(convKernelDescriptor,
                               /*dataType=*/CUDNN_DATA_FLOAT,
                               /*format=*/CUDNN_TENSOR_NCHW,
                               /*out_channels=*/N,
                               /*in_channels=*/C,
                               /*kernel_height=*/R,
                               /*kernel_width=*/S);
    // "Same"-style convolution: pad 1, stride 1, no dilation,
    // cross-correlation mode (the usual deep-learning convention).
    cudnnCreateConvolutionDescriptor(&convDesc);
    cudnnSetConvolution2dDescriptor(convDesc,
                                    /*pad_height=*/1,
                                    /*pad_width=*/1,
                                    /*vertical_stride=*/1,
                                    /*horizontal_stride=*/1,
                                    /*dilation_height=*/1,
                                    /*dilation_width=*/1,
                                    /*mode=*/CUDNN_CROSS_CORRELATION,
                                    CUDNN_DATA_FLOAT);
    // Query the output shape cuDNN derives from the descriptors. The
    // output descriptor below assumes this matches 1 x N x H x W.
    int batch_size{0}, channels{0}, height{0}, width{0};
    cudnnGetConvolution2dForwardOutputDim(convDesc,
                                          convInputDescriptor,
                                          convKernelDescriptor,
                                          &batch_size,
                                          &channels,
                                          &height,
                                          &width);
    cudnnCreateTensorDescriptor(&convOutputDescriptor);
    cudnnSetTensor4dDescriptor(convOutputDescriptor,
                               /*format=*/CUDNN_TENSOR_NCHW,
                               /*dataType=*/CUDNN_DATA_FLOAT,
                               /*batch_size=*/1,
                               /*channels=*/N,
                               /*image_height=*/H,
                               /*image_width=*/W);
    // Size and allocate the scratch workspace for IMPLICIT_GEMM.
    cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
                                            convInputDescriptor,
                                            convKernelDescriptor,
                                            convDesc,
                                            convOutputDescriptor,
                                            CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM,
                                            &workspace_bytes);
    hipMalloc(&d_workspace, workspace_bytes);
    // Fill the filter with ones on the host and upload it. size_t avoids
    // the signed/unsigned loop comparison the original had.
    const size_t kernelSize = (size_t)R*S*C*N;
    this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
    for(size_t i=0;i<kernelSize;++i){
        this->cpuKernel[i] = 1.0f;
    }
    hipMemcpy(kernel,cpuKernel,kernelSize*sizeof(float),hipMemcpyHostToDevice);
    free(cpuKernel);
    cpuKernel = nullptr;  // don't leave a dangling pointer behind
}
// Run one IMPLICIT_GEMM forward convolution on `input` (device pointer,
// 1 x C x H x W). Returns the device output buffer (1 x N x H x W).
float * ConvGemm::forward(float *input) {
    // Clear the output map before accumulating into it.
    hipMemset(output, 0, 1*N*H*W*sizeof(float));
    checkCUDNN(cudnnConvolutionForward(convCudnn, &alpha,
                                       convInputDescriptor, input,
                                       convKernelDescriptor, kernel,
                                       convDesc,
                                       CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM,
                                       d_workspace, workspace_bytes,
                                       &beta, convOutputDescriptor, output));
    return output;
}
// Wrapper around a cuDNN forward convolution using the WINOGRAD_NONFUSED
// algorithm. Call initialize() once before forward().
// NOTE(review): descriptors and device buffers are never released —
// presumably fine for a one-shot benchmark; confirm if instances are reused.
class ConvWinogradeNon{
public:
float *cpuKernel;   // host staging buffer for the all-ones weights
float alpha = 1.0f; // cuDNN blend factor applied to the convolution result
float beta = 0.0f;  // cuDNN blend factor applied to the prior output
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};      // cuDNN scratch space for the chosen algo
size_t workspace_bytes{0};       // size of d_workspace in bytes
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;  // device output map, 1 x N x H x W
float *kernel;  // device weights, N x C x R x S
void initialize();
float *forward(float *input);
};
void ConvWinogradeNon::initialize(){
hipMalloc(&kernel,sizeof(float)*C*N*9);
hipMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED,
&workspace_bytes);
hipMalloc(&d_workspace, workspace_bytes);
unsigned int kernelSize = R*S*C*N;//kernel
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
hipMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),hipMemcpyHostToDevice);
free(cpuKernel);
}
// Run one WINOGRAD_NONFUSED forward convolution on `input` (device pointer,
// 1 x C x H x W). Returns the device output buffer (1 x N x H x W).
float * ConvWinogradeNon::forward(float *input) {
    // Clear the output map before accumulating into it.
    hipMemset(output, 0, 1*N*H*W*sizeof(float));
    checkCUDNN(cudnnConvolutionForward(convCudnn, &alpha,
                                       convInputDescriptor, input,
                                       convKernelDescriptor, kernel,
                                       convDesc,
                                       CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED,
                                       d_workspace, workspace_bytes,
                                       &beta, convOutputDescriptor, output));
    return output;
}
// Wrapper around a cuDNN forward convolution using the FFT algorithm.
// Call initialize() once before forward().
// NOTE(review): descriptors and device buffers are never released —
// presumably fine for a one-shot benchmark; confirm if instances are reused.
class ConvFFT{
public:
float *cpuKernel;   // host staging buffer for the all-ones weights
float alpha = 1.0f; // cuDNN blend factor applied to the convolution result
float beta = 0.0f;  // cuDNN blend factor applied to the prior output
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};      // cuDNN scratch space for the chosen algo
size_t workspace_bytes{0};       // size of d_workspace in bytes
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;  // device output map, 1 x N x H x W
float *kernel;  // device weights, N x C x R x S
void initialize();
float *forward(float *input);
};
void ConvFFT::initialize(){
hipMalloc(&kernel,sizeof(float)*C*N*9);
hipMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_FFT,
&workspace_bytes);
hipMalloc(&d_workspace, workspace_bytes);
unsigned int kernelSize = R*S*C*N;//kernel
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
hipMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),hipMemcpyHostToDevice);
free(cpuKernel);
}
// Run one FFT-algorithm forward convolution on `input` (device pointer,
// 1 x C x H x W). Returns the device output buffer (1 x N x H x W).
float * ConvFFT::forward(float *input) {
    // Clear the output map before accumulating into it.
    hipMemset(output, 0, 1*N*H*W*sizeof(float));
    checkCUDNN(cudnnConvolutionForward(convCudnn, &alpha,
                                       convInputDescriptor, input,
                                       convKernelDescriptor, kernel,
                                       convDesc,
                                       CUDNN_CONVOLUTION_FWD_ALGO_FFT,
                                       d_workspace, workspace_bytes,
                                       &beta, convOutputDescriptor, output));
    return output;
}
// Cooperatively stage rows [h_start, h_end) of TC input channels (starting at
// c_start) into the padded shared-memory tile. Each warp handles a subset of
// channels; lanes stride over the row pixels. The "+1" column shift leaves
// the left padding column zeroed; h_offset (0 or 1) selects whether the top
// padding row is skipped. The original duplicated the loop in a switch over
// h_offset; the two cases differed only by the (h_offset + r) row term, so
// they are unified here. Values of h_offset other than 0/1 remain a no-op,
// matching the original switch.
__device__ void load_input_2_shared_memory(float *input, float *shared_input, unsigned int h_start,
                                           unsigned int h_end, unsigned int h_offset, unsigned int c_start,
                                           unsigned int warp_id, unsigned int lane_id, unsigned int warp_size){
    if (h_offset > 1) return;
    for (unsigned int c = warp_id; c < TC; c += TWS) {
        for (unsigned int i = lane_id; i < (h_end - h_start) * W; i += warp_size) {
            unsigned int r = i / W;
            unsigned int s = i % W;
            shared_input[c*(TH + 2)*(WPAD) + (h_offset + r) * WPAD + s + 1] =
                input[(c_start + c) * H * W + h_start * W + i];
        }
    }
}
// Accumulate a thread's TH x TW register tile into the global output map for
// channel n, starting at (h_out_start, w_out_start), via atomicAdd (multiple
// channel-tiles contribute to the same output pixels). The original was a
// fully unrolled switch over (write_h, write_w) in {1} x {1..8}; this
// collapses it to bounded loops. The guard preserves the original no-op
// behavior for any (write_h, write_w) outside that range.
__device__ __forceinline__ void switch_write_back(unsigned int write_h, unsigned int write_w, unsigned int h_out_start, unsigned int w_out_start, unsigned int n, float * outputs, float * temp_result){
    if (write_h != 1 || write_w < 1 || write_w > 8) return;
    #pragma unroll
    for (unsigned int th = 0; th < 1; ++th) {
        for (unsigned int tw = 0; tw < write_w; ++tw) {
            atomicAdd(&outputs[n*H*W + (h_out_start + th) * W + (w_out_start + tw)],
                      temp_result[(th * TW + tw)]);
        }
    }
}
// Hand-tiled 3x3 "same" convolution.
// Launch contract (from main): grid = TCS*THS blocks, block = N*TWS threads,
// dynamic shared memory = TC*(TH+2)*WPAD floats. Each block covers one
// (channel-tile, row-tile) pair; within a block, warp_id = tw_id picks the
// column tile and lane_id = n picks the output channel. Partial sums across
// channel tiles are combined in global memory by switch_write_back's atomics.
// The original fully unrolled the 3x3 stencil over the TW=8 outputs; the
// loops below reproduce it with the same per-accumulator addition order
// (rows ascending, taps ascending), so results are bitwise identical.
__global__ void conv2d(float * __restrict__ input,const float * __restrict__ kernel, float * __restrict__ outputs){
    extern __shared__ float shared_input[];
    const unsigned int tile_id = blockIdx.x;
    const unsigned int tc_id = tile_id / THS;   // which channel tile
    const unsigned int th_id = tile_id % THS;   // which row tile
    const unsigned int tw_id = threadIdx.x / N; // which column tile
    const int h_out_start = th_id * TH;
    const int w_out_start = tw_id * TW;
    const unsigned int warp_id = tw_id;
    const unsigned int lane_id = threadIdx.x % N; // output channel handled by this thread
    float data_array[9];                 // current channel's 3x3 taps for channel n
    float temp_result[TH*TW] = {0.0f};   // per-thread output accumulators
    // Zero the shared tile (covers padding columns/rows that are never loaded).
    for (unsigned int i = threadIdx.x; i < TC*(TH+2)*WPAD; i += blockDim.x) {
        shared_input[i] = 0.0f;
    }
    const unsigned int n = lane_id;
    const unsigned int c_offset = tc_id * TC;
    // Rows actually present in the image for this tile (clip top/bottom pad).
    int h_offset = (h_out_start == 0) ? 1 : 0;
    int h_padded_start = h_out_start;
    int h_padded_end = min(h_padded_start + TH + 2, H + 2);
    int h_non_padded_start = max(h_out_start - 1, 0);
    int h_non_padded_end = min(H, h_padded_end - 1);
    __syncthreads();
    load_input_2_shared_memory(input, shared_input, h_non_padded_start, h_non_padded_end, h_offset, c_offset, warp_id, lane_id, N);
    __syncthreads();
    #pragma unroll
    for (unsigned int c = 0; c < TC; c++) {
        // Gather this channel's 3x3 taps for output channel n.
        #pragma unroll
        for (unsigned int r = 0; r < R; ++r) {
            #pragma unroll
            for (unsigned int s = 0; s < S; ++s) {
                data_array[r*S+s] = kernel[(c + c_offset)*N*9+r*3*N+s*N+n];
            }
        }
        // 3x3 stencil over the TW outputs of this thread's column tile.
        const unsigned int base = c*(TH+2)*(WPAD) + tw_id * TW;
        #pragma unroll
        for (unsigned int r = 0; r < 3; ++r) {
            #pragma unroll
            for (unsigned int w = 0; w < TW; ++w) {
                #pragma unroll
                for (unsigned int s = 0; s < 3; ++s) {
                    temp_result[w] += shared_input[base + r * WPAD + w + s] * data_array[r*3 + s];
                }
            }
        }
    }
    switch_write_back(min(TH, H - h_out_start), min(TW, W - w_out_start), h_out_start, w_out_start, n, outputs, temp_result);
}
// Sum of absolute element-wise differences between two float arrays of
// length `size` (OpenMP-parallel reduction when compiled with OpenMP).
float check_diff(float *x, float *y, unsigned int size){
    float total = 0.0f;
    #pragma omp parallel for reduction(+ : total)
    for (unsigned int idx = 0; idx < size; ++idx) {
        total += std::fabs(x[idx] - y[idx]);
    }
    return total;
}
int main(void){
float *input = new float[C*H*W];
time_t t;
float *matrix;
hipMalloc(&matrix,C*(TH+2)*(TW+2)*THS*TWS*sizeof(float));
hipMemset(matrix,0,C*(TH+2)*(TW+2)*THS*TWS*sizeof(float));
srand((unsigned) time(&t));
for(int i =0;i<C*H*W;++i){
input[i] = rand() % 10;
}
float *device_input;
hipMalloc(&device_input,C*H*W*sizeof(float));
hipMemcpy(device_input,input,C*H*W*sizeof(float),hipMemcpyHostToDevice);
float *K = new float[C*N*9];
for(int i=0;i<C*N*9;++i){
K[i] = 1.0f;
}
ConvGemm convGemm;
convGemm.initialize();
ConvWinogradeNon convWinogradeNon;
convWinogradeNon.initialize();
ConvFFT convFFT;
convFFT.initialize();
float *out_cudnn;
float *out_cudnn_host = new float[N*H*W];
hipEvent_t event_start;
hipEvent_t event_stop;
hipEventCreate(&event_start);
hipEventCreate(&event_stop);
out_cudnn = convGemm.forward(device_input);
hipMemcpy(out_cudnn_host,out_cudnn,N*H*W*sizeof(float),hipMemcpyDeviceToHost);
out_cudnn = convFFT.forward(device_input);
out_cudnn = convWinogradeNon.forward(device_input);
float *device_K;
float *device_out;
hipMalloc(&device_out,H*W*N*sizeof(float));
hipMemset(device_out,0,H*W*N*sizeof(float));
hipMalloc(&device_K,C*N*9*sizeof(float));
hipMemcpy(device_K,K,C*N*9*sizeof(float),hipMemcpyHostToDevice);
hipEventRecord(event_start);
convGemm.forward(device_input);
hipEventRecord(event_stop);
hipEventSynchronize(event_stop);
float cudnnGemmTime;
hipEventElapsedTime(&cudnnGemmTime, event_start, event_stop);
hipEventRecord(event_start);
convWinogradeNon.forward(device_input);
hipEventRecord(event_stop);
hipEventSynchronize(event_stop);
float cudnnWinogradeTimeNon;
hipEventElapsedTime(&cudnnWinogradeTimeNon, event_start, event_stop);
hipEventRecord(event_start);
convFFT.forward(device_input);
hipEventRecord(event_stop);
hipEventSynchronize(event_stop);
float cudnnFFTTime;
hipEventElapsedTime(&cudnnFFTTime, event_start, event_stop);
dim3 grid(14,14,2);
dim3 block(1,1,8);
hipEventRecord(event_start);
hipLaunchKernelGGL(( default_function_kernel0), dim3(grid), dim3(block), 0, 0, device_input, device_K, device_out);
hipEventRecord(event_stop);
hipEventSynchronize(event_stop);
float time_tvm;
hipEventElapsedTime(&time_tvm, event_start, event_stop);
float *out_tvm = new float[N*H*W];
hipMemcpy(out_tvm,device_out,N*H*W*sizeof(float),hipMemcpyDeviceToHost);
hipMemset(device_out, 0, sizeof(float)*N*H*W);
chkerr(hipFuncSetAttribute(conv2d,hipFuncAttributeMaxDynamicSharedMemorySize, TC*(TH+2)*(WPAD)*4));
hipEventRecord(event_start);
hipLaunchKernelGGL(( conv2d), dim3(TCS*THS), dim3(N * TWS), TC*(TH+2)*(WPAD)*4, 0, device_input, device_K, device_out);
hipEventRecord(event_stop);
hipEventSynchronize(event_stop);
float time_tdc;
hipEventElapsedTime(&time_tdc, event_start, event_stop);
float *out_tdc = new float[N*H*W];
hipMemcpy(out_tdc,device_out,N*H*W*sizeof(float),hipMemcpyDeviceToHost);
ofstream outfile;
char buffer[1000];
int ret = sprintf(buffer,"%d,%d,%d,%d,%f,%f,%f,%f,%f,%f,%f,%f,%f\n",N,C,H,W,
cudnnFFTTime,cudnnWinogradeTimeNon,cudnnGemmTime,time_tvm,time_tdc,
cudnnFFTTime/time_tdc,cudnnWinogradeTimeNon/time_tdc,cudnnGemmTime/time_tdc,time_tvm/time_tdc);
outfile.open("../../evaluation_outcome/A100-layers-eval-oracle.csv", std::ios_base::app);
outfile << buffer;
float difference = check_diff(out_tvm, out_tdc, N*H*W);
cout<<N<<","<<C<<","<<H<<","<<W<<","<<cudnnFFTTime<<","<<cudnnWinogradeTimeNon<<","<<cudnnGemmTime<<","<<
time_tvm<<","<<time_tdc<<","<<cudnnFFTTime/time_tdc<<","<<cudnnWinogradeTimeNon/time_tdc<<","<<
cudnnGemmTime/time_tdc<<","<<time_tvm/time_tdc<<endl;
return 0;
}
| 4945406585af1ea84bcfb105a9540abfa42a8682.cu | #include <cudnn.h>
#include <stdio.h>
#include <cuda.h>
#include <malloc.h>
#include <cstdlib>
#include <time.h>
#include <iostream>
#include <sys/types.h>
#include <errno.h>
#include <vector>
#include <fstream>
#include <string>
#include <omp.h>
#define TH 1
#define TW 8
#define TC 16
#define C 64
#define N 32
#define H 56
#define W 56
#define TCS ((C-1)/TC + 1)
#define THS ((H-1)/TH + 1)
#define TWS ((W-1)/TW+1)
#define WPAD (TWS*TW + 2)
#define R 3
#define S 3
using namespace std;
#define checkCUDNN(expression) \
{ \
cudnnStatus_t status = (expression); \
if (status != CUDNN_STATUS_SUCCESS) { \
std::cerr << "Error on line " << __LINE__ << ": " \
<< cudnnGetErrorString(status) << std::endl; \
std::exit(EXIT_FAILURE); \
} \
}
// Abort the process immediately on any non-success CUDA runtime status.
inline void chkerr(cudaError_t code)
{
    if (code == cudaSuccess) return;  // fast path: nothing to report
    std::cerr << "ERROR!!!:" << cudaGetErrorString(code) << endl;
    exit(-1);
}
extern "C" __global__ void default_function_kernel0(float* __restrict__ data, float* __restrict__ kernel, float* __restrict__ compute) {
float compute_local[32];
__shared__ float pad_temp_shared[384];
__shared__ float kernel_shared[768];
float pad_temp_shared_local[32];
float kernel_shared_local[4];
compute_local[(0)] = 0.000000e+00f;
compute_local[(16)] = 0.000000e+00f;
compute_local[(4)] = 0.000000e+00f;
compute_local[(20)] = 0.000000e+00f;
compute_local[(8)] = 0.000000e+00f;
compute_local[(24)] = 0.000000e+00f;
compute_local[(12)] = 0.000000e+00f;
compute_local[(28)] = 0.000000e+00f;
compute_local[(1)] = 0.000000e+00f;
compute_local[(17)] = 0.000000e+00f;
compute_local[(5)] = 0.000000e+00f;
compute_local[(21)] = 0.000000e+00f;
compute_local[(9)] = 0.000000e+00f;
compute_local[(25)] = 0.000000e+00f;
compute_local[(13)] = 0.000000e+00f;
compute_local[(29)] = 0.000000e+00f;
compute_local[(2)] = 0.000000e+00f;
compute_local[(18)] = 0.000000e+00f;
compute_local[(6)] = 0.000000e+00f;
compute_local[(22)] = 0.000000e+00f;
compute_local[(10)] = 0.000000e+00f;
compute_local[(26)] = 0.000000e+00f;
compute_local[(14)] = 0.000000e+00f;
compute_local[(30)] = 0.000000e+00f;
compute_local[(3)] = 0.000000e+00f;
compute_local[(19)] = 0.000000e+00f;
compute_local[(7)] = 0.000000e+00f;
compute_local[(23)] = 0.000000e+00f;
compute_local[(11)] = 0.000000e+00f;
compute_local[(27)] = 0.000000e+00f;
compute_local[(15)] = 0.000000e+00f;
compute_local[(31)] = 0.000000e+00f;
for (int rc_outer = 0; rc_outer < 4; ++rc_outer) {
for (int ry_outer = 0; ry_outer < 3; ++ry_outer) {
__syncthreads();
pad_temp_shared[((((int)threadIdx.z) * 48))] = (((1 <= ((((int)blockIdx.y) * 4) + ry_outer)) && (1 <= ((int)blockIdx.x))) ? data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) - 57))] : 0.000000e+00f);
pad_temp_shared[(((((int)threadIdx.z) * 48) + 1))] = ((1 <= ((((int)blockIdx.y) * 4) + ry_outer)) ? data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) - 56))] : 0.000000e+00f);
pad_temp_shared[(((((int)threadIdx.z) * 48) + 2))] = ((1 <= ((((int)blockIdx.y) * 4) + ry_outer)) ? data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) - 55))] : 0.000000e+00f);
pad_temp_shared[(((((int)threadIdx.z) * 48) + 3))] = ((1 <= ((((int)blockIdx.y) * 4) + ry_outer)) ? data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) - 54))] : 0.000000e+00f);
pad_temp_shared[(((((int)threadIdx.z) * 48) + 4))] = ((1 <= ((((int)blockIdx.y) * 4) + ry_outer)) ? data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) - 53))] : 0.000000e+00f);
pad_temp_shared[(((((int)threadIdx.z) * 48) + 5))] = (((1 <= ((((int)blockIdx.y) * 4) + ry_outer)) && (((int)blockIdx.x) < 13)) ? data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) - 52))] : 0.000000e+00f);
pad_temp_shared[(((((int)threadIdx.z) * 48) + 6))] = ((1 <= ((int)blockIdx.x)) ? data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) - 1))] : 0.000000e+00f);
pad_temp_shared[(((((int)threadIdx.z) * 48) + 7))] = data[((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)))];
pad_temp_shared[(((((int)threadIdx.z) * 48) + 8))] = data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) + 1))];
pad_temp_shared[(((((int)threadIdx.z) * 48) + 9))] = data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) + 2))];
pad_temp_shared[(((((int)threadIdx.z) * 48) + 10))] = data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) + 3))];
pad_temp_shared[(((((int)threadIdx.z) * 48) + 11))] = ((((int)blockIdx.x) < 13) ? data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) + 4))] : 0.000000e+00f);
pad_temp_shared[(((((int)threadIdx.z) * 48) + 12))] = ((1 <= ((int)blockIdx.x)) ? data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) + 55))] : 0.000000e+00f);
pad_temp_shared[(((((int)threadIdx.z) * 48) + 13))] = data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) + 56))];
pad_temp_shared[(((((int)threadIdx.z) * 48) + 14))] = data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) + 57))];
pad_temp_shared[(((((int)threadIdx.z) * 48) + 15))] = data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) + 58))];
pad_temp_shared[(((((int)threadIdx.z) * 48) + 16))] = data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) + 59))];
pad_temp_shared[(((((int)threadIdx.z) * 48) + 17))] = ((((int)blockIdx.x) < 13) ? data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) + 60))] : 0.000000e+00f);
pad_temp_shared[(((((int)threadIdx.z) * 48) + 18))] = (((((((int)blockIdx.y) * 4) + ry_outer) < 54) && (1 <= ((int)blockIdx.x))) ? data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) + 111))] : 0.000000e+00f);
pad_temp_shared[(((((int)threadIdx.z) * 48) + 19))] = ((((((int)blockIdx.y) * 4) + ry_outer) < 54) ? data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) + 112))] : 0.000000e+00f);
pad_temp_shared[(((((int)threadIdx.z) * 48) + 20))] = ((((((int)blockIdx.y) * 4) + ry_outer) < 54) ? data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) + 113))] : 0.000000e+00f);
pad_temp_shared[(((((int)threadIdx.z) * 48) + 21))] = ((((((int)blockIdx.y) * 4) + ry_outer) < 54) ? data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) + 114))] : 0.000000e+00f);
pad_temp_shared[(((((int)threadIdx.z) * 48) + 22))] = ((((((int)blockIdx.y) * 4) + ry_outer) < 54) ? data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) + 115))] : 0.000000e+00f);
pad_temp_shared[(((((int)threadIdx.z) * 48) + 23))] = (((((((int)blockIdx.y) * 4) + ry_outer) < 54) && (((int)blockIdx.x) < 13)) ? data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) + 116))] : 0.000000e+00f);
pad_temp_shared[(((((int)threadIdx.z) * 48) + 24))] = (((1 <= ((((int)blockIdx.y) * 4) + ry_outer)) && (1 <= ((int)blockIdx.x))) ? data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) + 3079))] : 0.000000e+00f);
pad_temp_shared[(((((int)threadIdx.z) * 48) + 25))] = ((1 <= ((((int)blockIdx.y) * 4) + ry_outer)) ? data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) + 3080))] : 0.000000e+00f);
pad_temp_shared[(((((int)threadIdx.z) * 48) + 26))] = ((1 <= ((((int)blockIdx.y) * 4) + ry_outer)) ? data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) + 3081))] : 0.000000e+00f);
pad_temp_shared[(((((int)threadIdx.z) * 48) + 27))] = ((1 <= ((((int)blockIdx.y) * 4) + ry_outer)) ? data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) + 3082))] : 0.000000e+00f);
pad_temp_shared[(((((int)threadIdx.z) * 48) + 28))] = ((1 <= ((((int)blockIdx.y) * 4) + ry_outer)) ? data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) + 3083))] : 0.000000e+00f);
pad_temp_shared[(((((int)threadIdx.z) * 48) + 29))] = (((1 <= ((((int)blockIdx.y) * 4) + ry_outer)) && (((int)blockIdx.x) < 13)) ? data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) + 3084))] : 0.000000e+00f);
pad_temp_shared[(((((int)threadIdx.z) * 48) + 30))] = ((1 <= ((int)blockIdx.x)) ? data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) + 3135))] : 0.000000e+00f);
pad_temp_shared[(((((int)threadIdx.z) * 48) + 31))] = data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) + 3136))];
pad_temp_shared[(((((int)threadIdx.z) * 48) + 32))] = data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) + 3137))];
pad_temp_shared[(((((int)threadIdx.z) * 48) + 33))] = data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) + 3138))];
pad_temp_shared[(((((int)threadIdx.z) * 48) + 34))] = data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) + 3139))];
pad_temp_shared[(((((int)threadIdx.z) * 48) + 35))] = ((((int)blockIdx.x) < 13) ? data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) + 3140))] : 0.000000e+00f);
pad_temp_shared[(((((int)threadIdx.z) * 48) + 36))] = ((1 <= ((int)blockIdx.x)) ? data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) + 3191))] : 0.000000e+00f);
pad_temp_shared[(((((int)threadIdx.z) * 48) + 37))] = data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) + 3192))];
pad_temp_shared[(((((int)threadIdx.z) * 48) + 38))] = data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) + 3193))];
pad_temp_shared[(((((int)threadIdx.z) * 48) + 39))] = data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) + 3194))];
pad_temp_shared[(((((int)threadIdx.z) * 48) + 40))] = data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) + 3195))];
pad_temp_shared[(((((int)threadIdx.z) * 48) + 41))] = ((((int)blockIdx.x) < 13) ? data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) + 3196))] : 0.000000e+00f);
pad_temp_shared[(((((int)threadIdx.z) * 48) + 42))] = (((((((int)blockIdx.y) * 4) + ry_outer) < 54) && (1 <= ((int)blockIdx.x))) ? data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) + 3247))] : 0.000000e+00f);
pad_temp_shared[(((((int)threadIdx.z) * 48) + 43))] = ((((((int)blockIdx.y) * 4) + ry_outer) < 54) ? data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) + 3248))] : 0.000000e+00f);
pad_temp_shared[(((((int)threadIdx.z) * 48) + 44))] = ((((((int)blockIdx.y) * 4) + ry_outer) < 54) ? data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) + 3249))] : 0.000000e+00f);
pad_temp_shared[(((((int)threadIdx.z) * 48) + 45))] = ((((((int)blockIdx.y) * 4) + ry_outer) < 54) ? data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) + 3250))] : 0.000000e+00f);
pad_temp_shared[(((((int)threadIdx.z) * 48) + 46))] = ((((((int)blockIdx.y) * 4) + ry_outer) < 54) ? data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) + 3251))] : 0.000000e+00f);
pad_temp_shared[(((((int)threadIdx.z) * 48) + 47))] = (((((((int)blockIdx.y) * 4) + ry_outer) < 54) && (((int)blockIdx.x) < 13)) ? data[(((((((rc_outer * 50176) + (((int)threadIdx.z) * 6272)) + (((int)blockIdx.y) * 224)) + (ry_outer * 56)) + (((int)blockIdx.x) * 4)) + 3252))] : 0.000000e+00f);
kernel_shared[((((int)threadIdx.z) * 96))] = kernel[(((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)))];
kernel_shared[(((((int)threadIdx.z) * 96) + 1))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 1))];
kernel_shared[(((((int)threadIdx.z) * 96) + 2))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 2))];
kernel_shared[(((((int)threadIdx.z) * 96) + 3))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 9))];
kernel_shared[(((((int)threadIdx.z) * 96) + 4))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 10))];
kernel_shared[(((((int)threadIdx.z) * 96) + 5))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 11))];
kernel_shared[(((((int)threadIdx.z) * 96) + 6))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 18))];
kernel_shared[(((((int)threadIdx.z) * 96) + 7))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 19))];
kernel_shared[(((((int)threadIdx.z) * 96) + 8))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 20))];
kernel_shared[(((((int)threadIdx.z) * 96) + 9))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 27))];
kernel_shared[(((((int)threadIdx.z) * 96) + 10))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 28))];
kernel_shared[(((((int)threadIdx.z) * 96) + 11))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 29))];
kernel_shared[(((((int)threadIdx.z) * 96) + 12))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 36))];
kernel_shared[(((((int)threadIdx.z) * 96) + 13))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 37))];
kernel_shared[(((((int)threadIdx.z) * 96) + 14))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 38))];
kernel_shared[(((((int)threadIdx.z) * 96) + 15))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 45))];
kernel_shared[(((((int)threadIdx.z) * 96) + 16))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 46))];
kernel_shared[(((((int)threadIdx.z) * 96) + 17))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 47))];
kernel_shared[(((((int)threadIdx.z) * 96) + 18))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 54))];
kernel_shared[(((((int)threadIdx.z) * 96) + 19))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 55))];
kernel_shared[(((((int)threadIdx.z) * 96) + 20))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 56))];
kernel_shared[(((((int)threadIdx.z) * 96) + 21))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 63))];
kernel_shared[(((((int)threadIdx.z) * 96) + 22))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 64))];
kernel_shared[(((((int)threadIdx.z) * 96) + 23))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 65))];
kernel_shared[(((((int)threadIdx.z) * 96) + 24))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 72))];
kernel_shared[(((((int)threadIdx.z) * 96) + 25))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 73))];
kernel_shared[(((((int)threadIdx.z) * 96) + 26))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 74))];
kernel_shared[(((((int)threadIdx.z) * 96) + 27))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 81))];
kernel_shared[(((((int)threadIdx.z) * 96) + 28))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 82))];
kernel_shared[(((((int)threadIdx.z) * 96) + 29))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 83))];
kernel_shared[(((((int)threadIdx.z) * 96) + 30))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 90))];
kernel_shared[(((((int)threadIdx.z) * 96) + 31))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 91))];
kernel_shared[(((((int)threadIdx.z) * 96) + 32))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 92))];
kernel_shared[(((((int)threadIdx.z) * 96) + 33))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 99))];
kernel_shared[(((((int)threadIdx.z) * 96) + 34))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 100))];
kernel_shared[(((((int)threadIdx.z) * 96) + 35))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 101))];
kernel_shared[(((((int)threadIdx.z) * 96) + 36))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 108))];
kernel_shared[(((((int)threadIdx.z) * 96) + 37))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 109))];
kernel_shared[(((((int)threadIdx.z) * 96) + 38))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 110))];
kernel_shared[(((((int)threadIdx.z) * 96) + 39))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 117))];
kernel_shared[(((((int)threadIdx.z) * 96) + 40))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 118))];
kernel_shared[(((((int)threadIdx.z) * 96) + 41))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 119))];
kernel_shared[(((((int)threadIdx.z) * 96) + 42))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 126))];
kernel_shared[(((((int)threadIdx.z) * 96) + 43))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 127))];
kernel_shared[(((((int)threadIdx.z) * 96) + 44))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 128))];
kernel_shared[(((((int)threadIdx.z) * 96) + 45))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 135))];
kernel_shared[(((((int)threadIdx.z) * 96) + 46))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 136))];
kernel_shared[(((((int)threadIdx.z) * 96) + 47))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 137))];
kernel_shared[(((((int)threadIdx.z) * 96) + 48))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 576))];
kernel_shared[(((((int)threadIdx.z) * 96) + 49))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 577))];
kernel_shared[(((((int)threadIdx.z) * 96) + 50))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 578))];
kernel_shared[(((((int)threadIdx.z) * 96) + 51))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 585))];
kernel_shared[(((((int)threadIdx.z) * 96) + 52))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 586))];
kernel_shared[(((((int)threadIdx.z) * 96) + 53))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 587))];
kernel_shared[(((((int)threadIdx.z) * 96) + 54))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 594))];
kernel_shared[(((((int)threadIdx.z) * 96) + 55))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 595))];
kernel_shared[(((((int)threadIdx.z) * 96) + 56))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 596))];
kernel_shared[(((((int)threadIdx.z) * 96) + 57))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 603))];
kernel_shared[(((((int)threadIdx.z) * 96) + 58))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 604))];
kernel_shared[(((((int)threadIdx.z) * 96) + 59))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 605))];
kernel_shared[(((((int)threadIdx.z) * 96) + 60))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 612))];
kernel_shared[(((((int)threadIdx.z) * 96) + 61))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 613))];
kernel_shared[(((((int)threadIdx.z) * 96) + 62))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 614))];
kernel_shared[(((((int)threadIdx.z) * 96) + 63))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 621))];
kernel_shared[(((((int)threadIdx.z) * 96) + 64))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 622))];
kernel_shared[(((((int)threadIdx.z) * 96) + 65))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 623))];
kernel_shared[(((((int)threadIdx.z) * 96) + 66))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 630))];
kernel_shared[(((((int)threadIdx.z) * 96) + 67))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 631))];
kernel_shared[(((((int)threadIdx.z) * 96) + 68))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 632))];
kernel_shared[(((((int)threadIdx.z) * 96) + 69))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 639))];
kernel_shared[(((((int)threadIdx.z) * 96) + 70))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 640))];
kernel_shared[(((((int)threadIdx.z) * 96) + 71))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 641))];
kernel_shared[(((((int)threadIdx.z) * 96) + 72))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 648))];
kernel_shared[(((((int)threadIdx.z) * 96) + 73))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 649))];
kernel_shared[(((((int)threadIdx.z) * 96) + 74))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 650))];
kernel_shared[(((((int)threadIdx.z) * 96) + 75))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 657))];
kernel_shared[(((((int)threadIdx.z) * 96) + 76))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 658))];
kernel_shared[(((((int)threadIdx.z) * 96) + 77))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 659))];
kernel_shared[(((((int)threadIdx.z) * 96) + 78))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 666))];
kernel_shared[(((((int)threadIdx.z) * 96) + 79))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 667))];
kernel_shared[(((((int)threadIdx.z) * 96) + 80))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 668))];
kernel_shared[(((((int)threadIdx.z) * 96) + 81))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 675))];
kernel_shared[(((((int)threadIdx.z) * 96) + 82))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 676))];
kernel_shared[(((((int)threadIdx.z) * 96) + 83))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 677))];
kernel_shared[(((((int)threadIdx.z) * 96) + 84))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 684))];
kernel_shared[(((((int)threadIdx.z) * 96) + 85))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 685))];
kernel_shared[(((((int)threadIdx.z) * 96) + 86))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 686))];
kernel_shared[(((((int)threadIdx.z) * 96) + 87))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 693))];
kernel_shared[(((((int)threadIdx.z) * 96) + 88))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 694))];
kernel_shared[(((((int)threadIdx.z) * 96) + 89))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 695))];
kernel_shared[(((((int)threadIdx.z) * 96) + 90))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 702))];
kernel_shared[(((((int)threadIdx.z) * 96) + 91))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 703))];
kernel_shared[(((((int)threadIdx.z) * 96) + 92))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 704))];
kernel_shared[(((((int)threadIdx.z) * 96) + 93))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 711))];
kernel_shared[(((((int)threadIdx.z) * 96) + 94))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 712))];
kernel_shared[(((((int)threadIdx.z) * 96) + 95))] = kernel[((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 1152)) + (rc_outer * 144)) + (ry_outer * 3)) + 713))];
__syncthreads();
for (int rc_inner_outer = 0; rc_inner_outer < 8; ++rc_inner_outer) {
pad_temp_shared_local[(0)] = pad_temp_shared[((rc_inner_outer * 48))];
pad_temp_shared_local[(8)] = pad_temp_shared[(((rc_inner_outer * 48) + 1))];
pad_temp_shared_local[(16)] = pad_temp_shared[(((rc_inner_outer * 48) + 2))];
pad_temp_shared_local[(24)] = pad_temp_shared[(((rc_inner_outer * 48) + 3))];
pad_temp_shared_local[(1)] = pad_temp_shared[(((rc_inner_outer * 48) + 6))];
pad_temp_shared_local[(9)] = pad_temp_shared[(((rc_inner_outer * 48) + 7))];
pad_temp_shared_local[(17)] = pad_temp_shared[(((rc_inner_outer * 48) + 8))];
pad_temp_shared_local[(25)] = pad_temp_shared[(((rc_inner_outer * 48) + 9))];
pad_temp_shared_local[(2)] = pad_temp_shared[(((rc_inner_outer * 48) + 12))];
pad_temp_shared_local[(10)] = pad_temp_shared[(((rc_inner_outer * 48) + 13))];
pad_temp_shared_local[(18)] = pad_temp_shared[(((rc_inner_outer * 48) + 14))];
pad_temp_shared_local[(26)] = pad_temp_shared[(((rc_inner_outer * 48) + 15))];
pad_temp_shared_local[(3)] = pad_temp_shared[(((rc_inner_outer * 48) + 18))];
pad_temp_shared_local[(11)] = pad_temp_shared[(((rc_inner_outer * 48) + 19))];
pad_temp_shared_local[(19)] = pad_temp_shared[(((rc_inner_outer * 48) + 20))];
pad_temp_shared_local[(27)] = pad_temp_shared[(((rc_inner_outer * 48) + 21))];
pad_temp_shared_local[(4)] = pad_temp_shared[(((rc_inner_outer * 48) + 24))];
pad_temp_shared_local[(12)] = pad_temp_shared[(((rc_inner_outer * 48) + 25))];
pad_temp_shared_local[(20)] = pad_temp_shared[(((rc_inner_outer * 48) + 26))];
pad_temp_shared_local[(28)] = pad_temp_shared[(((rc_inner_outer * 48) + 27))];
pad_temp_shared_local[(5)] = pad_temp_shared[(((rc_inner_outer * 48) + 30))];
pad_temp_shared_local[(13)] = pad_temp_shared[(((rc_inner_outer * 48) + 31))];
pad_temp_shared_local[(21)] = pad_temp_shared[(((rc_inner_outer * 48) + 32))];
pad_temp_shared_local[(29)] = pad_temp_shared[(((rc_inner_outer * 48) + 33))];
pad_temp_shared_local[(6)] = pad_temp_shared[(((rc_inner_outer * 48) + 36))];
pad_temp_shared_local[(14)] = pad_temp_shared[(((rc_inner_outer * 48) + 37))];
pad_temp_shared_local[(22)] = pad_temp_shared[(((rc_inner_outer * 48) + 38))];
pad_temp_shared_local[(30)] = pad_temp_shared[(((rc_inner_outer * 48) + 39))];
pad_temp_shared_local[(7)] = pad_temp_shared[(((rc_inner_outer * 48) + 42))];
pad_temp_shared_local[(15)] = pad_temp_shared[(((rc_inner_outer * 48) + 43))];
pad_temp_shared_local[(23)] = pad_temp_shared[(((rc_inner_outer * 48) + 44))];
pad_temp_shared_local[(31)] = pad_temp_shared[(((rc_inner_outer * 48) + 45))];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)))];
kernel_shared_local[(2)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 384))];
kernel_shared_local[(1)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 3))];
kernel_shared_local[(3)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 387))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(16)] = (compute_local[(16)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(2)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(0)]));
compute_local[(20)] = (compute_local[(20)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(2)]));
compute_local[(8)] = (compute_local[(8)] + (pad_temp_shared_local[(16)] * kernel_shared_local[(0)]));
compute_local[(24)] = (compute_local[(24)] + (pad_temp_shared_local[(16)] * kernel_shared_local[(2)]));
compute_local[(12)] = (compute_local[(12)] + (pad_temp_shared_local[(24)] * kernel_shared_local[(0)]));
compute_local[(28)] = (compute_local[(28)] + (pad_temp_shared_local[(24)] * kernel_shared_local[(2)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)]));
compute_local[(17)] = (compute_local[(17)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(2)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(0)]));
compute_local[(21)] = (compute_local[(21)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(2)]));
compute_local[(9)] = (compute_local[(9)] + (pad_temp_shared_local[(17)] * kernel_shared_local[(0)]));
compute_local[(25)] = (compute_local[(25)] + (pad_temp_shared_local[(17)] * kernel_shared_local[(2)]));
compute_local[(13)] = (compute_local[(13)] + (pad_temp_shared_local[(25)] * kernel_shared_local[(0)]));
compute_local[(29)] = (compute_local[(29)] + (pad_temp_shared_local[(25)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(0)]));
compute_local[(18)] = (compute_local[(18)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(10)] * kernel_shared_local[(0)]));
compute_local[(22)] = (compute_local[(22)] + (pad_temp_shared_local[(10)] * kernel_shared_local[(2)]));
compute_local[(10)] = (compute_local[(10)] + (pad_temp_shared_local[(18)] * kernel_shared_local[(0)]));
compute_local[(26)] = (compute_local[(26)] + (pad_temp_shared_local[(18)] * kernel_shared_local[(2)]));
compute_local[(14)] = (compute_local[(14)] + (pad_temp_shared_local[(26)] * kernel_shared_local[(0)]));
compute_local[(30)] = (compute_local[(30)] + (pad_temp_shared_local[(26)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(0)]));
compute_local[(19)] = (compute_local[(19)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(2)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(11)] * kernel_shared_local[(0)]));
compute_local[(23)] = (compute_local[(23)] + (pad_temp_shared_local[(11)] * kernel_shared_local[(2)]));
compute_local[(11)] = (compute_local[(11)] + (pad_temp_shared_local[(19)] * kernel_shared_local[(0)]));
compute_local[(27)] = (compute_local[(27)] + (pad_temp_shared_local[(19)] * kernel_shared_local[(2)]));
compute_local[(15)] = (compute_local[(15)] + (pad_temp_shared_local[(27)] * kernel_shared_local[(0)]));
compute_local[(31)] = (compute_local[(31)] + (pad_temp_shared_local[(27)] * kernel_shared_local[(2)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(1)]));
compute_local[(16)] = (compute_local[(16)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(3)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(12)] * kernel_shared_local[(1)]));
compute_local[(20)] = (compute_local[(20)] + (pad_temp_shared_local[(12)] * kernel_shared_local[(3)]));
compute_local[(8)] = (compute_local[(8)] + (pad_temp_shared_local[(20)] * kernel_shared_local[(1)]));
compute_local[(24)] = (compute_local[(24)] + (pad_temp_shared_local[(20)] * kernel_shared_local[(3)]));
compute_local[(12)] = (compute_local[(12)] + (pad_temp_shared_local[(28)] * kernel_shared_local[(1)]));
compute_local[(28)] = (compute_local[(28)] + (pad_temp_shared_local[(28)] * kernel_shared_local[(3)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(1)]));
compute_local[(17)] = (compute_local[(17)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(3)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(13)] * kernel_shared_local[(1)]));
compute_local[(21)] = (compute_local[(21)] + (pad_temp_shared_local[(13)] * kernel_shared_local[(3)]));
compute_local[(9)] = (compute_local[(9)] + (pad_temp_shared_local[(21)] * kernel_shared_local[(1)]));
compute_local[(25)] = (compute_local[(25)] + (pad_temp_shared_local[(21)] * kernel_shared_local[(3)]));
compute_local[(13)] = (compute_local[(13)] + (pad_temp_shared_local[(29)] * kernel_shared_local[(1)]));
compute_local[(29)] = (compute_local[(29)] + (pad_temp_shared_local[(29)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(18)] = (compute_local[(18)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(3)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(14)] * kernel_shared_local[(1)]));
compute_local[(22)] = (compute_local[(22)] + (pad_temp_shared_local[(14)] * kernel_shared_local[(3)]));
compute_local[(10)] = (compute_local[(10)] + (pad_temp_shared_local[(22)] * kernel_shared_local[(1)]));
compute_local[(26)] = (compute_local[(26)] + (pad_temp_shared_local[(22)] * kernel_shared_local[(3)]));
compute_local[(14)] = (compute_local[(14)] + (pad_temp_shared_local[(30)] * kernel_shared_local[(1)]));
compute_local[(30)] = (compute_local[(30)] + (pad_temp_shared_local[(30)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(1)]));
compute_local[(19)] = (compute_local[(19)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(3)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(15)] * kernel_shared_local[(1)]));
compute_local[(23)] = (compute_local[(23)] + (pad_temp_shared_local[(15)] * kernel_shared_local[(3)]));
compute_local[(11)] = (compute_local[(11)] + (pad_temp_shared_local[(23)] * kernel_shared_local[(1)]));
compute_local[(27)] = (compute_local[(27)] + (pad_temp_shared_local[(23)] * kernel_shared_local[(3)]));
compute_local[(15)] = (compute_local[(15)] + (pad_temp_shared_local[(31)] * kernel_shared_local[(1)]));
compute_local[(31)] = (compute_local[(31)] + (pad_temp_shared_local[(31)] * kernel_shared_local[(3)]));
pad_temp_shared_local[(0)] = pad_temp_shared[(((rc_inner_outer * 48) + 1))];
pad_temp_shared_local[(8)] = pad_temp_shared[(((rc_inner_outer * 48) + 2))];
pad_temp_shared_local[(16)] = pad_temp_shared[(((rc_inner_outer * 48) + 3))];
pad_temp_shared_local[(24)] = pad_temp_shared[(((rc_inner_outer * 48) + 4))];
pad_temp_shared_local[(1)] = pad_temp_shared[(((rc_inner_outer * 48) + 7))];
pad_temp_shared_local[(9)] = pad_temp_shared[(((rc_inner_outer * 48) + 8))];
pad_temp_shared_local[(17)] = pad_temp_shared[(((rc_inner_outer * 48) + 9))];
pad_temp_shared_local[(25)] = pad_temp_shared[(((rc_inner_outer * 48) + 10))];
pad_temp_shared_local[(2)] = pad_temp_shared[(((rc_inner_outer * 48) + 13))];
pad_temp_shared_local[(10)] = pad_temp_shared[(((rc_inner_outer * 48) + 14))];
pad_temp_shared_local[(18)] = pad_temp_shared[(((rc_inner_outer * 48) + 15))];
pad_temp_shared_local[(26)] = pad_temp_shared[(((rc_inner_outer * 48) + 16))];
pad_temp_shared_local[(3)] = pad_temp_shared[(((rc_inner_outer * 48) + 19))];
pad_temp_shared_local[(11)] = pad_temp_shared[(((rc_inner_outer * 48) + 20))];
pad_temp_shared_local[(19)] = pad_temp_shared[(((rc_inner_outer * 48) + 21))];
pad_temp_shared_local[(27)] = pad_temp_shared[(((rc_inner_outer * 48) + 22))];
pad_temp_shared_local[(4)] = pad_temp_shared[(((rc_inner_outer * 48) + 25))];
pad_temp_shared_local[(12)] = pad_temp_shared[(((rc_inner_outer * 48) + 26))];
pad_temp_shared_local[(20)] = pad_temp_shared[(((rc_inner_outer * 48) + 27))];
pad_temp_shared_local[(28)] = pad_temp_shared[(((rc_inner_outer * 48) + 28))];
pad_temp_shared_local[(5)] = pad_temp_shared[(((rc_inner_outer * 48) + 31))];
pad_temp_shared_local[(13)] = pad_temp_shared[(((rc_inner_outer * 48) + 32))];
pad_temp_shared_local[(21)] = pad_temp_shared[(((rc_inner_outer * 48) + 33))];
pad_temp_shared_local[(29)] = pad_temp_shared[(((rc_inner_outer * 48) + 34))];
pad_temp_shared_local[(6)] = pad_temp_shared[(((rc_inner_outer * 48) + 37))];
pad_temp_shared_local[(14)] = pad_temp_shared[(((rc_inner_outer * 48) + 38))];
pad_temp_shared_local[(22)] = pad_temp_shared[(((rc_inner_outer * 48) + 39))];
pad_temp_shared_local[(30)] = pad_temp_shared[(((rc_inner_outer * 48) + 40))];
pad_temp_shared_local[(7)] = pad_temp_shared[(((rc_inner_outer * 48) + 43))];
pad_temp_shared_local[(15)] = pad_temp_shared[(((rc_inner_outer * 48) + 44))];
pad_temp_shared_local[(23)] = pad_temp_shared[(((rc_inner_outer * 48) + 45))];
pad_temp_shared_local[(31)] = pad_temp_shared[(((rc_inner_outer * 48) + 46))];
kernel_shared_local[(0)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 1))];
kernel_shared_local[(2)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 385))];
kernel_shared_local[(1)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 4))];
kernel_shared_local[(3)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 388))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(16)] = (compute_local[(16)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(2)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(0)]));
compute_local[(20)] = (compute_local[(20)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(2)]));
compute_local[(8)] = (compute_local[(8)] + (pad_temp_shared_local[(16)] * kernel_shared_local[(0)]));
compute_local[(24)] = (compute_local[(24)] + (pad_temp_shared_local[(16)] * kernel_shared_local[(2)]));
compute_local[(12)] = (compute_local[(12)] + (pad_temp_shared_local[(24)] * kernel_shared_local[(0)]));
compute_local[(28)] = (compute_local[(28)] + (pad_temp_shared_local[(24)] * kernel_shared_local[(2)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)]));
compute_local[(17)] = (compute_local[(17)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(2)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(0)]));
compute_local[(21)] = (compute_local[(21)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(2)]));
compute_local[(9)] = (compute_local[(9)] + (pad_temp_shared_local[(17)] * kernel_shared_local[(0)]));
compute_local[(25)] = (compute_local[(25)] + (pad_temp_shared_local[(17)] * kernel_shared_local[(2)]));
compute_local[(13)] = (compute_local[(13)] + (pad_temp_shared_local[(25)] * kernel_shared_local[(0)]));
compute_local[(29)] = (compute_local[(29)] + (pad_temp_shared_local[(25)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(0)]));
compute_local[(18)] = (compute_local[(18)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(10)] * kernel_shared_local[(0)]));
compute_local[(22)] = (compute_local[(22)] + (pad_temp_shared_local[(10)] * kernel_shared_local[(2)]));
compute_local[(10)] = (compute_local[(10)] + (pad_temp_shared_local[(18)] * kernel_shared_local[(0)]));
compute_local[(26)] = (compute_local[(26)] + (pad_temp_shared_local[(18)] * kernel_shared_local[(2)]));
compute_local[(14)] = (compute_local[(14)] + (pad_temp_shared_local[(26)] * kernel_shared_local[(0)]));
compute_local[(30)] = (compute_local[(30)] + (pad_temp_shared_local[(26)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(0)]));
compute_local[(19)] = (compute_local[(19)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(2)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(11)] * kernel_shared_local[(0)]));
compute_local[(23)] = (compute_local[(23)] + (pad_temp_shared_local[(11)] * kernel_shared_local[(2)]));
compute_local[(11)] = (compute_local[(11)] + (pad_temp_shared_local[(19)] * kernel_shared_local[(0)]));
compute_local[(27)] = (compute_local[(27)] + (pad_temp_shared_local[(19)] * kernel_shared_local[(2)]));
compute_local[(15)] = (compute_local[(15)] + (pad_temp_shared_local[(27)] * kernel_shared_local[(0)]));
compute_local[(31)] = (compute_local[(31)] + (pad_temp_shared_local[(27)] * kernel_shared_local[(2)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(1)]));
compute_local[(16)] = (compute_local[(16)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(3)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(12)] * kernel_shared_local[(1)]));
compute_local[(20)] = (compute_local[(20)] + (pad_temp_shared_local[(12)] * kernel_shared_local[(3)]));
compute_local[(8)] = (compute_local[(8)] + (pad_temp_shared_local[(20)] * kernel_shared_local[(1)]));
compute_local[(24)] = (compute_local[(24)] + (pad_temp_shared_local[(20)] * kernel_shared_local[(3)]));
compute_local[(12)] = (compute_local[(12)] + (pad_temp_shared_local[(28)] * kernel_shared_local[(1)]));
compute_local[(28)] = (compute_local[(28)] + (pad_temp_shared_local[(28)] * kernel_shared_local[(3)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(1)]));
compute_local[(17)] = (compute_local[(17)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(3)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(13)] * kernel_shared_local[(1)]));
compute_local[(21)] = (compute_local[(21)] + (pad_temp_shared_local[(13)] * kernel_shared_local[(3)]));
compute_local[(9)] = (compute_local[(9)] + (pad_temp_shared_local[(21)] * kernel_shared_local[(1)]));
compute_local[(25)] = (compute_local[(25)] + (pad_temp_shared_local[(21)] * kernel_shared_local[(3)]));
compute_local[(13)] = (compute_local[(13)] + (pad_temp_shared_local[(29)] * kernel_shared_local[(1)]));
compute_local[(29)] = (compute_local[(29)] + (pad_temp_shared_local[(29)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(18)] = (compute_local[(18)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(3)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(14)] * kernel_shared_local[(1)]));
compute_local[(22)] = (compute_local[(22)] + (pad_temp_shared_local[(14)] * kernel_shared_local[(3)]));
compute_local[(10)] = (compute_local[(10)] + (pad_temp_shared_local[(22)] * kernel_shared_local[(1)]));
compute_local[(26)] = (compute_local[(26)] + (pad_temp_shared_local[(22)] * kernel_shared_local[(3)]));
compute_local[(14)] = (compute_local[(14)] + (pad_temp_shared_local[(30)] * kernel_shared_local[(1)]));
compute_local[(30)] = (compute_local[(30)] + (pad_temp_shared_local[(30)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(1)]));
compute_local[(19)] = (compute_local[(19)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(3)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(15)] * kernel_shared_local[(1)]));
compute_local[(23)] = (compute_local[(23)] + (pad_temp_shared_local[(15)] * kernel_shared_local[(3)]));
compute_local[(11)] = (compute_local[(11)] + (pad_temp_shared_local[(23)] * kernel_shared_local[(1)]));
compute_local[(27)] = (compute_local[(27)] + (pad_temp_shared_local[(23)] * kernel_shared_local[(3)]));
compute_local[(15)] = (compute_local[(15)] + (pad_temp_shared_local[(31)] * kernel_shared_local[(1)]));
compute_local[(31)] = (compute_local[(31)] + (pad_temp_shared_local[(31)] * kernel_shared_local[(3)]));
pad_temp_shared_local[(0)] = pad_temp_shared[(((rc_inner_outer * 48) + 2))];
pad_temp_shared_local[(8)] = pad_temp_shared[(((rc_inner_outer * 48) + 3))];
pad_temp_shared_local[(16)] = pad_temp_shared[(((rc_inner_outer * 48) + 4))];
pad_temp_shared_local[(24)] = pad_temp_shared[(((rc_inner_outer * 48) + 5))];
pad_temp_shared_local[(1)] = pad_temp_shared[(((rc_inner_outer * 48) + 8))];
pad_temp_shared_local[(9)] = pad_temp_shared[(((rc_inner_outer * 48) + 9))];
pad_temp_shared_local[(17)] = pad_temp_shared[(((rc_inner_outer * 48) + 10))];
pad_temp_shared_local[(25)] = pad_temp_shared[(((rc_inner_outer * 48) + 11))];
pad_temp_shared_local[(2)] = pad_temp_shared[(((rc_inner_outer * 48) + 14))];
pad_temp_shared_local[(10)] = pad_temp_shared[(((rc_inner_outer * 48) + 15))];
pad_temp_shared_local[(18)] = pad_temp_shared[(((rc_inner_outer * 48) + 16))];
pad_temp_shared_local[(26)] = pad_temp_shared[(((rc_inner_outer * 48) + 17))];
pad_temp_shared_local[(3)] = pad_temp_shared[(((rc_inner_outer * 48) + 20))];
pad_temp_shared_local[(11)] = pad_temp_shared[(((rc_inner_outer * 48) + 21))];
pad_temp_shared_local[(19)] = pad_temp_shared[(((rc_inner_outer * 48) + 22))];
pad_temp_shared_local[(27)] = pad_temp_shared[(((rc_inner_outer * 48) + 23))];
pad_temp_shared_local[(4)] = pad_temp_shared[(((rc_inner_outer * 48) + 26))];
pad_temp_shared_local[(12)] = pad_temp_shared[(((rc_inner_outer * 48) + 27))];
pad_temp_shared_local[(20)] = pad_temp_shared[(((rc_inner_outer * 48) + 28))];
pad_temp_shared_local[(28)] = pad_temp_shared[(((rc_inner_outer * 48) + 29))];
pad_temp_shared_local[(5)] = pad_temp_shared[(((rc_inner_outer * 48) + 32))];
pad_temp_shared_local[(13)] = pad_temp_shared[(((rc_inner_outer * 48) + 33))];
pad_temp_shared_local[(21)] = pad_temp_shared[(((rc_inner_outer * 48) + 34))];
pad_temp_shared_local[(29)] = pad_temp_shared[(((rc_inner_outer * 48) + 35))];
pad_temp_shared_local[(6)] = pad_temp_shared[(((rc_inner_outer * 48) + 38))];
pad_temp_shared_local[(14)] = pad_temp_shared[(((rc_inner_outer * 48) + 39))];
pad_temp_shared_local[(22)] = pad_temp_shared[(((rc_inner_outer * 48) + 40))];
pad_temp_shared_local[(30)] = pad_temp_shared[(((rc_inner_outer * 48) + 41))];
pad_temp_shared_local[(7)] = pad_temp_shared[(((rc_inner_outer * 48) + 44))];
pad_temp_shared_local[(15)] = pad_temp_shared[(((rc_inner_outer * 48) + 45))];
pad_temp_shared_local[(23)] = pad_temp_shared[(((rc_inner_outer * 48) + 46))];
pad_temp_shared_local[(31)] = pad_temp_shared[(((rc_inner_outer * 48) + 47))];
kernel_shared_local[(0)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 2))];
kernel_shared_local[(2)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 386))];
kernel_shared_local[(1)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 5))];
kernel_shared_local[(3)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 389))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(16)] = (compute_local[(16)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(2)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(0)]));
compute_local[(20)] = (compute_local[(20)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(2)]));
compute_local[(8)] = (compute_local[(8)] + (pad_temp_shared_local[(16)] * kernel_shared_local[(0)]));
compute_local[(24)] = (compute_local[(24)] + (pad_temp_shared_local[(16)] * kernel_shared_local[(2)]));
compute_local[(12)] = (compute_local[(12)] + (pad_temp_shared_local[(24)] * kernel_shared_local[(0)]));
compute_local[(28)] = (compute_local[(28)] + (pad_temp_shared_local[(24)] * kernel_shared_local[(2)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)]));
compute_local[(17)] = (compute_local[(17)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(2)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(0)]));
compute_local[(21)] = (compute_local[(21)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(2)]));
compute_local[(9)] = (compute_local[(9)] + (pad_temp_shared_local[(17)] * kernel_shared_local[(0)]));
compute_local[(25)] = (compute_local[(25)] + (pad_temp_shared_local[(17)] * kernel_shared_local[(2)]));
compute_local[(13)] = (compute_local[(13)] + (pad_temp_shared_local[(25)] * kernel_shared_local[(0)]));
compute_local[(29)] = (compute_local[(29)] + (pad_temp_shared_local[(25)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(0)]));
compute_local[(18)] = (compute_local[(18)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(10)] * kernel_shared_local[(0)]));
compute_local[(22)] = (compute_local[(22)] + (pad_temp_shared_local[(10)] * kernel_shared_local[(2)]));
compute_local[(10)] = (compute_local[(10)] + (pad_temp_shared_local[(18)] * kernel_shared_local[(0)]));
compute_local[(26)] = (compute_local[(26)] + (pad_temp_shared_local[(18)] * kernel_shared_local[(2)]));
compute_local[(14)] = (compute_local[(14)] + (pad_temp_shared_local[(26)] * kernel_shared_local[(0)]));
compute_local[(30)] = (compute_local[(30)] + (pad_temp_shared_local[(26)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(0)]));
compute_local[(19)] = (compute_local[(19)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(2)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(11)] * kernel_shared_local[(0)]));
compute_local[(23)] = (compute_local[(23)] + (pad_temp_shared_local[(11)] * kernel_shared_local[(2)]));
compute_local[(11)] = (compute_local[(11)] + (pad_temp_shared_local[(19)] * kernel_shared_local[(0)]));
compute_local[(27)] = (compute_local[(27)] + (pad_temp_shared_local[(19)] * kernel_shared_local[(2)]));
compute_local[(15)] = (compute_local[(15)] + (pad_temp_shared_local[(27)] * kernel_shared_local[(0)]));
compute_local[(31)] = (compute_local[(31)] + (pad_temp_shared_local[(27)] * kernel_shared_local[(2)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(1)]));
compute_local[(16)] = (compute_local[(16)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(3)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(12)] * kernel_shared_local[(1)]));
compute_local[(20)] = (compute_local[(20)] + (pad_temp_shared_local[(12)] * kernel_shared_local[(3)]));
compute_local[(8)] = (compute_local[(8)] + (pad_temp_shared_local[(20)] * kernel_shared_local[(1)]));
compute_local[(24)] = (compute_local[(24)] + (pad_temp_shared_local[(20)] * kernel_shared_local[(3)]));
compute_local[(12)] = (compute_local[(12)] + (pad_temp_shared_local[(28)] * kernel_shared_local[(1)]));
compute_local[(28)] = (compute_local[(28)] + (pad_temp_shared_local[(28)] * kernel_shared_local[(3)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(1)]));
compute_local[(17)] = (compute_local[(17)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(3)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(13)] * kernel_shared_local[(1)]));
compute_local[(21)] = (compute_local[(21)] + (pad_temp_shared_local[(13)] * kernel_shared_local[(3)]));
compute_local[(9)] = (compute_local[(9)] + (pad_temp_shared_local[(21)] * kernel_shared_local[(1)]));
compute_local[(25)] = (compute_local[(25)] + (pad_temp_shared_local[(21)] * kernel_shared_local[(3)]));
compute_local[(13)] = (compute_local[(13)] + (pad_temp_shared_local[(29)] * kernel_shared_local[(1)]));
compute_local[(29)] = (compute_local[(29)] + (pad_temp_shared_local[(29)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(18)] = (compute_local[(18)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(3)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(14)] * kernel_shared_local[(1)]));
compute_local[(22)] = (compute_local[(22)] + (pad_temp_shared_local[(14)] * kernel_shared_local[(3)]));
compute_local[(10)] = (compute_local[(10)] + (pad_temp_shared_local[(22)] * kernel_shared_local[(1)]));
compute_local[(26)] = (compute_local[(26)] + (pad_temp_shared_local[(22)] * kernel_shared_local[(3)]));
compute_local[(14)] = (compute_local[(14)] + (pad_temp_shared_local[(30)] * kernel_shared_local[(1)]));
compute_local[(30)] = (compute_local[(30)] + (pad_temp_shared_local[(30)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(1)]));
compute_local[(19)] = (compute_local[(19)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(3)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(15)] * kernel_shared_local[(1)]));
compute_local[(23)] = (compute_local[(23)] + (pad_temp_shared_local[(15)] * kernel_shared_local[(3)]));
compute_local[(11)] = (compute_local[(11)] + (pad_temp_shared_local[(23)] * kernel_shared_local[(1)]));
compute_local[(27)] = (compute_local[(27)] + (pad_temp_shared_local[(23)] * kernel_shared_local[(3)]));
compute_local[(15)] = (compute_local[(15)] + (pad_temp_shared_local[(31)] * kernel_shared_local[(1)]));
compute_local[(31)] = (compute_local[(31)] + (pad_temp_shared_local[(31)] * kernel_shared_local[(3)]));
}
}
}
compute[(((((((int)blockIdx.z) * 50176) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 224)) + (((int)blockIdx.x) * 4)))] = compute_local[(0)];
compute[((((((((int)blockIdx.z) * 50176) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 224)) + (((int)blockIdx.x) * 4)) + 25088))] = compute_local[(16)];
compute[((((((((int)blockIdx.z) * 50176) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 224)) + (((int)blockIdx.x) * 4)) + 1))] = compute_local[(4)];
compute[((((((((int)blockIdx.z) * 50176) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 224)) + (((int)blockIdx.x) * 4)) + 25089))] = compute_local[(20)];
compute[((((((((int)blockIdx.z) * 50176) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 224)) + (((int)blockIdx.x) * 4)) + 2))] = compute_local[(8)];
compute[((((((((int)blockIdx.z) * 50176) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 224)) + (((int)blockIdx.x) * 4)) + 25090))] = compute_local[(24)];
compute[((((((((int)blockIdx.z) * 50176) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 224)) + (((int)blockIdx.x) * 4)) + 3))] = compute_local[(12)];
compute[((((((((int)blockIdx.z) * 50176) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 224)) + (((int)blockIdx.x) * 4)) + 25091))] = compute_local[(28)];
compute[((((((((int)blockIdx.z) * 50176) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 224)) + (((int)blockIdx.x) * 4)) + 56))] = compute_local[(1)];
compute[((((((((int)blockIdx.z) * 50176) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 224)) + (((int)blockIdx.x) * 4)) + 25144))] = compute_local[(17)];
compute[((((((((int)blockIdx.z) * 50176) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 224)) + (((int)blockIdx.x) * 4)) + 57))] = compute_local[(5)];
compute[((((((((int)blockIdx.z) * 50176) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 224)) + (((int)blockIdx.x) * 4)) + 25145))] = compute_local[(21)];
compute[((((((((int)blockIdx.z) * 50176) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 224)) + (((int)blockIdx.x) * 4)) + 58))] = compute_local[(9)];
compute[((((((((int)blockIdx.z) * 50176) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 224)) + (((int)blockIdx.x) * 4)) + 25146))] = compute_local[(25)];
compute[((((((((int)blockIdx.z) * 50176) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 224)) + (((int)blockIdx.x) * 4)) + 59))] = compute_local[(13)];
compute[((((((((int)blockIdx.z) * 50176) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 224)) + (((int)blockIdx.x) * 4)) + 25147))] = compute_local[(29)];
compute[((((((((int)blockIdx.z) * 50176) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 224)) + (((int)blockIdx.x) * 4)) + 112))] = compute_local[(2)];
compute[((((((((int)blockIdx.z) * 50176) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 224)) + (((int)blockIdx.x) * 4)) + 25200))] = compute_local[(18)];
compute[((((((((int)blockIdx.z) * 50176) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 224)) + (((int)blockIdx.x) * 4)) + 113))] = compute_local[(6)];
compute[((((((((int)blockIdx.z) * 50176) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 224)) + (((int)blockIdx.x) * 4)) + 25201))] = compute_local[(22)];
compute[((((((((int)blockIdx.z) * 50176) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 224)) + (((int)blockIdx.x) * 4)) + 114))] = compute_local[(10)];
compute[((((((((int)blockIdx.z) * 50176) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 224)) + (((int)blockIdx.x) * 4)) + 25202))] = compute_local[(26)];
compute[((((((((int)blockIdx.z) * 50176) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 224)) + (((int)blockIdx.x) * 4)) + 115))] = compute_local[(14)];
compute[((((((((int)blockIdx.z) * 50176) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 224)) + (((int)blockIdx.x) * 4)) + 25203))] = compute_local[(30)];
compute[((((((((int)blockIdx.z) * 50176) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 224)) + (((int)blockIdx.x) * 4)) + 168))] = compute_local[(3)];
compute[((((((((int)blockIdx.z) * 50176) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 224)) + (((int)blockIdx.x) * 4)) + 25256))] = compute_local[(19)];
compute[((((((((int)blockIdx.z) * 50176) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 224)) + (((int)blockIdx.x) * 4)) + 169))] = compute_local[(7)];
compute[((((((((int)blockIdx.z) * 50176) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 224)) + (((int)blockIdx.x) * 4)) + 25257))] = compute_local[(23)];
compute[((((((((int)blockIdx.z) * 50176) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 224)) + (((int)blockIdx.x) * 4)) + 170))] = compute_local[(11)];
compute[((((((((int)blockIdx.z) * 50176) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 224)) + (((int)blockIdx.x) * 4)) + 25258))] = compute_local[(27)];
compute[((((((((int)blockIdx.z) * 50176) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 224)) + (((int)blockIdx.x) * 4)) + 171))] = compute_local[(15)];
compute[((((((((int)blockIdx.z) * 50176) + (((int)threadIdx.z) * 3136)) + (((int)blockIdx.y) * 224)) + (((int)blockIdx.x) * 4)) + 25259))] = compute_local[(31)];
}
// Wrapper around a cuDNN forward convolution using the IMPLICIT_GEMM
// algorithm. initialize() must be called once before forward(); no
// destructor is defined, so descriptors and device buffers are never
// released (leak by design of this benchmark-style code).
// NOTE(review): tensor/filter dimensions come from file-scope C, N, H, W,
// R, S globals not visible in this chunk -- confirm they are set first.
class ConvGemm{
public:
float *cpuKernel;              // host-side staging buffer for filter weights
float alpha = 1.0f;            // cuDNN blend factor applied to the conv result
float beta = 0.0f;             // cuDNN blend factor applied to prior output contents
cudnnHandle_t convCudnn;       // cuDNN library handle
void* d_workspace{nullptr};    // device scratch space for the chosen algorithm
size_t workspace_bytes{0};     // size of d_workspace in bytes
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;                 // device output buffer, owned by this object
float *kernel;                 // device filter buffer, owned by this object
void initialize();             // allocate buffers and build all descriptors
float *forward(float *input);  // run one convolution; returns device pointer
};
// One-time setup: allocates the device filter/output buffers, creates the
// cuDNN descriptors for a pad-1, stride-1, dilation-1 cross-correlation,
// queries the workspace size for CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM,
// and uploads an all-ones filter.
// NOTE(review): relies on file-scope C, N, H, W, R, S; the pad-1/stride-1
// descriptor only preserves HxW when R == S == 3 -- confirm against the
// globals' definitions.
void ConvGemm::initialize(){
    // Size the filter buffer from R*S instead of a hard-coded 9 so the
    // R*S*C*N-byte upload below can never overflow it.
    cudaMalloc(&kernel,sizeof(float)*C*N*R*S);
    cudaMalloc(&this->output,sizeof(float)*N*H*W);
    cudnnCreate(&convCudnn);
    cudnnCreateTensorDescriptor(&convInputDescriptor);
    cudnnSetTensor4dDescriptor(convInputDescriptor,
                               /*format=*/CUDNN_TENSOR_NCHW,
                               /*dataType=*/CUDNN_DATA_FLOAT,
                               /*batch_size=*/1,
                               /*channels=*/C,
                               /*image_height=*/H,
                               /*image_width=*/W);
    cudnnCreateFilterDescriptor(&convKernelDescriptor);
    cudnnSetFilter4dDescriptor(convKernelDescriptor,
                               /*dataType=*/CUDNN_DATA_FLOAT,
                               /*format=*/CUDNN_TENSOR_NCHW,
                               /*out_channels=*/N,
                               /*in_channels=*/C,
                               /*kernel_height=*/R,
                               /*kernel_width=*/S);
    cudnnCreateConvolutionDescriptor(&convDesc);
    cudnnSetConvolution2dDescriptor(convDesc,
                                    /*pad_height=*/1,
                                    /*pad_width=*/1,
                                    /*vertical_stride=*/1,
                                    /*horizontal_stride=*/1,
                                    /*dilation_height=*/1,
                                    /*dilation_width=*/1,
                                    /*mode=*/CUDNN_CROSS_CORRELATION,
                                    CUDNN_DATA_FLOAT);
    // Queried only as a sanity step; the output descriptor below assumes
    // the "same" geometry (1 x N x H x W).
    int batch_size{0}, channels{0}, height{0}, width{0};
    cudnnGetConvolution2dForwardOutputDim(convDesc,
                                          convInputDescriptor,
                                          convKernelDescriptor,
                                          &batch_size,
                                          &channels,
                                          &height,
                                          &width);
    cudnnCreateTensorDescriptor(&convOutputDescriptor);
    cudnnSetTensor4dDescriptor(convOutputDescriptor,
                               /*format=*/CUDNN_TENSOR_NCHW,
                               /*dataType=*/CUDNN_DATA_FLOAT,
                               /*batch_size=*/1,
                               /*channels=*/N,
                               /*image_height=*/H,
                               /*image_width=*/W);
    cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
                                            convInputDescriptor,
                                            convKernelDescriptor,
                                            convDesc,
                                            convOutputDescriptor,
                                            CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM,
                                            &workspace_bytes);
    cudaMalloc(&d_workspace, workspace_bytes);
    // Fill the host staging buffer with 1.0f and push it to the device.
    unsigned int kernelSize = R*S*C*N;
    this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
    // Unsigned loop index to match kernelSize (avoids signed/unsigned
    // comparison and overflow for large filters).
    for(unsigned int i=0;i<kernelSize;++i){
        this->cpuKernel[i] = 1.0f;
    }
    cudaMemcpy(kernel,cpuKernel,kernelSize*sizeof(float),cudaMemcpyHostToDevice);
    free(cpuKernel);
    cpuKernel = nullptr;  // staging buffer is no longer valid after free
}
// Runs one forward convolution on `input` (device pointer, 1 x C x H x W)
// with the pre-built descriptors and the IMPLICIT_GEMM algorithm.
// Returns the device pointer to the 1 x N x H x W output buffer owned by
// this object (overwritten by the next forward() call).
float * ConvGemm::forward(float *input) {
// beta == 0.0f so cuDNN overwrites the output anyway; the memset just keeps
// the buffer defined if the call fails.
cudaMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
// Wraps one cuDNN forward convolution configured for the
// WINOGRAD_NONFUSED algorithm. Usage: initialize() once, then forward()
// per input. Owns the device filter/output buffers and all descriptors.
// NOTE(review): no destructor -- the cuDNN resources leak.
class ConvWinogradeNon{
public:
float *cpuKernel;                             // host staging buffer for the filter weights
float alpha = 1.0f;                           // result blend factor for cudnnConvolutionForward
float beta = 0.0f;                            // prior-output blend factor (0 => output is overwritten)
cudnnHandle_t convCudnn;                      // cuDNN library handle
void* d_workspace{nullptr};                   // device workspace for the chosen conv algorithm
size_t workspace_bytes{0};                    // size of d_workspace in bytes
cudnnTensorDescriptor_t convInputDescriptor;  // 1 x C x H x W input layout
cudnnTensorDescriptor_t convOutputDescriptor; // 1 x N x H x W output layout
cudnnFilterDescriptor_t convKernelDescriptor; // N x C x R x S filter layout
cudnnConvolutionDescriptor_t convDesc;        // pad=1, stride=1, cross-correlation
float *output;                                // device output buffer (1 x N x H x W)
float *kernel;                                // device filter buffer
void initialize();                            // build descriptors, allocate buffers, upload all-ones filter
float *forward(float *input);                 // run the convolution; returns the device output pointer
};
// One-time setup for the WINOGRAD_NONFUSED path; mirrors
// ConvGemm::initialize but queries the workspace for
// CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED.
// Relies on the file-level globals C, N, H, W, R, S.
void ConvWinogradeNon::initialize(){
    // Fix: filter buffer was allocated as C*N*9 but filled with R*S*C*N
    // floats below -- heap overflow whenever R*S > 9.
    cudaMalloc(&kernel,sizeof(float)*C*N*R*S);
    cudaMalloc(&this->output,sizeof(float)*N*H*W);
    cudnnCreate(&convCudnn);
    cudnnCreateTensorDescriptor(&convInputDescriptor);
    cudnnSetTensor4dDescriptor(convInputDescriptor,
                               /*format=*/CUDNN_TENSOR_NCHW,
                               /*dataType=*/CUDNN_DATA_FLOAT,
                               /*batch_size=*/1,
                               /*channels=*/C,
                               /*image_height=*/H,
                               /*image_width=*/W);
    cudnnCreateFilterDescriptor(&convKernelDescriptor);
    cudnnSetFilter4dDescriptor(convKernelDescriptor,
                               /*dataType=*/CUDNN_DATA_FLOAT,
                               /*format=*/CUDNN_TENSOR_NCHW,
                               /*out_channels=*/N,
                               /*in_channels=*/C,
                               /*kernel_height=*/R,
                               /*kernel_width=*/S);
    cudnnCreateConvolutionDescriptor(&convDesc);
    cudnnSetConvolution2dDescriptor(convDesc,
                                    /*pad_height=*/1,
                                    /*pad_width=*/1,
                                    /*vertical_stride=*/1,
                                    /*horizontal_stride=*/1,
                                    /*dilation_height=*/1,
                                    /*dilation_width=*/1,
                                    /*mode=*/CUDNN_CROSS_CORRELATION,
                                    CUDNN_DATA_FLOAT);
    // Queried dims unused; output descriptor assumes same-size output.
    int batch_size{0}, channels{0}, height{0}, width{0};
    cudnnGetConvolution2dForwardOutputDim(convDesc,
                                          convInputDescriptor,
                                          convKernelDescriptor,
                                          &batch_size,
                                          &channels,
                                          &height,
                                          &width);
    cudnnCreateTensorDescriptor(&convOutputDescriptor);
    cudnnSetTensor4dDescriptor(convOutputDescriptor,
                               /*format=*/CUDNN_TENSOR_NCHW,
                               /*dataType=*/CUDNN_DATA_FLOAT,
                               /*batch_size=*/1,
                               /*channels=*/N,
                               /*image_height=*/H,
                               /*image_width=*/W);
    cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
                                            convInputDescriptor,
                                            convKernelDescriptor,
                                            convDesc,
                                            convOutputDescriptor,
                                            CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED,
                                            &workspace_bytes);
    cudaMalloc(&d_workspace, workspace_bytes);
    unsigned int kernelSize = R*S*C*N;//kernel
    this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
    // unsigned loop variable matches kernelSize's type.
    for(unsigned int i=0;i<kernelSize;++i){
        this->cpuKernel[i] = 1.0f;
    }
    cudaMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),cudaMemcpyHostToDevice);
    free(cpuKernel);
}
// Runs one forward convolution on `input` (device pointer, 1 x C x H x W)
// using the WINOGRAD_NONFUSED algorithm. Returns the device pointer to the
// object-owned 1 x N x H x W output buffer (overwritten on the next call).
float * ConvWinogradeNon::forward(float *input) {
// beta == 0.0f so cuDNN overwrites the output; memset keeps the buffer
// defined if the call fails.
cudaMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
// Wraps one cuDNN forward convolution configured for the FFT algorithm.
// Usage: initialize() once, then forward() per input. Owns the device
// filter/output buffers and all cuDNN descriptors.
// NOTE(review): no destructor -- the cuDNN resources leak.
class ConvFFT{
public:
float *cpuKernel;                             // host staging buffer for the filter weights
float alpha = 1.0f;                           // result blend factor for cudnnConvolutionForward
float beta = 0.0f;                            // prior-output blend factor (0 => output is overwritten)
cudnnHandle_t convCudnn;                      // cuDNN library handle
void* d_workspace{nullptr};                   // device workspace for the chosen conv algorithm
size_t workspace_bytes{0};                    // size of d_workspace in bytes
cudnnTensorDescriptor_t convInputDescriptor;  // 1 x C x H x W input layout
cudnnTensorDescriptor_t convOutputDescriptor; // 1 x N x H x W output layout
cudnnFilterDescriptor_t convKernelDescriptor; // N x C x R x S filter layout
cudnnConvolutionDescriptor_t convDesc;        // pad=1, stride=1, cross-correlation
float *output;                                // device output buffer (1 x N x H x W)
float *kernel;                                // device filter buffer
void initialize();                            // build descriptors, allocate buffers, upload all-ones filter
float *forward(float *input);                 // run the convolution; returns the device output pointer
};
// One-time setup for the FFT path; mirrors ConvGemm::initialize but
// queries the workspace for CUDNN_CONVOLUTION_FWD_ALGO_FFT.
// Relies on the file-level globals C, N, H, W, R, S.
void ConvFFT::initialize(){
    // Fix: filter buffer was allocated as C*N*9 but filled with R*S*C*N
    // floats below -- heap overflow whenever R*S > 9.
    cudaMalloc(&kernel,sizeof(float)*C*N*R*S);
    cudaMalloc(&this->output,sizeof(float)*N*H*W);
    cudnnCreate(&convCudnn);
    cudnnCreateTensorDescriptor(&convInputDescriptor);
    cudnnSetTensor4dDescriptor(convInputDescriptor,
                               /*format=*/CUDNN_TENSOR_NCHW,
                               /*dataType=*/CUDNN_DATA_FLOAT,
                               /*batch_size=*/1,
                               /*channels=*/C,
                               /*image_height=*/H,
                               /*image_width=*/W);
    cudnnCreateFilterDescriptor(&convKernelDescriptor);
    cudnnSetFilter4dDescriptor(convKernelDescriptor,
                               /*dataType=*/CUDNN_DATA_FLOAT,
                               /*format=*/CUDNN_TENSOR_NCHW,
                               /*out_channels=*/N,
                               /*in_channels=*/C,
                               /*kernel_height=*/R,
                               /*kernel_width=*/S);
    cudnnCreateConvolutionDescriptor(&convDesc);
    cudnnSetConvolution2dDescriptor(convDesc,
                                    /*pad_height=*/1,
                                    /*pad_width=*/1,
                                    /*vertical_stride=*/1,
                                    /*horizontal_stride=*/1,
                                    /*dilation_height=*/1,
                                    /*dilation_width=*/1,
                                    /*mode=*/CUDNN_CROSS_CORRELATION,
                                    CUDNN_DATA_FLOAT);
    // Queried dims unused; output descriptor assumes same-size output.
    int batch_size{0}, channels{0}, height{0}, width{0};
    cudnnGetConvolution2dForwardOutputDim(convDesc,
                                          convInputDescriptor,
                                          convKernelDescriptor,
                                          &batch_size,
                                          &channels,
                                          &height,
                                          &width);
    cudnnCreateTensorDescriptor(&convOutputDescriptor);
    cudnnSetTensor4dDescriptor(convOutputDescriptor,
                               /*format=*/CUDNN_TENSOR_NCHW,
                               /*dataType=*/CUDNN_DATA_FLOAT,
                               /*batch_size=*/1,
                               /*channels=*/N,
                               /*image_height=*/H,
                               /*image_width=*/W);
    cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
                                            convInputDescriptor,
                                            convKernelDescriptor,
                                            convDesc,
                                            convOutputDescriptor,
                                            CUDNN_CONVOLUTION_FWD_ALGO_FFT,
                                            &workspace_bytes);
    cudaMalloc(&d_workspace, workspace_bytes);
    unsigned int kernelSize = R*S*C*N;//kernel
    this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
    // unsigned loop variable matches kernelSize's type.
    for(unsigned int i=0;i<kernelSize;++i){
        this->cpuKernel[i] = 1.0f;
    }
    cudaMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),cudaMemcpyHostToDevice);
    free(cpuKernel);
}
// Runs one forward convolution on `input` (device pointer, 1 x C x H x W)
// using the FFT algorithm. Returns the device pointer to the object-owned
// 1 x N x H x W output buffer (overwritten on the next call).
float * ConvFFT::forward(float *input) {
// beta == 0.0f so cuDNN overwrites the output; memset keeps the buffer
// defined if the call fails.
cudaMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_FFT,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
// Stages one channel-tile of the input image into zero-padded shared memory.
// Each of the TWS warps (warp_id) walks the channels with stride TWS; lanes
// stride over the rows [h_start, h_end) of width W. h_offset (0 or 1) is the
// row shift inside the shared tile for the top-of-image case; the "+1" column
// shift accounts for the left padding column.
__device__ void load_input_2_shared_memory(float *input, float *shared_input, unsigned int h_start,
                                           unsigned int h_end, unsigned int h_offset, unsigned int c_start,
                                           unsigned int warp_id, unsigned int lane_id, unsigned int warp_size){
    // Unified form of the original case-0/case-1 switch: the two cases
    // differed only by a row shift of h_offset in the shared tile. Other
    // h_offset values did nothing in the original switch, so bail out.
    if (h_offset > 1) return;
    for (unsigned int c = warp_id; c < TC; c += TWS) {
        for (unsigned int i = lane_id; i < (h_end - h_start) * W; i += warp_size) {
            unsigned int row = i / W;
            unsigned int col = i % W;
            shared_input[c * (TH + 2) * (WPAD) + (h_offset + row) * WPAD + col + 1]
                = input[(c_start + c) * H * W + h_start * W + i];
        }
    }
}
// Accumulates a thread's TH x TW partial results into the global output for
// channel n, clipped to the valid region (write_h rows, write_w columns).
// atomicAdd is required because multiple channel-tile blocks contribute to
// the same output elements.
// Equivalent to the original nested switch: only write_h == 1 with
// write_w in [1, 8] was handled, and every reachable branch executed the
// same atomicAdd loop with tw bounded by write_w (and th fixed at 0).
__device__ __forceinline__ void switch_write_back(unsigned int write_h, unsigned int write_w, unsigned int h_out_start, unsigned int w_out_start, unsigned int n, float * outputs, float * temp_result){
    if (write_h != 1 || write_w < 1 || write_w > 8) return;
    for (unsigned int tw = 0; tw < write_w; ++tw) {
        // th == 0 in all original cases, so the row term is just h_out_start
        // and the temp_result index is 0 * TW + tw == tw.
        atomicAdd(&outputs[n * H * W + h_out_start * W + (w_out_start + tw)],
                  temp_result[tw]);
    }
}
// Hand-unrolled 3x3 same-padding convolution over one channel tile.
// Launch: TCS*THS blocks of N*TWS threads; dynamic shared memory of
// TC*(TH+2)*WPAD floats holds the zero-padded input tile.
// Thread mapping: lane_id = threadIdx.x % N selects the output channel,
// tw_id = threadIdx.x / N selects the width tile (TW wide); the block id
// encodes (channel tile, height tile). Partial sums over the block's TC
// channels are flushed to global memory with atomicAdd via
// switch_write_back, since different channel-tile blocks hit the same
// output elements.
// NOTE(review): the unrolled body assumes TH == 1 and TW == 8 -- it
// computes exactly 8 outputs of one row; confirm against the TH/TW macros.
__global__ void conv2d(float * __restrict__ input,const float * __restrict__ kernel, float * __restrict__ outputs){
extern __shared__ float shared_input[];
const unsigned int tile_id = blockIdx.x;
const unsigned int tc_id = tile_id / THS;   // which channel tile
const unsigned int th_id = tile_id % THS;   // which height tile
const unsigned int tw_id = threadIdx.x / N; // which width tile (also warp id)
const int h_out_start = th_id * TH;
const int w_out_start = tw_id * TW;
const unsigned int warp_id = tw_id;
const unsigned int lane_id = threadIdx.x % N; // output channel handled by this thread
float data_array[9];                 // 3x3 filter taps for the current input channel
float temp_result[TH*TW] = {0.0f};   // per-thread output accumulators
// Zero the shared tile so the padded border reads as 0.
for(unsigned int i=threadIdx.x;i<TC*(TH+2)*WPAD;i+=blockDim.x){
shared_input[i] = 0.0f;
}
unsigned int n = lane_id;
unsigned int c_offset = tc_id * TC;
// At the top edge there is no row above: shift rows down by one instead.
int h_offset = (h_out_start == 0)?1:0;
int h_padded_start = h_out_start;
int h_padded_end = min(h_padded_start + TH + 2, H + 2);
int h_non_padded_start = max(h_out_start - 1, 0);
int h_non_padded_end = min(H, h_padded_end - 1);
__syncthreads();
load_input_2_shared_memory(input, shared_input, h_non_padded_start, h_non_padded_end, h_offset, c_offset, warp_id, lane_id, N);
__syncthreads();
#pragma unroll
for(unsigned int c=0;c<TC;c++){
// Load the 3x3 taps for (input channel c+c_offset, output channel n).
// Kernel layout: [C][R][S][N] with the hard-coded 9 = R*S and 3 = S.
#pragma unroll
for(unsigned int r=0;r<R;++r){
#pragma unroll
for(unsigned int s=0;s<S;++s){
data_array[r*S+s] = kernel[(c + c_offset)*N*9+r*3*N+s*N+n];
}
}
// Unrolled stencil: shared row 0 against filter row 0 (taps 0..2).
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 0]*data_array[0];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 1]*data_array[0];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 1]*data_array[1];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 2]*data_array[0];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 2]*data_array[1];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 2]*data_array[2];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 3]*data_array[0];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 3]*data_array[1];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 3]*data_array[2];
temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 4]*data_array[0];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 4]*data_array[1];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 4]*data_array[2];
temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 5]*data_array[0];
temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 5]*data_array[1];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 5]*data_array[2];
temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 6]*data_array[0];
temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 6]*data_array[1];
temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 6]*data_array[2];
temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 7]*data_array[0];
temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 7]*data_array[1];
temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 7]*data_array[2];
temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 8]*data_array[1];
temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 8]*data_array[2];
temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 9]*data_array[2];
// Shared row 1 against filter row 1 (taps 3..5).
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 0]*data_array[3];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[3];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[4];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[3];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[4];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[5];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[3];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[4];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[5];
temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 4]*data_array[3];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 4]*data_array[4];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 4]*data_array[5];
temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 5]*data_array[3];
temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 5]*data_array[4];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 5]*data_array[5];
temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 6]*data_array[3];
temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 6]*data_array[4];
temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 6]*data_array[5];
temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 7]*data_array[3];
temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 7]*data_array[4];
temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 7]*data_array[5];
temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 8]*data_array[4];
temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 8]*data_array[5];
temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 9]*data_array[5];
// Shared row 2 against filter row 2 (taps 6..8).
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 0]*data_array[6];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[6];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[7];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[6];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[7];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[8];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[6];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[7];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[8];
temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 4]*data_array[6];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 4]*data_array[7];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 4]*data_array[8];
temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 5]*data_array[6];
temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 5]*data_array[7];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 5]*data_array[8];
temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 6]*data_array[6];
temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 6]*data_array[7];
temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 6]*data_array[8];
temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 7]*data_array[6];
temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 7]*data_array[7];
temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 7]*data_array[8];
temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 8]*data_array[7];
temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 8]*data_array[8];
temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 9]*data_array[8];
}
// Flush the accumulators, clipped to the image border.
switch_write_back(min(TH, H - h_out_start), min(TW, W - w_out_start), h_out_start, w_out_start, n, outputs, temp_result);
}
// Returns the sum of absolute element-wise differences between x and y
// over `size` elements. Used as a cheap agreement metric between two
// convolution outputs. Returns 0.0f for size == 0.
float check_diff(float *x, float *y, unsigned int size){
    float diff = 0.0f;
    #pragma omp parallel for reduction(+ : diff)
    for(unsigned int i=0;i<size;++i){
        // fabsf instead of bare abs: the integer abs(int) overload would
        // silently truncate the float difference if <cmath>'s float
        // overloads are not in scope.
        diff += fabsf(x[i] - y[i]);
    }
    return diff;
}
// Benchmark driver: runs the same 3x3 convolution through three cuDNN
// algorithms (GEMM, Winograd non-fused, FFT), a TVM-generated kernel
// (default_function_kernel0) and the hand-written conv2d kernel, times
// each with CUDA events, and appends a CSV row of times and speedups.
// NOTE(review): host/device allocations, events and cuDNN objects are
// never released -- acceptable for a one-shot benchmark, but worth noting.
int main(void){
float *input = new float[C*H*W];
time_t t;
// NOTE(review): `matrix` is allocated and zeroed but never read or written
// afterwards -- appears to be dead code.
float *matrix;
cudaMalloc(&matrix,C*(TH+2)*(TW+2)*THS*TWS*sizeof(float));
cudaMemset(matrix,0,C*(TH+2)*(TW+2)*THS*TWS*sizeof(float));
srand((unsigned) time(&t));
// Random input in [0, 9].
for(int i =0;i<C*H*W;++i){
input[i] = rand() % 10;
}
float *device_input;
cudaMalloc(&device_input,C*H*W*sizeof(float));
cudaMemcpy(device_input,input,C*H*W*sizeof(float),cudaMemcpyHostToDevice);
// All-ones filter, matching what each ConvX::initialize uploads.
float *K = new float[C*N*9];
for(int i=0;i<C*N*9;++i){
K[i] = 1.0f;
}
ConvGemm convGemm;
convGemm.initialize();
ConvWinogradeNon convWinogradeNon;
convWinogradeNon.initialize();
ConvFFT convFFT;
convFFT.initialize();
float *out_cudnn;
float *out_cudnn_host = new float[N*H*W];
cudaEvent_t event_start;
cudaEvent_t event_stop;
cudaEventCreate(&event_start);
cudaEventCreate(&event_stop);
// Untimed warm-up passes (also populate out_cudnn_host, which is
// NOTE(review): copied back but never compared against anything).
out_cudnn = convGemm.forward(device_input);
cudaMemcpy(out_cudnn_host,out_cudnn,N*H*W*sizeof(float),cudaMemcpyDeviceToHost);
out_cudnn = convFFT.forward(device_input);
out_cudnn = convWinogradeNon.forward(device_input);
float *device_K;
float *device_out;
cudaMalloc(&device_out,H*W*N*sizeof(float));
cudaMemset(device_out,0,H*W*N*sizeof(float));
cudaMalloc(&device_K,C*N*9*sizeof(float));
cudaMemcpy(device_K,K,C*N*9*sizeof(float),cudaMemcpyHostToDevice);
// Timed cuDNN runs: event pair brackets each forward call.
cudaEventRecord(event_start);
convGemm.forward(device_input);
cudaEventRecord(event_stop);
cudaEventSynchronize(event_stop);
float cudnnGemmTime;
cudaEventElapsedTime(&cudnnGemmTime, event_start, event_stop);
cudaEventRecord(event_start);
convWinogradeNon.forward(device_input);
cudaEventRecord(event_stop);
cudaEventSynchronize(event_stop);
float cudnnWinogradeTimeNon;
cudaEventElapsedTime(&cudnnWinogradeTimeNon, event_start, event_stop);
cudaEventRecord(event_start);
convFFT.forward(device_input);
cudaEventRecord(event_stop);
cudaEventSynchronize(event_stop);
float cudnnFFTTime;
cudaEventElapsedTime(&cudnnFFTTime, event_start, event_stop);
// Launch config hard-coded to match the TVM-generated kernel --
// NOTE(review): confirm against the generating schedule if H/W/N change.
dim3 grid(14,14,2);
dim3 block(1,1,8);
cudaEventRecord(event_start);
default_function_kernel0<<<grid, block>>>(device_input, device_K, device_out);
cudaEventRecord(event_stop);
cudaEventSynchronize(event_stop);
float time_tvm;
cudaEventElapsedTime(&time_tvm, event_start, event_stop);
float *out_tvm = new float[N*H*W];
cudaMemcpy(out_tvm,device_out,N*H*W*sizeof(float),cudaMemcpyDeviceToHost);
cudaMemset(device_out, 0, sizeof(float)*N*H*W);
// conv2d needs more than the default dynamic shared memory limit.
chkerr(cudaFuncSetAttribute(conv2d,cudaFuncAttributeMaxDynamicSharedMemorySize, TC*(TH+2)*(WPAD)*4));
cudaEventRecord(event_start);
conv2d<<<TCS*THS, N * TWS, TC*(TH+2)*(WPAD)*4>>>(device_input, device_K, device_out);
cudaEventRecord(event_stop);
cudaEventSynchronize(event_stop);
float time_tdc;
cudaEventElapsedTime(&time_tdc, event_start, event_stop);
float *out_tdc = new float[N*H*W];
cudaMemcpy(out_tdc,device_out,N*H*W*sizeof(float),cudaMemcpyDeviceToHost);
ofstream outfile;
char buffer[1000];
// NOTE(review): `ret` is unused; sprintf's return value is ignored.
int ret = sprintf(buffer,"%d,%d,%d,%d,%f,%f,%f,%f,%f,%f,%f,%f,%f\n",N,C,H,W,
cudnnFFTTime,cudnnWinogradeTimeNon,cudnnGemmTime,time_tvm,time_tdc,
cudnnFFTTime/time_tdc,cudnnWinogradeTimeNon/time_tdc,cudnnGemmTime/time_tdc,time_tvm/time_tdc);
outfile.open("../../evaluation_outcome/A100-layers-eval-oracle.csv", std::ios_base::app);
outfile << buffer;
// NOTE(review): `difference` is computed but never printed or checked --
// the TVM-vs-conv2d comparison is silently discarded.
float difference = check_diff(out_tvm, out_tdc, N*H*W);
cout<<N<<","<<C<<","<<H<<","<<W<<","<<cudnnFFTTime<<","<<cudnnWinogradeTimeNon<<","<<cudnnGemmTime<<","<<
time_tvm<<","<<time_tdc<<","<<cudnnFFTTime/time_tdc<<","<<cudnnWinogradeTimeNon/time_tdc<<","<<
cudnnGemmTime/time_tdc<<","<<time_tvm/time_tdc<<endl;
return 0;
}
|
afd4c2608bd98c1842c911c91b4f3084dfc8cec5.hip | // !!! This is a file automatically generated by hipify!!!
#include <pthread.h>
//This method will let the kernel deallocate all the memory that it acquired in
// kernel_setup and also lets the kernel write to its output file.
// Lets one kernel write its output file and release the resources it
// acquired in kernel_setup. Dispatches on the kernel id; unknown ids are
// silently ignored, exactly like the original if-chain.
void kernel_finish(int kernel, hipStream_t stream, char * filename, void *setupResult )
{
    switch (kernel) {
    case 1:
        sleep_finish(stream, filename, setupResult);
        break;
    case 2:
        matMul_finish(stream, filename, setupResult);
        break;
    default:
        break;
    }
}
// Bundle of per-batch cleanup state handed to cleaner_Main (as void*) when
// a cleaner pthread is spawned. Heap-allocated by makeCleanerRecord.
struct CleanerRecord
{
int BatchSize;           // number of kernels in the batch (length of every array below)
hipStream_t *Streams;    // one stream per kernel; destroyed by the cleaner
char **InputFiles;       // per-kernel input paths; freed by the cleaner
char **OutputFiles;      // per-kernel output paths; freed by the cleaner
int *Kernels;            // kernel ids (1 = sleep, 2 = matMul) fed to kernel_finish
void **SetupResults;     // opaque per-kernel state produced by kernel_setup
// NOTE(review): stored by value -- the record holds a *copy* of the
// caller's mutex, so locking it does not synchronize with the original.
pthread_mutex_t Lock;
};
// Allocates and populates a CleanerRecord bundling one batch worth of
// cleanup state for cleaner_Main. Ownership of all the array arguments
// transfers to the record (cleaner_Main frees them).
CleanerRecord *makeCleanerRecord(int batchSize, hipStream_t *streams, char **inputFiles,
    char **outputFiles, int *kernels, void **setupResults, pthread_mutex_t lock)
{
    CleanerRecord *record = (CleanerRecord *) malloc (sizeof(struct CleanerRecord));
    record->BatchSize    = batchSize;
    record->Streams      = streams;
    record->InputFiles   = inputFiles;
    record->OutputFiles  = outputFiles;
    record->Kernels      = kernels;
    record->SetupResults = setupResults;
    record->Lock         = lock;
    return record;
}
///////////////////////////////////////////////////////////////////////
// Cleaners's main
///////////////////////////////////////////////////////////////////////
// Cleaner-thread entry point: finishes, synchronizes and tears down one
// batch of kernels described by the CleanerRecord passed as `params`.
// Always returns 0 (NULL).
void *cleaner_Main(void *params)
{
    // FOR ALL MY COMMENTS q :: 1..batchSize
    CleanerRecord *r = (CleanerRecord *) params;
    int batchSize = r->BatchSize;
    hipStream_t *streams = r->Streams;
    char **inputFiles = r->InputFiles;
    char **outputFiles = r->OutputFiles;
    int *kernels = r->Kernels;
    void **setupResults = r->SetupResults;
    // Fix: the original copied r->Lock into a local pthread_mutex_t and
    // locked the copy, which provides no mutual exclusion at all. Lock the
    // record's mutex in place instead.
    // NOTE(review): the mutex is still copied by value into the record by
    // makeCleanerRecord; for real cross-thread exclusion the record should
    // hold a pthread_mutex_t* -- confirm the creator's intent.
    pthread_mutex_lock(&r->Lock);
    // Let every kernel write its output file and free its setup resources.
    for(int q=0; q<batchSize; q++){
        kernel_finish(kernels[q], streams[q], outputFiles[q], setupResults[q]);
    }
    // Wait for all outstanding work on each stream before destroying it.
    for(int q=0; q<batchSize; q++) hipStreamSynchronize(streams[q]);
    for(int q=0;q<batchSize;q++){
        free(inputFiles[q]);
        free(outputFiles[q]);
        hipStreamDestroy(streams[q]);
    }
    pthread_mutex_unlock(&r->Lock);
    // Free the arrays owned by the record, then the record itself
    // (fix: `r` is malloc'd in makeCleanerRecord and was previously leaked).
    free(streams);
    free(kernels);
    free(inputFiles);
    free(outputFiles);
    free(setupResults);
    free(r);
    printf("cleaner finished a batch of kernels\n");
    return 0;
}
| afd4c2608bd98c1842c911c91b4f3084dfc8cec5.cu | #include <pthread.h>
//This method will let the kernel deallocate all the memory that it acquired in
// kernel_setup and also lets the kernel write to its output file.
// Lets one kernel write its output file and release the resources it
// acquired in kernel_setup. Dispatches on the kernel id; unknown ids are
// silently ignored, exactly like the original if-chain.
void kernel_finish(int kernel, cudaStream_t stream, char * filename, void *setupResult )
{
    switch (kernel) {
    case 1:
        sleep_finish(stream, filename, setupResult);
        break;
    case 2:
        matMul_finish(stream, filename, setupResult);
        break;
    default:
        break;
    }
}
// Bundle of per-batch cleanup state handed to cleaner_Main (as void*) when
// a cleaner pthread is spawned. Heap-allocated by makeCleanerRecord.
struct CleanerRecord
{
int BatchSize;           // number of kernels in the batch (length of every array below)
cudaStream_t *Streams;   // one stream per kernel; destroyed by the cleaner
char **InputFiles;       // per-kernel input paths; freed by the cleaner
char **OutputFiles;      // per-kernel output paths; freed by the cleaner
int *Kernels;            // kernel ids (1 = sleep, 2 = matMul) fed to kernel_finish
void **SetupResults;     // opaque per-kernel state produced by kernel_setup
// NOTE(review): stored by value -- the record holds a *copy* of the
// caller's mutex, so locking it does not synchronize with the original.
pthread_mutex_t Lock;
};
// Allocates and populates a CleanerRecord bundling one batch worth of
// cleanup state for cleaner_Main. Ownership of all the array arguments
// transfers to the record (cleaner_Main frees them).
CleanerRecord *makeCleanerRecord(int batchSize, cudaStream_t *streams, char **inputFiles,
    char **outputFiles, int *kernels, void **setupResults, pthread_mutex_t lock)
{
    CleanerRecord *record = (CleanerRecord *) malloc (sizeof(struct CleanerRecord));
    record->BatchSize    = batchSize;
    record->Streams      = streams;
    record->InputFiles   = inputFiles;
    record->OutputFiles  = outputFiles;
    record->Kernels      = kernels;
    record->SetupResults = setupResults;
    record->Lock         = lock;
    return record;
}
///////////////////////////////////////////////////////////////////////
// Cleaners's main
///////////////////////////////////////////////////////////////////////
// Cleaner-thread entry point: finishes, synchronizes and tears down one
// batch of kernels described by the CleanerRecord passed as `params`.
// Always returns 0 (NULL).
void *cleaner_Main(void *params)
{
    // FOR ALL MY COMMENTS q :: 1..batchSize
    CleanerRecord *r = (CleanerRecord *) params;
    int batchSize = r->BatchSize;
    cudaStream_t *streams = r->Streams;
    char **inputFiles = r->InputFiles;
    char **outputFiles = r->OutputFiles;
    int *kernels = r->Kernels;
    void **setupResults = r->SetupResults;
    // Fix: the original copied r->Lock into a local pthread_mutex_t and
    // locked the copy, which provides no mutual exclusion at all. Lock the
    // record's mutex in place instead.
    // NOTE(review): the mutex is still copied by value into the record by
    // makeCleanerRecord; for real cross-thread exclusion the record should
    // hold a pthread_mutex_t* -- confirm the creator's intent.
    pthread_mutex_lock(&r->Lock);
    // Let every kernel write its output file and free its setup resources.
    for(int q=0; q<batchSize; q++){
        kernel_finish(kernels[q], streams[q], outputFiles[q], setupResults[q]);
    }
    // Wait for all outstanding work on each stream before destroying it.
    for(int q=0; q<batchSize; q++) cudaStreamSynchronize(streams[q]);
    for(int q=0;q<batchSize;q++){
        free(inputFiles[q]);
        free(outputFiles[q]);
        cudaStreamDestroy(streams[q]);
    }
    pthread_mutex_unlock(&r->Lock);
    // Free the arrays owned by the record, then the record itself
    // (fix: `r` is malloc'd in makeCleanerRecord and was previously leaked).
    free(streams);
    free(kernels);
    free(inputFiles);
    free(outputFiles);
    free(setupResults);
    free(r);
    printf("cleaner finished a batch of kernels\n");
    return 0;
}
|
ced44fea52c434f3f7d546bbb1c7f4e41864367d.hip | // !!! This is a file automatically generated by hipify!!!
#include "gpu_usage.h"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <stdio.h>
#ifdef __linux__
#include <rocm_smi/rocm_smi.h>
#endif
#define GIGABYTE 1073741824.0f
#define cudaSuccess_SAFE_CALL(function) \
do { \
if (function != hipSuccess) { \
bestDevice = -1; \
return -1; \
} \
} while (0)
#define CUDA_SUCCESS_SAFE_CALL(function) \
do { \
if (function != hipSuccess) { \
bestDevice = -1; \
return -1; \
} \
} while (0)
static int bestDevice = -1;
// Picks the GPU to run on and caches the choice in `bestDevice`.
// Selection: if any device reports a non-zero load, the least-loaded one
// wins; otherwise the fastest device(s) by estimated GFLOPS are found and
// the one with the least used memory (a proxy for "already running a job")
// is chosen. Returns the device index, or -1 on API failure.
// NOTE(review): this is hipified code that still calls NVML under a
// rocm_smi header and compares nvmlInit() against RSMI_STATUS_SUCCESS, and
// declares the NVML device handle as uint32_t -- these hipify artifacts
// almost certainly do not compile/behave on ROCm; verify the SMI path.
// NOTE(review): hipCtxCreate is called per device and the contexts are
// never destroyed.
int findBestCUDACard() {
    int nDevices;
    const int verbose = 1;
    // We only want to check for the best device once. Running
    // analyze first before the simulation causes misreporting
    // of resources and leads to the wrong gpu being selected
    if (bestDevice != -1)
        return bestDevice;
    cudaSuccess_SAFE_CALL(hipGetDeviceCount(&nDevices));
    float device_loads[nDevices];
    float device_gflops[nDevices];
    int device_used_mem[nDevices];
    printf ("\n Number of CUDA cards detected : %d\n", nDevices);
    // Loop through all the CUDA devices and pick the best one
    for (int i = 0; i < nDevices; ++i) {
        hipDeviceProp_t prop;
        hipDevice_t device;
        hipCtx_t context;
        size_t freeMemory;
        size_t totalMemory;
        int usedMemory;
        double GFLOPS;
        int coresPerMP = 0, MPCount, IPC = 1;
        int major, minor;
        cudaSuccess_SAFE_CALL (hipGetDeviceProperties(&prop, i));
        CUDA_SUCCESS_SAFE_CALL (hipDeviceGet(&device, i));
        CUDA_SUCCESS_SAFE_CALL (hipCtxCreate(&context, 0, device));
        CUDA_SUCCESS_SAFE_CALL (cuMemGetInfo(&freeMemory, &totalMemory));
        usedMemory = totalMemory - freeMemory;
        major = prop.major;
        minor = prop.minor;
        MPCount = prop.multiProcessorCount;
        // This will need to be updated when new cards are released
        // Data from https://en.wikipedia.org/wiki/CUDA from the
        // Architecture specifications chart. IPC is equal to the
        // "Number of instructions issued at once by scheduler"
        switch (major) {
        case 1:
            coresPerMP = 8;
            break;
        case 2:
            if (minor == 0)
                coresPerMP = 32;
            else if (minor == 1) {
                coresPerMP = 48;
                IPC = 2;
            }
            break;
        case 3:
            coresPerMP = 192;
            IPC = 2;
            break;
        case 5:
            coresPerMP = 128;
            IPC = 2;
            break;
        case 6:
            IPC = 2;
            if (minor == 0)
                coresPerMP = 64;
            else
                coresPerMP = 128;
            break;
        }
        // GFLOPS = CUDA Cores * Clockspeed * Instructions per Clock
        GFLOPS = MPCount * coresPerMP * (double)prop.clockRate / (1000.0 * 1000.0) * IPC;
        // If the data for coresPerMP was not present, then this
        // is a newer GPU and we will assume it is fast
        if (coresPerMP == 0) {
            GFLOPS = INT_MAX;
        }
        float gpu_load = 0.0f;
#ifdef __APPLE__
        gpu_load = getGPUCoreUsage(i);
#endif
#ifdef __linux__
        if (RSMI_STATUS_SUCCESS != nvmlInit())
            printf("Failure to initialize NVML\n");
        else {
            nvmlUtilization_t gpuUtil;
            uint32_t nvmlDevice;
            unsigned int nvmlClock;
            if (RSMI_STATUS_SUCCESS != nvmlDeviceGetHandleByIndex(nDevices-i-1, &nvmlDevice))
                printf("Failure to find NVML device\n");
            else {
                nvmlDeviceGetUtilizationRates(nvmlDevice, &gpuUtil);
                gpu_load = (float)gpuUtil.gpu;
                nvmlDeviceGetClockInfo(nvmlDevice, NVML_CLOCK_SM, &nvmlClock);
            }
        }
        //nvmlDeviceGetUtilizationRates(nvmlDevice, &gpuUtil);
#endif
#ifdef __WIN32
        // TODO; Windows version not available at this time
#endif
        device_loads[i] = gpu_load;
        device_gflops[i] = GFLOPS;
        device_used_mem[i] = usedMemory;
        // Total used memory is used to guess if the device
        // is already running a job
        if (verbose) {
            printf("\n Device Number : %d\n", i);
            printf("\tDevice Name %s\n", prop.name);
            if (coresPerMP != 0) {
                printf("\tCUDA Cores %d\n", MPCount * coresPerMP);
                printf("\tTFLOPS %.2f\n", GFLOPS/1000);
            }
            printf("\tClockspeed (GHz) %.3f\n", ((float)prop.clockRate) / 1000000.0f);
            printf("\tTotal Memory (GB) %.2f\n", round(10.0f * ((float)totalMemory/GIGABYTE) / 10.0f));
            printf("\tFree Memory (GB) %.2f\n", round(1000.0f * (float)freeMemory/GIGABYTE) / 1024.0f);
            printf("\tGPU load %.2f%%\n", gpu_load);
        }
    }
    if (nDevices == 1) {
        bestDevice = 0;
        return bestDevice;
    }
    // Figure out which GPU we want to use
    float lowestLoad = 101;
    bool nonzero_load = false;
    // Pick the device that has the lowest load
    for (int i = 0; i < nDevices; ++i) {
        if (device_loads[i] > 0)
            nonzero_load = true;
        if (device_loads[i] < lowestLoad) {
            lowestLoad = device_loads[i];
            bestDevice = i;
        }
    }
    if (nonzero_load)
        return bestDevice;
    bestDevice = 0;
    // If none of the devices are in use, find the fastest device.
    // Fix: the original never updated highestGFLOPS inside this loop, so
    // every device with GFLOPS >= 0 was flagged as "fastest" and the
    // tie-clearing branch could only fire for GFLOPS == 0. Track the
    // running maximum and clear earlier flags on a strict improvement;
    // exact ties keep all tied devices flagged.
    float highestGFLOPS = 0;
    bool highest_performance[nDevices];
    for (int i = 0; i < nDevices; ++i) {
        highest_performance[i] = false;
        if (device_gflops[i] >= highestGFLOPS) {
            highest_performance[i] = true;
            if (device_gflops[i] > highestGFLOPS) {
                for (int j = 0; j < i; ++j)
                    highest_performance[j] = false;
                highestGFLOPS = device_gflops[i];
            }
        }
    }
    // From the list of fastest devices, find the one with
    // the least amount of used memory
    int lowestMem = device_used_mem[0];
    bestDevice = 0;
    for (int i = 1; i < nDevices; ++i) {
        if (highest_performance[i] && device_used_mem[i] < lowestMem) {
            lowestMem = device_used_mem[i];
            bestDevice = i;
        }
    }
    return bestDevice;
}
| ced44fea52c434f3f7d546bbb1c7f4e41864367d.cu | #include "gpu_usage.h"
#include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
#ifdef __linux__
#include <nvml.h>
#endif
#define GIGABYTE 1073741824.0f
#define cudaSuccess_SAFE_CALL(function) \
do { \
if (function != cudaSuccess) { \
bestDevice = -1; \
return -1; \
} \
} while (0)
#define CUDA_SUCCESS_SAFE_CALL(function) \
do { \
if (function != CUDA_SUCCESS) { \
bestDevice = -1; \
return -1; \
} \
} while (0)
static int bestDevice = -1;
// Select the "best" CUDA device and cache the choice.
// Strategy:
//   1. If any GPU reports nonzero utilization, pick the least-loaded one.
//   2. Otherwise pick the fastest card (estimated GFLOPS), breaking ties by
//      the smallest amount of memory already in use.
// The result is cached in the file-static `bestDevice` because probing twice
// (e.g. analyze followed by simulate) misreports resources and can flip the
// selection.  Returns the device ordinal, or -1 on a CUDA API failure (via
// the *_SAFE_CALL macros).
int findBestCUDACard() {
    int nDevices;
    const int verbose = 1;
    // We only want to check for the best device once. Running
    // analyze first before the simulation causes misreporting
    // of resources and leads to the wrong gpu being selected
    if (bestDevice != -1)
        return bestDevice;
    cudaSuccess_SAFE_CALL(cudaGetDeviceCount(&nDevices));
    float device_loads[nDevices];
    float device_gflops[nDevices];
    int device_used_mem[nDevices];
    printf ("\n Number of CUDA cards detected : %d\n", nDevices);
    // Loop through all the CUDA devices and record load, speed and used memory
    for (int i = 0; i < nDevices; ++i) {
        cudaDeviceProp prop;
        CUdevice device;
        CUcontext context;
        size_t freeMemory;
        size_t totalMemory;
        int usedMemory;
        double GFLOPS;
        int coresPerMP = 0, MPCount, IPC = 1;
        int major, minor;
        cudaSuccess_SAFE_CALL (cudaGetDeviceProperties(&prop, i));
        CUDA_SUCCESS_SAFE_CALL (cuDeviceGet(&device, i));
        // A driver-API context is needed only to query free/total memory.
        CUDA_SUCCESS_SAFE_CALL (cuCtxCreate(&context, 0, device));
        CUDA_SUCCESS_SAFE_CALL (cuMemGetInfo(&freeMemory, &totalMemory));
        // Fix: destroy the temporary context; the original leaked one context
        // per device, which itself consumes GPU memory and skews the
        // used-memory numbers read for later devices.
        CUDA_SUCCESS_SAFE_CALL (cuCtxDestroy(context));
        usedMemory = totalMemory - freeMemory;
        major = prop.major;
        minor = prop.minor;
        MPCount = prop.multiProcessorCount;
        // This will need to be updated when new cards are released
        // Data from https://en.wikipedia.org/wiki/CUDA from the
        // Architecture specifications chart. IPC is equal to the
        // "Number of instructions issued at once by scheduler"
        switch (major) {
        case 1:
            coresPerMP = 8;
            break;
        case 2:
            if (minor == 0)
                coresPerMP = 32;
            else if (minor == 1) {
                coresPerMP = 48;
                IPC = 2;
            }
            break;
        case 3:
            coresPerMP = 192;
            IPC = 2;
            break;
        case 5:
            coresPerMP = 128;
            IPC = 2;
            break;
        case 6:
            IPC = 2;
            if (minor == 0)
                coresPerMP = 64;
            else
                coresPerMP = 128;
            break;
        }
        // GFLOPS = CUDA Cores * Clockspeed * Instructions per Clock
        GFLOPS = MPCount * coresPerMP * (double)prop.clockRate / (1000.0 * 1000.0) * IPC;
        // If the data for coresPerMP was not present, then this
        // is a newer GPU and we will assume it is fast
        if (coresPerMP == 0) {
            GFLOPS = INT_MAX;
        }
        float gpu_load = 0.0f;
#ifdef __APPLE__
        gpu_load = getGPUCoreUsage(i);
#endif
#ifdef __linux__
        if (NVML_SUCCESS != nvmlInit())
            printf("Failure to initialize NVML\n");
        else {
            nvmlUtilization_t gpuUtil;
            nvmlDevice_t nvmlDevice;
            unsigned int nvmlClock;
            // NOTE(review): devices are looked up in reverse order
            // (nDevices-i-1), presumably because NVML enumerates in the
            // opposite order of the CUDA runtime here — confirm on target.
            if (NVML_SUCCESS != nvmlDeviceGetHandleByIndex(nDevices-i-1, &nvmlDevice))
                printf("Failure to find NVML device\n");
            else {
                nvmlDeviceGetUtilizationRates(nvmlDevice, &gpuUtil);
                gpu_load = (float)gpuUtil.gpu;
                nvmlDeviceGetClockInfo(nvmlDevice, NVML_CLOCK_SM, &nvmlClock);
            }
            // Fix: balance the per-iteration nvmlInit(); the original never
            // shut NVML down.
            nvmlShutdown();
        }
#endif
#ifdef __WIN32
        // TODO; Windows version not available at this time
#endif
        device_loads[i] = gpu_load;
        device_gflops[i] = GFLOPS;
        device_used_mem[i] = usedMemory;
        // Total used memory is used to guess if the device
        // is already running a job
        if (verbose) {
            printf("\n Device Number : %d\n", i);
            printf("\tDevice Name                  %s\n", prop.name);
            if (coresPerMP != 0) {
                printf("\tCUDA Cores                   %d\n", MPCount * coresPerMP);
                printf("\tTFLOPS                       %.2f\n", GFLOPS/1000);
            }
            printf("\tClockspeed (GHz)             %.3f\n", ((float)prop.clockRate) / 1000000.0f);
            // Fix: the original wrapped the entire expression in round(),
            // printing whole gigabytes despite the %.2f format; round to one
            // decimal place instead (round(10x)/10).
            printf("\tTotal Memory (GB)            %.2f\n", round(10.0f * ((float)totalMemory / GIGABYTE)) / 10.0f);
            printf("\tFree Memory (GB)             %.2f\n", round(10.0f * ((float)freeMemory / GIGABYTE)) / 10.0f);
            printf("\tGPU load                     %.2f%%\n", gpu_load);
        }
    }
    if (nDevices == 1) {
        bestDevice = 0;
        return bestDevice;
    }
    // Figure out which GPU we want to use
    float lowestLoad = 101;
    bool nonzero_load = false;
    // Pick the device that has the lowest load
    for (int i = 0; i < nDevices; ++i) {
        if (device_loads[i] > 0)
            nonzero_load = true;
        if (device_loads[i] < lowestLoad) {
            lowestLoad = device_loads[i];
            bestDevice = i;
        }
    }
    if (nonzero_load)
        return bestDevice;
    bestDevice = 0;
    // If none of the devices are in use, find the fastest device
    float highestGFLOPS = 0;
    bool highest_performance[nDevices];
    for (int i = 0; i < nDevices; ++i) {
        // Fix: the original never updated highestGFLOPS, so every device was
        // compared against 0 and essentially all of them were flagged as
        // "fastest".  Track the running maximum and keep exact ties flagged.
        if (device_gflops[i] > highestGFLOPS) {
            highestGFLOPS = device_gflops[i];
            for (int j = 0; j < i; ++j)
                highest_performance[j] = false;
            highest_performance[i] = true;
        } else {
            highest_performance[i] = (device_gflops[i] == highestGFLOPS);
        }
    }
    // From the list of fastest devices, find the one with
    // the least amount of used memory.
    // Fix: seed from INT_MAX and scan all devices — the original seeded from
    // device 0 even when device 0 was not among the fastest candidates.
    int lowestMem = INT_MAX;
    bestDevice = 0;
    for (int i = 0; i < nDevices; ++i) {
        if (highest_performance[i] && device_used_mem[i] < lowestMem) {
            lowestMem = device_used_mem[i];
            bestDevice = i;
        }
    }
    return bestDevice;
}
|
cfb363f8d95f06d347e84631b22a1b1b9c0e11c5.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Distributed under the OSI-approved Apache License, Version 2.0. See
* accompanying file Copyright.txt for details.
*/
#ifndef ADIOS2_HELPER_ADIOSCUDA_CU_
#define ADIOS2_HELPER_ADIOSCUDA_CU_
#include "adios2/common/ADIOSMacros.h"
#include <hip/hip_runtime.h>
#include <thrust/device_ptr.h>
#include <thrust/extrema.h>
#include "adiosCUDA.h"
// Blocking device-to-host copy: transfers `byteCount` bytes from the GPU
// buffer `GPUbuffer` into the host buffer `dst`.
// NOTE(review): the hipMemcpy status is discarded, so failures are silent —
// consider surfacing the error code to the caller.
void adios2::helper::MemcpyGPUToBuffer(char *dst, const char *GPUbuffer,
                                       size_t byteCount)
{
    hipMemcpy(dst, GPUbuffer, byteCount, hipMemcpyDeviceToHost);
}
// Blocking host-to-device copy: transfers `byteCount` bytes from the host
// buffer `src` into the GPU buffer `GPUbuffer`.
// NOTE(review): the hipMemcpy status is discarded, so failures are silent —
// consider surfacing the error code to the caller.
void adios2::helper::MemcpyBufferToGPU(char *GPUbuffer, const char *src,
                                       size_t byteCount)
{
    hipMemcpy(GPUbuffer, src, byteCount, hipMemcpyHostToDevice);
}
namespace
{
template <class T>
void CUDAMinMaxImpl(const T *values, const size_t size, T &min, T &max)
{
thrust::device_ptr<const T> dev_ptr(values);
auto res = thrust::minmax_element(dev_ptr, dev_ptr + size);
hipMemcpy(&min, thrust::raw_pointer_cast(res.first), sizeof(T),
hipMemcpyDeviceToHost);
hipMemcpy(&max, thrust::raw_pointer_cast(res.second), sizeof(T),
hipMemcpyDeviceToHost);
}
// types non supported on the device
void CUDAMinMaxImpl(const long double * /*values*/, const size_t /*size*/,
long double & /*min*/, long double & /*max*/)
{
}
void CUDAMinMaxImpl(const std::complex<float> * /*values*/,
const size_t /*size*/, std::complex<float> & /*min*/,
std::complex<float> & /*max*/)
{
}
void CUDAMinMaxImpl(const std::complex<double> * /*values*/,
const size_t /*size*/, std::complex<double> & /*min*/,
std::complex<double> & /*max*/)
{
}
}
// Returns true when `ptr` refers to device-resident memory.
// Fix: the original ignored the return code of hipPointerGetAttributes.
// For a plain (unregistered) host pointer the call fails, leaving `attr`
// unspecified and a sticky error set that would poison later HIP calls.
// Treat any failure as "not a GPU buffer" and clear the error state.
bool adios2::helper::IsGPUbuffer(const void *ptr)
{
    hipPointerAttribute_t attr;
    if (hipPointerGetAttributes(&attr, ptr) != hipSuccess)
    {
        (void)hipGetLastError(); // reset the sticky error state
        return false;
    }
    return attr.type == hipMemoryTypeDevice;
}
// Compute the minimum and maximum of `size` elements living in GPU memory.
// Thin public wrapper over the CUDAMinMaxImpl overloads; for types that are
// not reducible on the device here (long double, std::complex) the no-op
// overload is selected and `min`/`max` are left untouched.
template <class T>
void adios2::helper::GPUMinMax(const T *values, const size_t size, T &min,
                               T &max)
{
    CUDAMinMaxImpl(values, size, min, max);
}
#define declare_type(T) \
template void adios2::helper::GPUMinMax( \
const T *values, const size_t size, T &min, T &max);
ADIOS2_FOREACH_PRIMITIVE_STDTYPE_1ARG(declare_type)
#undef declare_type
#endif /* ADIOS2_HELPER_ADIOSCUDA_CU_ */
| cfb363f8d95f06d347e84631b22a1b1b9c0e11c5.cu | /*
* Distributed under the OSI-approved Apache License, Version 2.0. See
* accompanying file Copyright.txt for details.
*/
#ifndef ADIOS2_HELPER_ADIOSCUDA_CU_
#define ADIOS2_HELPER_ADIOSCUDA_CU_
#include "adios2/common/ADIOSMacros.h"
#include <cuda_runtime.h>
#include <thrust/device_ptr.h>
#include <thrust/extrema.h>
#include "adiosCUDA.h"
void adios2::helper::MemcpyGPUToBuffer(char *dst, const char *GPUbuffer,
size_t byteCount)
{
cudaMemcpy(dst, GPUbuffer, byteCount, cudaMemcpyDeviceToHost);
}
void adios2::helper::MemcpyBufferToGPU(char *GPUbuffer, const char *src,
size_t byteCount)
{
cudaMemcpy(GPUbuffer, src, byteCount, cudaMemcpyHostToDevice);
}
namespace
{
template <class T>
void CUDAMinMaxImpl(const T *values, const size_t size, T &min, T &max)
{
thrust::device_ptr<const T> dev_ptr(values);
auto res = thrust::minmax_element(dev_ptr, dev_ptr + size);
cudaMemcpy(&min, thrust::raw_pointer_cast(res.first), sizeof(T),
cudaMemcpyDeviceToHost);
cudaMemcpy(&max, thrust::raw_pointer_cast(res.second), sizeof(T),
cudaMemcpyDeviceToHost);
}
// types non supported on the device
void CUDAMinMaxImpl(const long double * /*values*/, const size_t /*size*/,
long double & /*min*/, long double & /*max*/)
{
}
void CUDAMinMaxImpl(const std::complex<float> * /*values*/,
const size_t /*size*/, std::complex<float> & /*min*/,
std::complex<float> & /*max*/)
{
}
void CUDAMinMaxImpl(const std::complex<double> * /*values*/,
const size_t /*size*/, std::complex<double> & /*min*/,
std::complex<double> & /*max*/)
{
}
}
// Returns true when `ptr` refers to device-resident memory.
// Fix: the original ignored the return code of cudaPointerGetAttributes.
// Before CUDA 11 the call returns cudaErrorInvalidValue for an unregistered
// host pointer, leaving `attr` unspecified and a sticky error set that would
// poison later CUDA calls.  Treat any failure as "not a GPU buffer" and
// clear the error state.
bool adios2::helper::IsGPUbuffer(const void *ptr)
{
    cudaPointerAttributes attr;
    if (cudaPointerGetAttributes(&attr, ptr) != cudaSuccess)
    {
        (void)cudaGetLastError(); // reset the sticky error state
        return false;
    }
    return attr.type == cudaMemoryTypeDevice;
}
template <class T>
void adios2::helper::GPUMinMax(const T *values, const size_t size, T &min,
T &max)
{
CUDAMinMaxImpl(values, size, min, max);
}
#define declare_type(T) \
template void adios2::helper::GPUMinMax( \
const T *values, const size_t size, T &min, T &max);
ADIOS2_FOREACH_PRIMITIVE_STDTYPE_1ARG(declare_type)
#undef declare_type
#endif /* ADIOS2_HELPER_ADIOSCUDA_CU_ */
|
55e69e60baff4d31e61e1050fec7d223bb0ad14f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cudaCommon.h"
// Applies the activation function selected by `functionType` to `number`.
// Fix: the SIGMOID case computed 1/(1 - exp(-x)), which is not the logistic
// function (it diverges at x == 0 and is negative for x < 0); it now uses
// the standard 1/(1 + exp(-x)), consistent with FT_BIPOLAR_SIGMOID below.
// Also switched to the float overloads expf/tanhf to avoid silent double
// promotion in device code.
__device__
float Func(float number, FunctionType functionType)
{
    switch (functionType) {
    case FT_BINARY_STEP:
        // Heaviside step: 1 for positive inputs, 0 otherwise.
        if (number > 0) {
            return 1;
        } else {
            return 0;
        }
    case FT_BIPOLAR_STEP:
        // Sign-like step: 1 for positive inputs, -1 otherwise.
        if (number > 0) {
            return 1;
        } else {
            return -1;
        }
    case SIGMOID:
        // Logistic sigmoid 1/(1 + e^-x), range (0, 1).
        return 1.0f / (1.0f + expf(-number));
    case FT_BIPOLAR_SIGMOID:
        // Scaled sigmoid, range (-1, 1).
        return -1.0f + (2.0f / (1.0f + expf(-number)));
    case FT_HYPERBOLIC_TANGENT:
        return tanhf(number);
    case FT_IDENTITY:
    default:
        return number;
    }
}
// One thread per output element:
//   output[idx] = Func(results[idx] - thresholds[idx], functionType)
// i.e. the neuron's threshold is subtracted before the activation is
// applied.  Threads whose global index falls beyond output_sz do nothing,
// so any grid covering output_sz is valid.
__global__
void ActivationFloatKernel(float* results, float* thresholds, float* output, unsigned output_sz,
        FunctionType functionType)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < output_sz) {
        output[idx] = Func(results[idx] - thresholds[idx], functionType);
    }
}
// Packs one activation bit per neuron into 32-bit words.  Each thread owns
// one output word: bit k (MSB first) is set when
// results[offset+k] - thresholds[offset+k] > 0.  The final, possibly
// partial word is handled via `toRead`.
__global__
void ActivationBitKernel(float* results, float* thresholds, unsigned* output, unsigned output_sz)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    // Index of the first neuron handled by this thread.
    unsigned offset = idx * BITS_PER_UNSIGNED;
    if (output_sz > offset) {
        // Number of bits this thread actually produces (partial last word).
        unsigned toRead = device_min(BITS_PER_UNSIGNED, output_sz - offset);
        unsigned threadOutput = 0;
        unsigned mask = 0x80000000;  // start at the most-significant bit
        for (unsigned i = 0; i < toRead; i++) {
            unsigned pos = offset + i;
            if (results[pos] - thresholds[pos] > 0) {
                threadOutput |= mask;
            } else {
                threadOutput &= ~mask;  // no-op: threadOutput starts at 0
            }
            mask >>= 1;
        }
        output[idx] = threadOutput;
    }
}
// Host-side dispatcher: applies the thresholded activation on the GPU and
// writes the result in the representation selected by `bufferType`.
//  - BT_FLOAT: one float per neuron (ActivationFloatKernel, one thread each).
//  - BT_BIT / BT_SIGN: one bit per neuron packed into unsigned words
//    (ActivationBitKernel, one word per thread).
//  - BT_BYTE: unsupported — throws a std::string describing the error.
// `size` is the neuron count; `block_size` is the block size used for the
// launch.  checkCUDAError reports any launch failure afterwards.
extern "C" void cuda_activation(void* output, unsigned size, BufferType bufferType, float* results,
        float* thresholds, FunctionType functionType, unsigned block_size)
{
    unsigned grid_size;
    switch (bufferType) {
    case BT_BYTE:
        {
            std::string error = "cuda_activation is not implemented for BufferType BYTE.";
            throw error;
        }
    case BT_FLOAT:
        {
            // ceil(size / block_size) blocks: one thread per output element.
            grid_size = ((size - 1) / block_size) + 1;
            hipLaunchKernelGGL(( ActivationFloatKernel), dim3(grid_size), dim3(block_size) , 0, 0, results, thresholds, (float*)output, size, functionType);
        }
        break;
    case BT_BIT:
    case BT_SIGN:
        {
            // Each thread packs BITS_PER_UNSIGNED neurons into one word.
            grid_size = ((size - 1) / (block_size * BITS_PER_UNSIGNED)) + 1;
            hipLaunchKernelGGL(( ActivationBitKernel), dim3(grid_size), dim3(block_size) , 0, 0, results, thresholds, (unsigned*)output, size);
        }
        break;
    }
    checkCUDAError("activation");
}
| 55e69e60baff4d31e61e1050fec7d223bb0ad14f.cu | #include "cudaCommon.h"
// Applies the activation function selected by `functionType` to `number`.
// Fix: the SIGMOID case computed 1/(1 - exp(-x)), which is not the logistic
// function (it diverges at x == 0 and is negative for x < 0); it now uses
// the standard 1/(1 + exp(-x)), consistent with FT_BIPOLAR_SIGMOID below.
// Also switched to the float overloads expf/tanhf to avoid silent double
// promotion in device code.
__device__
float Func(float number, FunctionType functionType)
{
    switch (functionType) {
    case FT_BINARY_STEP:
        // Heaviside step: 1 for positive inputs, 0 otherwise.
        if (number > 0) {
            return 1;
        } else {
            return 0;
        }
    case FT_BIPOLAR_STEP:
        // Sign-like step: 1 for positive inputs, -1 otherwise.
        if (number > 0) {
            return 1;
        } else {
            return -1;
        }
    case SIGMOID:
        // Logistic sigmoid 1/(1 + e^-x), range (0, 1).
        return 1.0f / (1.0f + expf(-number));
    case FT_BIPOLAR_SIGMOID:
        // Scaled sigmoid, range (-1, 1).
        return -1.0f + (2.0f / (1.0f + expf(-number)));
    case FT_HYPERBOLIC_TANGENT:
        return tanhf(number);
    case FT_IDENTITY:
    default:
        return number;
    }
}
__global__
void ActivationFloatKernel(float* results, float* thresholds, float* output, unsigned output_sz,
FunctionType functionType)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < output_sz) {
output[idx] = Func(results[idx] - thresholds[idx], functionType);
}
}
__global__
void ActivationBitKernel(float* results, float* thresholds, unsigned* output, unsigned output_sz)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned offset = idx * BITS_PER_UNSIGNED;
if (output_sz > offset) {
unsigned toRead = device_min(BITS_PER_UNSIGNED, output_sz - offset);
unsigned threadOutput = 0;
unsigned mask = 0x80000000;
for (unsigned i = 0; i < toRead; i++) {
unsigned pos = offset + i;
if (results[pos] - thresholds[pos] > 0) {
threadOutput |= mask;
} else {
threadOutput &= ~mask;
}
mask >>= 1;
}
output[idx] = threadOutput;
}
}
extern "C" void cuda_activation(void* output, unsigned size, BufferType bufferType, float* results,
float* thresholds, FunctionType functionType, unsigned block_size)
{
unsigned grid_size;
switch (bufferType) {
case BT_BYTE:
{
std::string error = "cuda_activation is not implemented for BufferType BYTE.";
throw error;
}
case BT_FLOAT:
{
grid_size = ((size - 1) / block_size) + 1;
ActivationFloatKernel<<< grid_size, block_size >>>(results, thresholds, (float*)output, size, functionType);
}
break;
case BT_BIT:
case BT_SIGN:
{
grid_size = ((size - 1) / (block_size * BITS_PER_UNSIGNED)) + 1;
ActivationBitKernel<<< grid_size, block_size >>>(results, thresholds, (unsigned*)output, size);
}
break;
}
checkCUDAError("activation");
}
|
7dfd995aea5ea8a998b9e6666a33c070af3c98cc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Created by chengjin on 2020-06-02.
//
#include "cu_utils.h"
#include "cu_math.h"
#include "normalize_kernel.h"
namespace quake {
namespace framework {
namespace ops_lib {
// Elementwise group-norm transform over a [row, col] matrix view:
//   dst = beta + (src - mean) * gamma / sqrt(var + eps)
// mean/var hold one moment per row (one (sample, group) pair); gamma/beta
// are per-channel and are recovered from the flat element index, where
// `stride` is the number of elements per channel (H*W).
// Launched on a 2D grid: x covers columns, y covers rows.
template<typename T>
__global__ static void _cal_group_norm(int row,int col,int channel,int stride,
T eps,const T* src,const T* gamma,const T* beta,const T* mean,const T* var,T* dst)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;  // column
    int j = blockIdx.y * blockDim.y + threadIdx.y;  // row
    int idx = i + j * col;                          // flat element index
    int param_idx = (idx/stride)%(channel);         // channel of this element
    if (i < col && j < row){
        //inputs=(inputs-mean)*gamma/np.sqrt(varience+eps)+beta
        dst[idx]=beta[param_idx]+(src[idx]-mean[j])*gamma[param_idx]/sqrt(var[j]+eps);
    }
}
__global__ static void _cal_group_normH(int row,int col,int channel,int stride,
__half eps,const __half* src,const __half* gamma,const __half* beta,
const __half* mean,const __half* var,__half* dst)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;//col
int j = blockIdx.y * blockDim.y + threadIdx.y;//row
int idx = i + j * col;
int param_idx = (idx/stride)%(channel);
if (i < col && j < row){
//inputs=(inputs-mean)*gamma/np.sqrt(varience+eps)+beta
dst[idx]=beta[param_idx]+(src[idx]-mean[j])*gamma[param_idx]/hsqrt(var[j]+eps);
}
}
// Elementwise layer-norm transform over a [row, col] matrix view:
//   dst = beta + (src - mean) * gamma / sqrt(var + eps)
// mean/var hold one moment per row; gamma/beta are per-column (one value
// per element of the normalized dimension).
// Launched on a 2D grid: x covers columns, y covers rows.
template<typename T>
__global__ static void _cal_norm(int row,int col,T eps,const T* src,
const T* gamma,const T* beta,const T* mean,const T* var,T* dst)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;  // column
    int j = blockIdx.y * blockDim.y + threadIdx.y;  // row
    int idx = i + j * col;                          // flat element index
    if (i < col && j < row){
        //inputs=(inputs-mean)*gamma/np.sqrt(varience+eps)+beta
        dst[idx]=beta[i]+(src[idx]-mean[j])*gamma[i]/sqrt(var[j]+eps);
    }
}
__global__ static void _cal_normH(int row,int col,__half eps,const __half* src,
const __half* gamma,const __half* beta,const __half* mean,const __half* var,__half* dst)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;//col
int j = blockIdx.y * blockDim.y + threadIdx.y;//row
int idx = i + j * col;
if (i < col && j < row){
//inputs=(inputs-mean)*gamma/np.sqrt(varience+eps)+beta
dst[idx]=beta[i]+(src[idx]-mean[j])*gamma[i]/hsqrt(var[j]+eps);
}
}
//implements
// Group normalization forward pass (see _cal_group_norm for the math).
// input is [N, C, H, W], viewed as an [N*G, (C/G)*H*W] matrix so that each
// row is one (sample, group) pair.  gpu_moment_mat_cols computes per-row
// mean/var (using `buffer` as scratch), then the normalization kernel runs.
//
// Fix: the kernel was launched on the default stream (0) even though the
// moment computation ran on `stream`; it is now launched on the same stream,
// preserving ordering and avoiding accidental default-stream serialization.
template<typename T>
void groupnorm_forward_gpu(hipStream_t stream,const T* input,
    const T* gamma,const T* beta,
    T* buffer,T* mean,T* var,T* output,
    int group,int batchsize,int channel,int input_h,int input_w,T eps)
{
    //input shape [N,C,H,W],reshape to [N*G,C//G*H*W]
    int mat_row=batchsize*group;
    int mat_col=channel/group*input_h*input_w;
    int stride=input_h*input_w;
    //get mean,var
    gpu_moment_mat_cols(stream,mat_row,mat_col,input,buffer,mean,var);
    //get result
    dim3 Bl(CU2DBLOCK,CU2DBLOCK);
    dim3 Gr(n_blocks(mat_col,CU2DBLOCK),n_blocks(mat_row,CU2DBLOCK));
    hipLaunchKernelGGL(( _cal_group_norm), dim3(Gr),dim3(Bl), 0, stream, mat_row,mat_col,channel,stride,eps,input,gamma,beta,mean,var,output);
}
// __half overload of the group-norm forward pass (see the templated version
// above for the layout description; this one calls the half-precision
// kernel _cal_group_normH).
//
// Fix: the kernel was launched on the default stream (0) even though the
// moment computation ran on `stream`; it is now launched on the same stream.
void groupnorm_forward_gpu(hipStream_t stream,const __half* input,
    const __half* gamma,const __half* beta,
    __half* buffer,__half* mean,__half* var,__half* output,
    int group,int batchsize,int channel,int input_h,int input_w,__half eps)
{
    //input shape [N,C,H,W],reshape to [N*G,C//G*H*W]
    int mat_row=batchsize*group;
    int mat_col=channel/group*input_h*input_w;
    int stride=input_h*input_w;
    //get mean,var
    gpu_moment_mat_cols(stream,mat_row,mat_col,input,buffer,mean,var);
    //get result
    dim3 Bl(CU2DBLOCK,CU2DBLOCK);
    dim3 Gr(n_blocks(mat_col,CU2DBLOCK),n_blocks(mat_row,CU2DBLOCK));
    hipLaunchKernelGGL(( _cal_group_normH), dim3(Gr),dim3(Bl), 0, stream, mat_row,mat_col,channel,stride,eps,input,gamma,beta,mean,var,output);
}
// Layer normalization forward pass (see _cal_norm for the math).
// input is [N, layer_len, layer_dim], viewed as an [N*layer_len, layer_dim]
// matrix.  gpu_moment_mat_cols computes per-row mean/var (using `buffer` as
// scratch), then the normalization kernel runs with per-column gamma/beta.
//
// Fix: the kernel was launched on the default stream (0) even though the
// moment computation ran on `stream`; it is now launched on the same stream.
template<typename T>
void layernorm_forward_gpu(hipStream_t stream,const T* input,
    const T* gamma,const T* beta,
    T* buffer,T* mean,T* var,T* output,
    int batchsize,int layer_len,int layer_dim,T eps)
{
    //input shape [N,layer_len,layer_dim],reshape to [N*layer_len,layer_dim]
    int mat_row=batchsize*layer_len;
    int mat_col=layer_dim;
    //get mean,var
    gpu_moment_mat_cols(stream,mat_row,mat_col,input,buffer,mean,var);
    //get result
    dim3 Bl(CU2DBLOCK,CU2DBLOCK);
    dim3 Gr(n_blocks(mat_col,CU2DBLOCK),n_blocks(mat_row,CU2DBLOCK));
    hipLaunchKernelGGL(( _cal_norm), dim3(Gr),dim3(Bl), 0, stream, mat_row,mat_col,eps,input,gamma,beta,mean,var,output);
}
// __half overload of the layer-norm forward pass (see the templated version
// above for the layout description; this one calls the half-precision
// kernel _cal_normH).
//
// Fix: the kernel was launched on the default stream (0) even though the
// moment computation ran on `stream`; it is now launched on the same stream.
void layernorm_forward_gpu(hipStream_t stream,const __half* input,
    const __half* gamma,const __half* beta,
    __half* buffer,__half* mean,__half* var,__half* output,
    int batchsize,int layer_len,int layer_dim,__half eps)
{
    //input shape [N,layer_len,layer_dim],reshape to [N*layer_len,layer_dim]
    int mat_row=batchsize*layer_len;
    int mat_col=layer_dim;
    //get mean,var
    gpu_moment_mat_cols(stream,mat_row,mat_col,input,buffer,mean,var);
    //get result
    dim3 Bl(CU2DBLOCK,CU2DBLOCK);
    dim3 Gr(n_blocks(mat_col,CU2DBLOCK),n_blocks(mat_row,CU2DBLOCK));
    hipLaunchKernelGGL(( _cal_normH), dim3(Gr),dim3(Bl), 0, stream, mat_row,mat_col,eps,input,gamma,beta,mean,var,output);
}
template
void groupnorm_forward_gpu<float>(hipStream_t stream,const float* input,
const float* gamma,const float* beta,
float* buffer,float* mean,float* var,float* output,
int group,int batchsize,int channel,int input_h,int input_w,float eps);
template
void layernorm_forward_gpu<float>(hipStream_t stream,const float* input,
const float* gamma,const float* beta,
float* buffer,float* mean,float* var,float* output,
int batchsize,int layer_len,int layer_dim,float eps);
} // namespace ops_lib
} // namespace framework
} // namespace quake
| 7dfd995aea5ea8a998b9e6666a33c070af3c98cc.cu | //
// Created by chengjin on 2020-06-02.
//
#include "cu_utils.h"
#include "cu_math.h"
#include "normalize_kernel.h"
namespace quake {
namespace framework {
namespace ops_lib {
template<typename T>
__global__ static void _cal_group_norm(int row,int col,int channel,int stride,
T eps,const T* src,const T* gamma,const T* beta,const T* mean,const T* var,T* dst)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;//col
int j = blockIdx.y * blockDim.y + threadIdx.y;//row
int idx = i + j * col;
int param_idx = (idx/stride)%(channel);
if (i < col && j < row){
//inputs=(inputs-mean)*gamma/np.sqrt(varience+eps)+beta
dst[idx]=beta[param_idx]+(src[idx]-mean[j])*gamma[param_idx]/sqrt(var[j]+eps);
}
}
__global__ static void _cal_group_normH(int row,int col,int channel,int stride,
__half eps,const __half* src,const __half* gamma,const __half* beta,
const __half* mean,const __half* var,__half* dst)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;//col
int j = blockIdx.y * blockDim.y + threadIdx.y;//row
int idx = i + j * col;
int param_idx = (idx/stride)%(channel);
if (i < col && j < row){
//inputs=(inputs-mean)*gamma/np.sqrt(varience+eps)+beta
dst[idx]=beta[param_idx]+(src[idx]-mean[j])*gamma[param_idx]/hsqrt(var[j]+eps);
}
}
template<typename T>
__global__ static void _cal_norm(int row,int col,T eps,const T* src,
const T* gamma,const T* beta,const T* mean,const T* var,T* dst)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;//col
int j = blockIdx.y * blockDim.y + threadIdx.y;//row
int idx = i + j * col;
if (i < col && j < row){
//inputs=(inputs-mean)*gamma/np.sqrt(varience+eps)+beta
dst[idx]=beta[i]+(src[idx]-mean[j])*gamma[i]/sqrt(var[j]+eps);
}
}
__global__ static void _cal_normH(int row,int col,__half eps,const __half* src,
const __half* gamma,const __half* beta,const __half* mean,const __half* var,__half* dst)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;//col
int j = blockIdx.y * blockDim.y + threadIdx.y;//row
int idx = i + j * col;
if (i < col && j < row){
//inputs=(inputs-mean)*gamma/np.sqrt(varience+eps)+beta
dst[idx]=beta[i]+(src[idx]-mean[j])*gamma[i]/hsqrt(var[j]+eps);
}
}
//implements
template<typename T>
void groupnorm_forward_gpu(cudaStream_t stream,const T* input,
const T* gamma,const T* beta,
T* buffer,T* mean,T* var,T* output,
int group,int batchsize,int channel,int input_h,int input_w,T eps)
{
//input shape [N,C,H,W],reshape to [N*G,C//G*H*W]
int mat_row=batchsize*group;
int mat_col=channel/group*input_h*input_w;
int stride=input_h*input_w;
//get mean,var
gpu_moment_mat_cols(stream,mat_row,mat_col,input,buffer,mean,var);
//get result
dim3 Bl(CU2DBLOCK,CU2DBLOCK);
dim3 Gr(n_blocks(mat_col,CU2DBLOCK),n_blocks(mat_row,CU2DBLOCK));
_cal_group_norm<<<Gr,Bl>>>(mat_row,mat_col,channel,stride,eps,input,gamma,beta,mean,var,output);
}
void groupnorm_forward_gpu(cudaStream_t stream,const __half* input,
const __half* gamma,const __half* beta,
__half* buffer,__half* mean,__half* var,__half* output,
int group,int batchsize,int channel,int input_h,int input_w,__half eps)
{
//input shape [N,C,H,W],reshape to [N*G,C//G*H*W]
int mat_row=batchsize*group;
int mat_col=channel/group*input_h*input_w;
int stride=input_h*input_w;
//get mean,var
gpu_moment_mat_cols(stream,mat_row,mat_col,input,buffer,mean,var);
//get result
dim3 Bl(CU2DBLOCK,CU2DBLOCK);
dim3 Gr(n_blocks(mat_col,CU2DBLOCK),n_blocks(mat_row,CU2DBLOCK));
_cal_group_normH<<<Gr,Bl>>>(mat_row,mat_col,channel,stride,eps,input,gamma,beta,mean,var,output);
}
template<typename T>
void layernorm_forward_gpu(cudaStream_t stream,const T* input,
const T* gamma,const T* beta,
T* buffer,T* mean,T* var,T* output,
int batchsize,int layer_len,int layer_dim,T eps)
{
//input shape [N,layer_len,layer_dim],reshape to [N*layer_len,layer_dim]
int mat_row=batchsize*layer_len;
int mat_col=layer_dim;
//get mean,var
gpu_moment_mat_cols(stream,mat_row,mat_col,input,buffer,mean,var);
//get result
dim3 Bl(CU2DBLOCK,CU2DBLOCK);
dim3 Gr(n_blocks(mat_col,CU2DBLOCK),n_blocks(mat_row,CU2DBLOCK));
_cal_norm<<<Gr,Bl>>>(mat_row,mat_col,eps,input,gamma,beta,mean,var,output);
}
void layernorm_forward_gpu(cudaStream_t stream,const __half* input,
const __half* gamma,const __half* beta,
__half* buffer,__half* mean,__half* var,__half* output,
int batchsize,int layer_len,int layer_dim,__half eps)
{
//input shape [N,layer_len,layer_dim],reshape to [N*layer_len,layer_dim]
int mat_row=batchsize*layer_len;
int mat_col=layer_dim;
//get mean,var
gpu_moment_mat_cols(stream,mat_row,mat_col,input,buffer,mean,var);
//get result
dim3 Bl(CU2DBLOCK,CU2DBLOCK);
dim3 Gr(n_blocks(mat_col,CU2DBLOCK),n_blocks(mat_row,CU2DBLOCK));
_cal_normH<<<Gr,Bl>>>(mat_row,mat_col,eps,input,gamma,beta,mean,var,output);
}
template
void groupnorm_forward_gpu<float>(cudaStream_t stream,const float* input,
const float* gamma,const float* beta,
float* buffer,float* mean,float* var,float* output,
int group,int batchsize,int channel,int input_h,int input_w,float eps);
template
void layernorm_forward_gpu<float>(cudaStream_t stream,const float* input,
const float* gamma,const float* beta,
float* buffer,float* mean,float* var,float* output,
int batchsize,int layer_len,int layer_dim,float eps);
} // namespace ops_lib
} // namespace framework
} // namespace quake
|
3863698e186cd702bfb42f7951b4bca9705b1a13.hip | // !!! This is a file automatically generated by hipify!!!
#include "cpu_subs.h"
#include "subroutines.cuh"
int main()
{
// timing variables
float elapsed_time_ms = 0.0f;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
// assign variables
int nreps = 4; // repeated counts, to average execution time
//**********************************************************//
// data size
unsigned data_size = 3015; // 4 Megabyte data
// int data_size = 256 - 1;
//**********************************************************//
long nbytes = data_size*sizeof(double);
double gb = nbytes / (double)1e9;
// host data
double *h_data = 0;
h_data = (double*)malloc(nbytes);
printf("allocated %.2f MB on CPU\n", nbytes / (1024.f*1024.f));
for (unsigned i = 0; i < data_size; i++)
h_data[i] = 1.0f + i;
// device data
double *d_data = 0;
hipMalloc( (void**)&d_data, nbytes );
printf("allocated %.2f MB on GPU\n", nbytes/(1024.f*1024.f) );
hipEventRecord(start, 0);
hipMemcpy(d_data, h_data, nbytes, hipMemcpyHostToDevice);
hipEventRecord(stop, 0);
hipDeviceSynchronize();
hipEventElapsedTime(&elapsed_time_ms, start, stop);
elapsed_time_ms /= nreps;
printf("host to device transfer bandWidth: %f Gb/s\n", gb / elapsed_time_ms * 1e3);
// actual computation
double result_cpu = 0.0;
elapsed_time_ms = timing_experiment_cpu(reduction_cpu, h_data, data_size, nreps, &result_cpu);
printf("CPU reduction (kernel 00): execution = %8.4f, bandwidth = %8.4f Gb/s, result = %f\n", elapsed_time_ms / nreps, gb * nreps / elapsed_time_ms * 1e3, result_cpu);
double result_CUDA = 0.0;
elapsed_time_ms = 0.0f;
hipEventRecord(start, 0);
for (int i = 0; i < nreps; i++){
result_CUDA = sum_gpu(d_data, data_size);
}
hipEventRecord(stop, 0);
hipDeviceSynchronize();
hipEventElapsedTime(&elapsed_time_ms, start, stop);
printf("CUDA reduction (from book): execution = %8.4f, bandwidth = %8.4f Gb/s, result = %f\n", elapsed_time_ms / nreps, gb * nreps / elapsed_time_ms * 1e3, result_CUDA);
elapsed_time_ms = 0.0f;
hipEventRecord(start, 0);
for (int i = 0; i < nreps; i++){
result_CUDA = sum_gpu_reduce_full(kernel_reduce_score10, d_data, data_size, 0);
}
hipEventRecord(stop, 0);
hipDeviceSynchronize();
hipEventElapsedTime(&elapsed_time_ms, start, stop);
printf("CUDA reduction (kernel 10): execution = %8.4f, bandwidth = %8.4f Gb/s, result = %f\n", elapsed_time_ms / nreps, gb * nreps / elapsed_time_ms * 1e3, result_CUDA);
elapsed_time_ms = 0.0f;
hipEventRecord(start, 0);
for (int i = 0; i < nreps; i++){
result_CUDA = sum_gpu_reduce_full(kernel_reduce_score20, d_data, data_size, 0);
}
hipEventRecord(stop, 0);
hipDeviceSynchronize();
hipEventElapsedTime(&elapsed_time_ms, start, stop);
printf("CUDA reduction (kernel 20): execution = %8.4f, bandwidth = %8.4f Gb/s, result = %f\n", elapsed_time_ms / nreps, gb * nreps / elapsed_time_ms * 1e3, result_CUDA);
elapsed_time_ms = 0.0f;
hipEventRecord(start, 0);
for (int i = 0; i < nreps; i++){
result_CUDA = sum_gpu_reduce_full(kernel_reduce_score30, d_data, data_size, 0);
}
hipEventRecord(stop, 0);
hipDeviceSynchronize();
hipEventElapsedTime(&elapsed_time_ms, start, stop);
printf("CUDA reduction (kernel 30): execution = %8.4f, bandwidth = %8.4f Gb/s, result = %f\n", elapsed_time_ms / nreps, gb * nreps / elapsed_time_ms * 1e3, result_CUDA);
elapsed_time_ms = 0.0f;
hipEventRecord(start, 0);
for (int i = 0; i < nreps; i++){
result_CUDA = sum_gpu_reduce_half_wrap(kernel_reduce_half_score40, d_data, data_size, 0);
}
hipEventRecord(stop, 0);
hipDeviceSynchronize();
hipEventElapsedTime(&elapsed_time_ms, start, stop);
printf("CUDA reduction (kernel 40): execution = %8.4f, bandwidth = %8.4f Gb/s, result = %f\n", elapsed_time_ms / nreps, gb * nreps / elapsed_time_ms * 1e3, result_CUDA);
elapsed_time_ms = 0.0f;
hipEventRecord(start, 0);
for (int i = 0; i < nreps; i++){
result_CUDA = sum_gpu_reduce_half_wrap(kernel_reduce_half_score50, d_data, data_size, 0);
}
hipEventRecord(stop, 0);
hipDeviceSynchronize();
hipEventElapsedTime(&elapsed_time_ms, start, stop);
printf("CUDA reduction (kernel 50): execution = %8.4f, bandwidth = %8.4f Gb/s, result = %f\n", elapsed_time_ms / nreps, gb * nreps / elapsed_time_ms * 1e3, result_CUDA);
elapsed_time_ms = 0.0f;
hipEventRecord(start, 0);
for (int i = 0; i < nreps; i++){
result_CUDA = sum_reduce_recursive_cuda(d_data, data_size, 0);
}
hipEventRecord(stop, 0);
hipDeviceSynchronize();
hipEventElapsedTime(&elapsed_time_ms, start, stop);
printf("CUDA reduction (kernel 60): execution = %8.4f, bandwidth = %8.4f Gb/s, result = %f\n", elapsed_time_ms / nreps, gb * nreps / elapsed_time_ms * 1e3, result_CUDA);
printf("\nCUDA: %s\n", hipGetErrorString( hipGetLastError() ) );
if (d_data)
hipFree(d_data);
if (h_data)
free(h_data);
// hipDeviceReset();
hipEventDestroy(start);
hipEventDestroy(stop);
getchar();
return 0;
} | 3863698e186cd702bfb42f7951b4bca9705b1a13.cu |
#include "cpu_subs.h"
#include "subroutines.cuh"
int main()
{
// timing variables
float elapsed_time_ms = 0.0f;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// assign variables
int nreps = 4; // repeated counts, to average execution time
//**********************************************************//
// data size
unsigned data_size = 3015; // 4 Megabyte data
// int data_size = 256 - 1;
//**********************************************************//
long nbytes = data_size*sizeof(double);
double gb = nbytes / (double)1e9;
// host data
double *h_data = 0;
h_data = (double*)malloc(nbytes);
printf("allocated %.2f MB on CPU\n", nbytes / (1024.f*1024.f));
for (unsigned i = 0; i < data_size; i++)
h_data[i] = 1.0f + i;
// device data
double *d_data = 0;
cudaMalloc( (void**)&d_data, nbytes );
printf("allocated %.2f MB on GPU\n", nbytes/(1024.f*1024.f) );
cudaEventRecord(start, 0);
cudaMemcpy(d_data, h_data, nbytes, cudaMemcpyHostToDevice);
cudaEventRecord(stop, 0);
cudaThreadSynchronize();
cudaEventElapsedTime(&elapsed_time_ms, start, stop);
elapsed_time_ms /= nreps;
printf("host to device transfer bandWidth: %f Gb/s\n", gb / elapsed_time_ms * 1e3);
// actual computation
double result_cpu = 0.0;
elapsed_time_ms = timing_experiment_cpu(reduction_cpu, h_data, data_size, nreps, &result_cpu);
printf("CPU reduction (kernel 00): execution = %8.4f, bandwidth = %8.4f Gb/s, result = %f\n", elapsed_time_ms / nreps, gb * nreps / elapsed_time_ms * 1e3, result_cpu);
double result_CUDA = 0.0;
elapsed_time_ms = 0.0f;
cudaEventRecord(start, 0);
for (int i = 0; i < nreps; i++){
result_CUDA = sum_gpu(d_data, data_size);
}
cudaEventRecord(stop, 0);
cudaThreadSynchronize();
cudaEventElapsedTime(&elapsed_time_ms, start, stop);
printf("CUDA reduction (from book): execution = %8.4f, bandwidth = %8.4f Gb/s, result = %f\n", elapsed_time_ms / nreps, gb * nreps / elapsed_time_ms * 1e3, result_CUDA);
elapsed_time_ms = 0.0f;
cudaEventRecord(start, 0);
for (int i = 0; i < nreps; i++){
result_CUDA = sum_gpu_reduce_full(kernel_reduce_score10, d_data, data_size, 0);
}
cudaEventRecord(stop, 0);
cudaThreadSynchronize();
cudaEventElapsedTime(&elapsed_time_ms, start, stop);
printf("CUDA reduction (kernel 10): execution = %8.4f, bandwidth = %8.4f Gb/s, result = %f\n", elapsed_time_ms / nreps, gb * nreps / elapsed_time_ms * 1e3, result_CUDA);
elapsed_time_ms = 0.0f;
cudaEventRecord(start, 0);
for (int i = 0; i < nreps; i++){
result_CUDA = sum_gpu_reduce_full(kernel_reduce_score20, d_data, data_size, 0);
}
cudaEventRecord(stop, 0);
cudaThreadSynchronize();
cudaEventElapsedTime(&elapsed_time_ms, start, stop);
printf("CUDA reduction (kernel 20): execution = %8.4f, bandwidth = %8.4f Gb/s, result = %f\n", elapsed_time_ms / nreps, gb * nreps / elapsed_time_ms * 1e3, result_CUDA);
elapsed_time_ms = 0.0f;
cudaEventRecord(start, 0);
for (int i = 0; i < nreps; i++){
result_CUDA = sum_gpu_reduce_full(kernel_reduce_score30, d_data, data_size, 0);
}
cudaEventRecord(stop, 0);
cudaThreadSynchronize();
cudaEventElapsedTime(&elapsed_time_ms, start, stop);
printf("CUDA reduction (kernel 30): execution = %8.4f, bandwidth = %8.4f Gb/s, result = %f\n", elapsed_time_ms / nreps, gb * nreps / elapsed_time_ms * 1e3, result_CUDA);
elapsed_time_ms = 0.0f;
cudaEventRecord(start, 0);
for (int i = 0; i < nreps; i++){
result_CUDA = sum_gpu_reduce_half_wrap(kernel_reduce_half_score40, d_data, data_size, 0);
}
cudaEventRecord(stop, 0);
cudaThreadSynchronize();
cudaEventElapsedTime(&elapsed_time_ms, start, stop);
printf("CUDA reduction (kernel 40): execution = %8.4f, bandwidth = %8.4f Gb/s, result = %f\n", elapsed_time_ms / nreps, gb * nreps / elapsed_time_ms * 1e3, result_CUDA);
elapsed_time_ms = 0.0f;
cudaEventRecord(start, 0);
for (int i = 0; i < nreps; i++){
result_CUDA = sum_gpu_reduce_half_wrap(kernel_reduce_half_score50, d_data, data_size, 0);
}
cudaEventRecord(stop, 0);
cudaThreadSynchronize();
cudaEventElapsedTime(&elapsed_time_ms, start, stop);
printf("CUDA reduction (kernel 50): execution = %8.4f, bandwidth = %8.4f Gb/s, result = %f\n", elapsed_time_ms / nreps, gb * nreps / elapsed_time_ms * 1e3, result_CUDA);
elapsed_time_ms = 0.0f;
cudaEventRecord(start, 0);
for (int i = 0; i < nreps; i++){
result_CUDA = sum_reduce_recursive_cuda(d_data, data_size, 0);
}
cudaEventRecord(stop, 0);
cudaThreadSynchronize();
cudaEventElapsedTime(&elapsed_time_ms, start, stop);
printf("CUDA reduction (kernel 60): execution = %8.4f, bandwidth = %8.4f Gb/s, result = %f\n", elapsed_time_ms / nreps, gb * nreps / elapsed_time_ms * 1e3, result_CUDA);
printf("\nCUDA: %s\n", cudaGetErrorString( cudaGetLastError() ) );
if (d_data)
cudaFree(d_data);
if (h_data)
free(h_data);
// cudaThreadExit();
cudaEventDestroy(start);
cudaEventDestroy(stop);
getchar();
return 0;
} |
6a6e75d65b7d27859d2abd522ceaedf0a472bc3a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__device__ float activation(float input) {
return 1 / (1 + (exp((float) -1 * (input))));
}
__global__ void cuda_neural_network(float* input, float* next, float* weights) {
extern __shared__ float buffer[];
float inputWeight;
float inputBias;
float tmp;
inputWeight = weights[(blockDim.x + 1) * blockIdx.x + threadIdx.x];
if (threadIdx.x == 0) {
inputBias = weights[(blockDim.x + 1) * blockIdx.x + blockDim.x];
}
tmp = input[threadIdx.x] * inputWeight;
buffer[threadIdx.x] = tmp;
__syncthreads();
#pragma unroll
for (int i = 1; i < blockDim.x; i *= 2) {
int j = threadIdx.x + i;
if (j < blockDim.x) {
tmp += buffer[j];
__syncthreads();
buffer[threadIdx.x] = tmp;
__syncthreads();
}
}
if (threadIdx.x == 0) {
next[blockIdx.x] = activation(tmp + inputBias);
}
}
__global__ void cuda_neural_network_error(float* current, float* next,
float* weights, float* learning, float* labels, bool override) {
extern __shared__ float buffer[];
float weight;
float bias;
float error;
float tmp;
float output;
float l;
int weightIndex = blockIdx.x + threadIdx.x * (gridDim.x + 1);
weight = weights[weightIndex];
if (blockIdx.x == 0) {
bias = weights[(threadIdx.x + 1) * (gridDim.x + 1) - 1];
}
error = next[threadIdx.x];
if (labels != NULL) {
error = error * (1 - error) * (labels[threadIdx.x] - error);
}
tmp = error * weight;
buffer[threadIdx.x] = tmp;
__syncthreads();
#pragma unroll
for (int i = 1; i < blockDim.x; i *= 2) {
int j = threadIdx.x + i;
if (j < blockDim.x) {
tmp += buffer[j];
__syncthreads();
buffer[threadIdx.x] = tmp;
__syncthreads();
}
}
if (threadIdx.x == 0) {
output = current[blockIdx.x];
l = *learning;
buffer[1] = output * l;
buffer[2] = l;
}
__syncthreads();
weights[weightIndex] = weight + buffer[1] * error;
if (blockIdx.x == 0) {
weights[(threadIdx.x + 1) * (gridDim.x + 1) - 1] = bias + error * buffer[2];
}
if (threadIdx.x == 0 && override) {
current[blockIdx.x] = output * (1 - output) * tmp;
}
}
| 6a6e75d65b7d27859d2abd522ceaedf0a472bc3a.cu | __device__ float activation(float input) {
return 1 / (1 + (exp((float) -1 * (input))));
}
__global__ void cuda_neural_network(float* input, float* next, float* weights) {
extern __shared__ float buffer[];
float inputWeight;
float inputBias;
float tmp;
inputWeight = weights[(blockDim.x + 1) * blockIdx.x + threadIdx.x];
if (threadIdx.x == 0) {
inputBias = weights[(blockDim.x + 1) * blockIdx.x + blockDim.x];
}
tmp = input[threadIdx.x] * inputWeight;
buffer[threadIdx.x] = tmp;
__syncthreads();
#pragma unroll
for (int i = 1; i < blockDim.x; i *= 2) {
int j = threadIdx.x + i;
if (j < blockDim.x) {
tmp += buffer[j];
__syncthreads();
buffer[threadIdx.x] = tmp;
__syncthreads();
}
}
if (threadIdx.x == 0) {
next[blockIdx.x] = activation(tmp + inputBias);
}
}
__global__ void cuda_neural_network_error(float* current, float* next,
float* weights, float* learning, float* labels, bool override) {
extern __shared__ float buffer[];
float weight;
float bias;
float error;
float tmp;
float output;
float l;
int weightIndex = blockIdx.x + threadIdx.x * (gridDim.x + 1);
weight = weights[weightIndex];
if (blockIdx.x == 0) {
bias = weights[(threadIdx.x + 1) * (gridDim.x + 1) - 1];
}
error = next[threadIdx.x];
if (labels != NULL) {
error = error * (1 - error) * (labels[threadIdx.x] - error);
}
tmp = error * weight;
buffer[threadIdx.x] = tmp;
__syncthreads();
#pragma unroll
for (int i = 1; i < blockDim.x; i *= 2) {
int j = threadIdx.x + i;
if (j < blockDim.x) {
tmp += buffer[j];
__syncthreads();
buffer[threadIdx.x] = tmp;
__syncthreads();
}
}
if (threadIdx.x == 0) {
output = current[blockIdx.x];
l = *learning;
buffer[1] = output * l;
buffer[2] = l;
}
__syncthreads();
weights[weightIndex] = weight + buffer[1] * error;
if (blockIdx.x == 0) {
weights[(threadIdx.x + 1) * (gridDim.x + 1) - 1] = bias + error * buffer[2];
}
if (threadIdx.x == 0 && override) {
current[blockIdx.x] = output * (1 - output) * tmp;
}
}
|
064688eb5dcfc5ead1559d6cff0b07d0b5541a65.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stddef.h>
#include <stdint.h>
#include "model_gpu_utils.h"
#include "ten_tusscher_2004_epi_S1_7.h"
extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu) {
print_to_stdout_and_file("Using ten Tusscher 2004 epi GPU model\n");
// execution configuration
const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t size = num_volumes*sizeof(real);
check_cuda_error(hipMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ));
check_cuda_error(hipMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t)));
hipLaunchKernelGGL(( kernel_set_model_inital_conditions) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0, *sv, num_volumes);
check_cuda_error( hipPeekAtLastError() );
hipDeviceSynchronize();
return pitch_h;
}
extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu) {
// execution configuration
const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t stim_currents_size = sizeof(real)*num_cells_to_solve;
size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve;
real *stims_currents_device;
check_cuda_error(hipMalloc((void **) &stims_currents_device, stim_currents_size));
check_cuda_error(hipMemcpy(stims_currents_device, stim_currents, stim_currents_size, hipMemcpyHostToDevice));
//the array cells to solve is passed when we are using and adapative mesh
uint32_t *cells_to_solve_device = NULL;
if(cells_to_solve != NULL) {
check_cuda_error(hipMalloc((void **) &cells_to_solve_device, cells_to_solve_size));
check_cuda_error(hipMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, hipMemcpyHostToDevice));
}
hipLaunchKernelGGL(( solve_gpu) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0, dt, sv, stims_currents_device, cells_to_solve_device, num_cells_to_solve, num_steps);
check_cuda_error( hipPeekAtLastError() );
check_cuda_error(hipFree(stims_currents_device));
if(cells_to_solve_device) check_cuda_error(hipFree(cells_to_solve_device));
}
__global__ void kernel_set_model_inital_conditions(real *sv, int num_volumes)
{
// Thread ID
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
if(threadID < num_volumes) {
/* *((real*)((char*)sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt
*((real*)((char*)sv + pitch * 1) + threadID) = 0.f; //M
*((real*)((char*)sv + pitch * 2) + threadID) = 0.75; //H
*((real*)((char*)sv + pitch * 3) + threadID) = 0.75f; //J
*((real*)((char*)sv + pitch * 4) + threadID) = 0.f; //Xr1
*((real*)((char*)sv + pitch * 5) + threadID) = 1.f; //Xr2
*((real*)((char*)sv + pitch * 6) + threadID) = 0.f; //Xs
*((real*)((char*)sv + pitch * 7) + threadID) = 1.f; //S
*((real*)((char*)sv + pitch * 8) + threadID) = 0.f; //R
*((real*)((char*)sv + pitch * 9) + threadID) = 0.f; //D
*((real*)((char*)sv + pitch * 10) + threadID) = 1.f; //F
*((real*)((char*)sv + pitch * 11) + threadID) = 1.f; //FCa
*((real*)((char*)sv + pitch * 12) + threadID) = 1.f; //G
*((real*)((char*)sv + pitch * 13) + threadID) = 0.0002; //Cai
*((real*)((char*)sv + pitch * 14) + threadID) = 0.2f; //CaSR
*((real*)((char*)sv + pitch * 15) + threadID) = 11.6f; //Nai
*((real*)((char*)sv + pitch * 16) + threadID) = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.5648970938428,0.00129099627291822,0.779627100242668,0.779474715113613,0.000174684219692162,0.485055176588450,0.00294100425939516,0.999998347151507,1.93457581752971e-08,1.89191389020467e-05,0.999774845968480,1.00680772549184,0.999989387551389,5.18809066986018e-05,0.681378127611208,9.60485655711624,139.873994771155};
for (uint32_t i = 0; i < NEQ; i++)
*((real*)((char*)sv + pitch * i) + threadID) = sv_sst[i];
}
}
// Solving the model for each cell in the tissue matrix ni x nj
__global__ void solve_gpu(real dt, real *sv, real* stim_currents,
uint32_t *cells_to_solve, uint32_t num_cells_to_solve,
int num_steps)
{
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
int sv_id;
// Each thread solves one cell model
if(threadID < num_cells_to_solve) {
if(cells_to_solve)
sv_id = cells_to_solve[threadID];
else
sv_id = threadID;
real rDY[NEQ];
for (int n = 0; n < num_steps; ++n) {
RHS_gpu(sv, rDY, stim_currents[threadID], sv_id, dt);
*((real*)((char*)sv) + sv_id) = dt*rDY[0] + *((real*)((char*)sv) + sv_id);
for(int i = 0; i < NEQ; i++) {
*((real*)((char*)sv + pitch * i) + sv_id) = rDY[i];
}
}
}
}
inline __device__ void RHS_gpu(real *sv, real *rDY_, real stim_current, int threadID_, real dt) {
// State variables
real svolt = *((real*)((char*)sv + pitch * 0) + threadID_);
real sm = *((real*)((char*)sv + pitch * 1) + threadID_);
real sh = *((real*)((char*)sv + pitch * 2) + threadID_);
real sj = *((real*)((char*)sv + pitch * 3) + threadID_);
real sxr1 = *((real*)((char*)sv + pitch * 4) + threadID_);
real sxr2 = *((real*)((char*)sv + pitch * 5) + threadID_);
real sxs = *((real*)((char*)sv + pitch * 6) + threadID_);
real ss = *((real*)((char*)sv + pitch * 7) + threadID_);
real sr = *((real*)((char*)sv + pitch * 8) + threadID_);
real sd = *((real*)((char*)sv + pitch * 9) + threadID_);
real sf = *((real*)((char*)sv + pitch * 10) + threadID_);
real sfca = *((real*)((char*)sv + pitch * 11) + threadID_);
real sg = *((real*)((char*)sv + pitch * 12) + threadID_);
real Cai = *((real*)((char*)sv + pitch * 13) + threadID_);
real CaSR = *((real*)((char*)sv + pitch * 14) + threadID_);
real Nai = *((real*)((char*)sv + pitch * 15) + threadID_);
real Ki = *((real*)((char*)sv + pitch * 16) + threadID_);
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
///#ifdef EPI
real Gks=0.245;
///#endif
///#ifdef ENDO
/// real Gks=0.245;
///#endif
///#ifdef MCELL
//real Gks=0.062;
///#endif
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
///#ifdef EPI
real Gto=0.294;
///#endif
///#ifdef ENDO
/// real Gto=0.073;
///#endif
///#ifdef MCELL
/// real Gto=0.294;
///#endif
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
// Setting Elnaz's parameters
real parameters []={13.9786573191327,0.000101405698327014,0.000142148787953723,0.000463565697084509,0.267335896281351,0.177864842495357,0.143352947080335,3.71301063907955,0.0174610046899325,2.13299948058035,1097.67978270442,0.000530033794995171,0.0990576312693361,0.0136941761044135,0.00497377855346063,4.38287503674063e-05};
GNa=parameters[0];
GbNa=parameters[1];
GCaL=parameters[2];
GbCa=parameters[3];
Gto=parameters[4];
Gkr=parameters[5];
Gks=parameters[6];
GK1=parameters[7];
GpK=parameters[8];
knak=parameters[9];
knaca=parameters[10];
Vmaxup=parameters[11];
GpCa=parameters[12];
real arel=parameters[13];
real crel=parameters[14];
real Vleak=parameters[15];
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
/// A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
Irel=A*sd*sg;
///Ileak=0.00008f*(CaSR-Cai);
Ileak=Vleak*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
#ifdef EPI
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
#ifdef ENDO
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+28)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.;
#endif
#ifdef MCELL
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10));
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
| 064688eb5dcfc5ead1559d6cff0b07d0b5541a65.cu | #include <stddef.h>
#include <stdint.h>
#include "model_gpu_utils.h"
#include "ten_tusscher_2004_epi_S1_7.h"
extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu) {
print_to_stdout_and_file("Using ten Tusscher 2004 epi GPU model\n");
// execution configuration
const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t size = num_volumes*sizeof(real);
check_cuda_error(cudaMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ));
check_cuda_error(cudaMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t)));
kernel_set_model_inital_conditions <<<GRID, BLOCK_SIZE>>>(*sv, num_volumes);
check_cuda_error( cudaPeekAtLastError() );
cudaDeviceSynchronize();
return pitch_h;
}
extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu) {
// execution configuration
const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t stim_currents_size = sizeof(real)*num_cells_to_solve;
size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve;
real *stims_currents_device;
check_cuda_error(cudaMalloc((void **) &stims_currents_device, stim_currents_size));
check_cuda_error(cudaMemcpy(stims_currents_device, stim_currents, stim_currents_size, cudaMemcpyHostToDevice));
//the array cells to solve is passed when we are using and adapative mesh
uint32_t *cells_to_solve_device = NULL;
if(cells_to_solve != NULL) {
check_cuda_error(cudaMalloc((void **) &cells_to_solve_device, cells_to_solve_size));
check_cuda_error(cudaMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, cudaMemcpyHostToDevice));
}
solve_gpu <<<GRID, BLOCK_SIZE>>>(dt, sv, stims_currents_device, cells_to_solve_device, num_cells_to_solve, num_steps);
check_cuda_error( cudaPeekAtLastError() );
check_cuda_error(cudaFree(stims_currents_device));
if(cells_to_solve_device) check_cuda_error(cudaFree(cells_to_solve_device));
}
__global__ void kernel_set_model_inital_conditions(real *sv, int num_volumes)
{
// Thread ID
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
if(threadID < num_volumes) {
/* *((real*)((char*)sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt
*((real*)((char*)sv + pitch * 1) + threadID) = 0.f; //M
*((real*)((char*)sv + pitch * 2) + threadID) = 0.75; //H
*((real*)((char*)sv + pitch * 3) + threadID) = 0.75f; //J
*((real*)((char*)sv + pitch * 4) + threadID) = 0.f; //Xr1
*((real*)((char*)sv + pitch * 5) + threadID) = 1.f; //Xr2
*((real*)((char*)sv + pitch * 6) + threadID) = 0.f; //Xs
*((real*)((char*)sv + pitch * 7) + threadID) = 1.f; //S
*((real*)((char*)sv + pitch * 8) + threadID) = 0.f; //R
*((real*)((char*)sv + pitch * 9) + threadID) = 0.f; //D
*((real*)((char*)sv + pitch * 10) + threadID) = 1.f; //F
*((real*)((char*)sv + pitch * 11) + threadID) = 1.f; //FCa
*((real*)((char*)sv + pitch * 12) + threadID) = 1.f; //G
*((real*)((char*)sv + pitch * 13) + threadID) = 0.0002; //Cai
*((real*)((char*)sv + pitch * 14) + threadID) = 0.2f; //CaSR
*((real*)((char*)sv + pitch * 15) + threadID) = 11.6f; //Nai
*((real*)((char*)sv + pitch * 16) + threadID) = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.5648970938428,0.00129099627291822,0.779627100242668,0.779474715113613,0.000174684219692162,0.485055176588450,0.00294100425939516,0.999998347151507,1.93457581752971e-08,1.89191389020467e-05,0.999774845968480,1.00680772549184,0.999989387551389,5.18809066986018e-05,0.681378127611208,9.60485655711624,139.873994771155};
for (uint32_t i = 0; i < NEQ; i++)
*((real*)((char*)sv + pitch * i) + threadID) = sv_sst[i];
}
}
// Solving the model for each cell in the tissue matrix ni x nj
__global__ void solve_gpu(real dt, real *sv, real* stim_currents,
uint32_t *cells_to_solve, uint32_t num_cells_to_solve,
int num_steps)
{
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
int sv_id;
// Each thread solves one cell model
if(threadID < num_cells_to_solve) {
if(cells_to_solve)
sv_id = cells_to_solve[threadID];
else
sv_id = threadID;
real rDY[NEQ];
for (int n = 0; n < num_steps; ++n) {
RHS_gpu(sv, rDY, stim_currents[threadID], sv_id, dt);
*((real*)((char*)sv) + sv_id) = dt*rDY[0] + *((real*)((char*)sv) + sv_id);
for(int i = 0; i < NEQ; i++) {
*((real*)((char*)sv + pitch * i) + sv_id) = rDY[i];
}
}
}
}
inline __device__ void RHS_gpu(real *sv, real *rDY_, real stim_current, int threadID_, real dt) {
// State variables
real svolt = *((real*)((char*)sv + pitch * 0) + threadID_);
real sm = *((real*)((char*)sv + pitch * 1) + threadID_);
real sh = *((real*)((char*)sv + pitch * 2) + threadID_);
real sj = *((real*)((char*)sv + pitch * 3) + threadID_);
real sxr1 = *((real*)((char*)sv + pitch * 4) + threadID_);
real sxr2 = *((real*)((char*)sv + pitch * 5) + threadID_);
real sxs = *((real*)((char*)sv + pitch * 6) + threadID_);
real ss = *((real*)((char*)sv + pitch * 7) + threadID_);
real sr = *((real*)((char*)sv + pitch * 8) + threadID_);
real sd = *((real*)((char*)sv + pitch * 9) + threadID_);
real sf = *((real*)((char*)sv + pitch * 10) + threadID_);
real sfca = *((real*)((char*)sv + pitch * 11) + threadID_);
real sg = *((real*)((char*)sv + pitch * 12) + threadID_);
real Cai = *((real*)((char*)sv + pitch * 13) + threadID_);
real CaSR = *((real*)((char*)sv + pitch * 14) + threadID_);
real Nai = *((real*)((char*)sv + pitch * 15) + threadID_);
real Ki = *((real*)((char*)sv + pitch * 16) + threadID_);
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
///#ifdef EPI
real Gks=0.245;
///#endif
///#ifdef ENDO
/// real Gks=0.245;
///#endif
///#ifdef MCELL
//real Gks=0.062;
///#endif
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
///#ifdef EPI
real Gto=0.294;
///#endif
///#ifdef ENDO
/// real Gto=0.073;
///#endif
///#ifdef MCELL
/// real Gto=0.294;
///#endif
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
// Setting Elnaz's parameters
real parameters []={13.9786573191327,0.000101405698327014,0.000142148787953723,0.000463565697084509,0.267335896281351,0.177864842495357,0.143352947080335,3.71301063907955,0.0174610046899325,2.13299948058035,1097.67978270442,0.000530033794995171,0.0990576312693361,0.0136941761044135,0.00497377855346063,4.38287503674063e-05};
GNa=parameters[0];
GbNa=parameters[1];
GCaL=parameters[2];
GbCa=parameters[3];
Gto=parameters[4];
Gkr=parameters[5];
Gks=parameters[6];
GK1=parameters[7];
GpK=parameters[8];
knak=parameters[9];
knaca=parameters[10];
Vmaxup=parameters[11];
GpCa=parameters[12];
real arel=parameters[13];
real crel=parameters[14];
real Vleak=parameters[15];
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
/// A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
Irel=A*sd*sg;
///Ileak=0.00008f*(CaSR-Cai);
Ileak=Vleak*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
#ifdef EPI
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
#ifdef ENDO
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+28)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.;
#endif
#ifdef MCELL
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10));
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
|
b0a4efd5a1b898a2d45be0a52cfe02a55a00491b.hip | // !!! This is a file automatically generated by hipify!!!
#include <optix.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <math_functions.h>
#include "raycaster.h"
// One ray per integer (azimuth, elevation) pair: 360 azimuth steps x 180 elevation steps.
constexpr int numCasts = 360 * 180;
// Number of elevation steps per azimuth column (flat index is decomposed with this).
const int elevationDegrees = 180;
// Elevation is re-centered by this offset so it spans [-90, 90).
const int elevationLimit = 90;
// Payload value written by the hit/miss programs into payload register 0.
enum Result
{
Miss = 0,
Hit
};
// Converts a flat launch index into a unit direction vector.
// Index layout: consecutive idx.x values sweep elevation [-90, 90) degrees
// for a fixed azimuth; azimuth advances every `elevationDegrees` indices.
__device__ void computeRay(uint3 idx, float3& direction)
{
    // Whole-degree angles recovered from the flat index.
    int elevationDeg = (int)(idx.x % elevationDegrees) - elevationLimit;
    int azimuthDeg = idx.x / elevationDegrees; // integer division; floorf() was redundant
    // BUG FIX: sinf/cosf take radians. The original passed raw degree values,
    // which wrap many times around the circle and produce meaningless directions.
    const float degToRad = 3.14159265358979323846f / 180.0f;
    float elevation = elevationDeg * degToRad;
    float azimuth = azimuthDeg * degToRad;
    direction.x = sinf(azimuth) * cosf(elevation);
    direction.y = cosf(azimuth) * cosf(elevation);
    direction.z = sinf(elevation);
}
// Ray-generation program: one launch index per cast. The payload defaults to
// Miss; the trace call is currently disabled, so Miss is always recorded.
extern "C" __global__ void __raygen__rg()
{
    const uint3 launchIdx = optixGetLaunchIndex();
    const uint3 launchDim = optixGetLaunchDimensions();
    // Guard clause: ignore launch indices beyond the grid or the cast count.
    if (launchIdx.x >= launchDim.x || launchIdx.x >= numCasts)
        return;
    float3 rayOrigin = params.origin; // kept for the disabled trace call below
    float3 rayDirection;
    computeRay(launchIdx, rayDirection);
    unsigned int payload = Miss; // payload register 0: Hit/Miss result
    OptixTraversableHandle handle = params.handle; // kept for the disabled trace call
    // Trace disabled for now; when re-enabled it writes Hit/Miss into `payload`:
    //optixTrace(params.handle, rayOrigin, rayDirection, 0.0f, 1000.0f, 0.0f,
    //           OptixVisibilityMask(255), OPTIX_RAY_FLAG_NONE, 0, 1, 0, payload);
    params.results[launchIdx.x].x = payload;
}
// Closest-hit program: record a Hit in payload register 0.
extern "C" __global__ void __closesthit__ch()
{
optixSetPayload_0(Hit);
}
// Miss program: record a Miss in payload register 0.
extern "C" __global__ void __miss__ms()
{
optixSetPayload_0(Miss);
}
| b0a4efd5a1b898a2d45be0a52cfe02a55a00491b.cu | #include <optix.h>
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <math_functions.h>
#include "raycaster.h"
// One ray per integer (azimuth, elevation) pair: 360 azimuth steps x 180 elevation steps.
constexpr int numCasts = 360 * 180;
// Number of elevation steps per azimuth column (flat index is decomposed with this).
const int elevationDegrees = 180;
// Elevation is re-centered by this offset so it spans [-90, 90).
const int elevationLimit = 90;
// Payload value written by the hit/miss programs into payload register 0.
enum Result
{
Miss = 0,
Hit
};
// Converts a flat launch index into a unit direction vector.
// Index layout: consecutive idx.x values sweep elevation [-90, 90) degrees
// for a fixed azimuth; azimuth advances every `elevationDegrees` indices.
__device__ void computeRay(uint3 idx, float3& direction)
{
    // Whole-degree angles recovered from the flat index.
    int elevationDeg = (int)(idx.x % elevationDegrees) - elevationLimit;
    int azimuthDeg = idx.x / elevationDegrees; // integer division; floorf() was redundant
    // BUG FIX: sinf/cosf take radians. The original passed raw degree values,
    // which wrap many times around the circle and produce meaningless directions.
    const float degToRad = 3.14159265358979323846f / 180.0f;
    float elevation = elevationDeg * degToRad;
    float azimuth = azimuthDeg * degToRad;
    direction.x = sinf(azimuth) * cosf(elevation);
    direction.y = cosf(azimuth) * cosf(elevation);
    direction.z = sinf(elevation);
}
// Ray-generation program: one launch index per cast. The payload defaults to
// Miss; the trace call is currently disabled, so Miss is always recorded.
extern "C" __global__ void __raygen__rg()
{
    const uint3 launchIdx = optixGetLaunchIndex();
    const uint3 launchDim = optixGetLaunchDimensions();
    // Guard clause: ignore launch indices beyond the grid or the cast count.
    if (launchIdx.x >= launchDim.x || launchIdx.x >= numCasts)
        return;
    float3 rayOrigin = params.origin; // kept for the disabled trace call below
    float3 rayDirection;
    computeRay(launchIdx, rayDirection);
    unsigned int payload = Miss; // payload register 0: Hit/Miss result
    OptixTraversableHandle handle = params.handle; // kept for the disabled trace call
    // Trace disabled for now; when re-enabled it writes Hit/Miss into `payload`:
    //optixTrace(params.handle, rayOrigin, rayDirection, 0.0f, 1000.0f, 0.0f,
    //           OptixVisibilityMask(255), OPTIX_RAY_FLAG_NONE, 0, 1, 0, payload);
    params.results[launchIdx.x].x = payload;
}
// Closest-hit program: record a Hit in payload register 0.
extern "C" __global__ void __closesthit__ch()
{
optixSetPayload_0(Hit);
}
// Miss program: record a Miss in payload register 0.
extern "C" __global__ void __miss__ms()
{
optixSetPayload_0(Miss);
}
|
eefa62fa7cc843b4adc4686ac2ca790cacaee441.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
// Per-argument dat pitches kept in device __constant__ memory; the *_h host
// shadows cache the last value pushed so the host stub can skip redundant
// hipMemcpyToSymbol calls (-1 == never set).
__constant__ int xdim0_update_halo_kernel1_r2;
int xdim0_update_halo_kernel1_r2_h = -1;
__constant__ int ydim0_update_halo_kernel1_r2;
int ydim0_update_halo_kernel1_r2_h = -1;
__constant__ int xdim1_update_halo_kernel1_r2;
int xdim1_update_halo_kernel1_r2_h = -1;
__constant__ int ydim1_update_halo_kernel1_r2;
int ydim1_update_halo_kernel1_r2_h = -1;
__constant__ int xdim2_update_halo_kernel1_r2;
int xdim2_update_halo_kernel1_r2_h = -1;
__constant__ int ydim2_update_halo_kernel1_r2;
int ydim2_update_halo_kernel1_r2_h = -1;
__constant__ int xdim3_update_halo_kernel1_r2;
int xdim3_update_halo_kernel1_r2_h = -1;
__constant__ int ydim3_update_halo_kernel1_r2;
int ydim3_update_halo_kernel1_r2_h = -1;
__constant__ int xdim4_update_halo_kernel1_r2;
int xdim4_update_halo_kernel1_r2_h = -1;
__constant__ int ydim4_update_halo_kernel1_r2;
int ydim4_update_halo_kernel1_r2_h = -1;
__constant__ int xdim5_update_halo_kernel1_r2;
int xdim5_update_halo_kernel1_r2_h = -1;
__constant__ int ydim5_update_halo_kernel1_r2;
int ydim5_update_halo_kernel1_r2_h = -1;
__constant__ int xdim6_update_halo_kernel1_r2;
int xdim6_update_halo_kernel1_r2_h = -1;
__constant__ int ydim6_update_halo_kernel1_r2;
int ydim6_update_halo_kernel1_r2_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#undef OPS_ACC6
// 3-D flattening macros: index (x,y,z) using the per-dat pitches declared above.
#define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel1_r2*(y)+xdim0_update_halo_kernel1_r2*ydim0_update_halo_kernel1_r2*(z))
#define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel1_r2*(y)+xdim1_update_halo_kernel1_r2*ydim1_update_halo_kernel1_r2*(z))
#define OPS_ACC2(x,y,z) (x+xdim2_update_halo_kernel1_r2*(y)+xdim2_update_halo_kernel1_r2*ydim2_update_halo_kernel1_r2*(z))
#define OPS_ACC3(x,y,z) (x+xdim3_update_halo_kernel1_r2*(y)+xdim3_update_halo_kernel1_r2*ydim3_update_halo_kernel1_r2*(z))
#define OPS_ACC4(x,y,z) (x+xdim4_update_halo_kernel1_r2*(y)+xdim4_update_halo_kernel1_r2*ydim4_update_halo_kernel1_r2*(z))
#define OPS_ACC5(x,y,z) (x+xdim5_update_halo_kernel1_r2*(y)+xdim5_update_halo_kernel1_r2*ydim5_update_halo_kernel1_r2*(z))
#define OPS_ACC6(x,y,z) (x+xdim6_update_halo_kernel1_r2*(y)+xdim6_update_halo_kernel1_r2*ydim6_update_halo_kernel1_r2*(z))
//user function
// User function: for every field whose flag is set in `fields`, copy the value
// from 3 points inside the domain (x offset -3) into the current halo point.
__device__
inline void update_halo_kernel1_r2_gpu(double *density0, double *density1,
double *energy0, double *energy1,
double *pressure, double *viscosity,
double *soundspeed , const int* fields) {
if(fields[FIELD_DENSITY0] == 1) density0[OPS_ACC0(0,0,0)] = density0[OPS_ACC0(-3,0,0)];
if(fields[FIELD_DENSITY1] == 1) density1[OPS_ACC1(0,0,0)] = density1[OPS_ACC1(-3,0,0)];
if(fields[FIELD_ENERGY0] == 1) energy0[OPS_ACC2(0,0,0)] = energy0[OPS_ACC2(-3,0,0)];
if(fields[FIELD_ENERGY1] == 1) energy1[OPS_ACC3(0,0,0)] = energy1[OPS_ACC3(-3,0,0)];
if(fields[FIELD_PRESSURE] == 1) pressure[OPS_ACC4(0,0,0)] = pressure[OPS_ACC4(-3,0,0)];
if(fields[FIELD_VISCOSITY] == 1) viscosity[OPS_ACC5(0,0,0)] = viscosity[OPS_ACC5(-3,0,0)];
if(fields[FIELD_SOUNDSPEED] == 1) soundspeed[OPS_ACC6(0,0,0)] = soundspeed[OPS_ACC6(-3,0,0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#undef OPS_ACC6
// Kernel wrapper: one thread per (x,y,z) point of the iteration range.
// Offsets each argument pointer to the thread's element, then calls the
// user function. size0/size1/size2 bound the valid range in each dimension.
__global__ void ops_update_halo_kernel1_r2(
double* __restrict arg0,
double* __restrict arg1,
double* __restrict arg2,
double* __restrict arg3,
double* __restrict arg4,
double* __restrict arg5,
double* __restrict arg6,
const int* __restrict arg7,
int size0,
int size1,
int size2 ){
// Global 3-D thread coordinates within the launch grid.
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
// Advance each pointer by (x, y*pitch_x, z*pitch_x*pitch_y); pitches come
// from the __constant__ xdim*/ydim* values pushed by the host stub.
arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_update_halo_kernel1_r2 + idx_z * 1*1 * xdim0_update_halo_kernel1_r2 * ydim0_update_halo_kernel1_r2;
arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_update_halo_kernel1_r2 + idx_z * 1*1 * xdim1_update_halo_kernel1_r2 * ydim1_update_halo_kernel1_r2;
arg2 += idx_x * 1*1 + idx_y * 1*1 * xdim2_update_halo_kernel1_r2 + idx_z * 1*1 * xdim2_update_halo_kernel1_r2 * ydim2_update_halo_kernel1_r2;
arg3 += idx_x * 1*1 + idx_y * 1*1 * xdim3_update_halo_kernel1_r2 + idx_z * 1*1 * xdim3_update_halo_kernel1_r2 * ydim3_update_halo_kernel1_r2;
arg4 += idx_x * 1*1 + idx_y * 1*1 * xdim4_update_halo_kernel1_r2 + idx_z * 1*1 * xdim4_update_halo_kernel1_r2 * ydim4_update_halo_kernel1_r2;
arg5 += idx_x * 1*1 + idx_y * 1*1 * xdim5_update_halo_kernel1_r2 + idx_z * 1*1 * xdim5_update_halo_kernel1_r2 * ydim5_update_halo_kernel1_r2;
arg6 += idx_x * 1*1 + idx_y * 1*1 * xdim6_update_halo_kernel1_r2 + idx_z * 1*1 * xdim6_update_halo_kernel1_r2 * ydim6_update_halo_kernel1_r2;
// Bounds guard: the grid may overshoot the range in each dimension.
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel1_r2_gpu(arg0, arg1, arg2, arg3,
arg4, arg5, arg6, arg7);
}
}
// host stub function
// Computes the locally-owned iteration range, refreshes the device
// __constant__ pitches when the dat sizes changed, stages the `fields`
// flag array into OPS constant storage, offsets the device base pointers
// to the range start, and launches the kernel wrapper.
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel1_r2(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3,
ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7) {
#else
// Lazy path: the same body, driven by a queued kernel descriptor.
void ops_par_loop_update_halo_kernel1_r2_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
ops_arg arg4 = desc->args[4];
ops_arg arg5 = desc->args[5];
ops_arg arg6 = desc->args[6];
ops_arg arg7 = desc->args[7];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[8] = { arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,8,range,17)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(17,"update_halo_kernel1_r2");
OPS_kernels[17].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
// Intersect the requested range with this rank's decomposition slab.
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<3; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0];
int ydim3 = args[3].dat->size[1];
int xdim4 = args[4].dat->size[0];
int ydim4 = args[4].dat->size[1];
int xdim5 = args[5].dat->size[0];
int ydim5 = args[5].dat->size[1];
int xdim6 = args[6].dat->size[0];
int ydim6 = args[6].dat->size[1];
// Push pitches to device __constant__ memory only when they changed
// (the *_h shadows cache the last pushed values).
if (xdim0 != xdim0_update_halo_kernel1_r2_h || ydim0 != ydim0_update_halo_kernel1_r2_h || xdim1 != xdim1_update_halo_kernel1_r2_h || ydim1 != ydim1_update_halo_kernel1_r2_h || xdim2 != xdim2_update_halo_kernel1_r2_h || ydim2 != ydim2_update_halo_kernel1_r2_h || xdim3 != xdim3_update_halo_kernel1_r2_h || ydim3 != ydim3_update_halo_kernel1_r2_h || xdim4 != xdim4_update_halo_kernel1_r2_h || ydim4 != ydim4_update_halo_kernel1_r2_h || xdim5 != xdim5_update_halo_kernel1_r2_h || ydim5 != ydim5_update_halo_kernel1_r2_h || xdim6 != xdim6_update_halo_kernel1_r2_h || ydim6 != ydim6_update_halo_kernel1_r2_h) {
hipMemcpyToSymbol( xdim0_update_halo_kernel1_r2, &xdim0, sizeof(int) );
xdim0_update_halo_kernel1_r2_h = xdim0;
hipMemcpyToSymbol( ydim0_update_halo_kernel1_r2, &ydim0, sizeof(int) );
ydim0_update_halo_kernel1_r2_h = ydim0;
hipMemcpyToSymbol( xdim1_update_halo_kernel1_r2, &xdim1, sizeof(int) );
xdim1_update_halo_kernel1_r2_h = xdim1;
hipMemcpyToSymbol( ydim1_update_halo_kernel1_r2, &ydim1, sizeof(int) );
ydim1_update_halo_kernel1_r2_h = ydim1;
hipMemcpyToSymbol( xdim2_update_halo_kernel1_r2, &xdim2, sizeof(int) );
xdim2_update_halo_kernel1_r2_h = xdim2;
hipMemcpyToSymbol( ydim2_update_halo_kernel1_r2, &ydim2, sizeof(int) );
ydim2_update_halo_kernel1_r2_h = ydim2;
hipMemcpyToSymbol( xdim3_update_halo_kernel1_r2, &xdim3, sizeof(int) );
xdim3_update_halo_kernel1_r2_h = xdim3;
hipMemcpyToSymbol( ydim3_update_halo_kernel1_r2, &ydim3, sizeof(int) );
ydim3_update_halo_kernel1_r2_h = ydim3;
hipMemcpyToSymbol( xdim4_update_halo_kernel1_r2, &xdim4, sizeof(int) );
xdim4_update_halo_kernel1_r2_h = xdim4;
hipMemcpyToSymbol( ydim4_update_halo_kernel1_r2, &ydim4, sizeof(int) );
ydim4_update_halo_kernel1_r2_h = ydim4;
hipMemcpyToSymbol( xdim5_update_halo_kernel1_r2, &xdim5, sizeof(int) );
xdim5_update_halo_kernel1_r2_h = xdim5;
hipMemcpyToSymbol( ydim5_update_halo_kernel1_r2, &ydim5, sizeof(int) );
ydim5_update_halo_kernel1_r2_h = ydim5;
hipMemcpyToSymbol( xdim6_update_halo_kernel1_r2, &xdim6, sizeof(int) );
xdim6_update_halo_kernel1_r2_h = xdim6;
hipMemcpyToSymbol( ydim6_update_halo_kernel1_r2, &ydim6, sizeof(int) );
ydim6_update_halo_kernel1_r2_h = ydim6;
}
int *arg7h = (int *)arg7.data;
// Ceiling-divide the range by the configured block size.
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
// Stage the NUM_FIELDS flag array into OPS constant storage and mirror it to device.
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg7.data = OPS_consts_h + consts_bytes;
arg7.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg7.data)[d] = arg7h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size);
int dat6 = (OPS_soa ? args[6].dat->type_size : args[6].dat->elem_size);
char *p_a[8];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
(start[1] * args[2].stencil->stride[1]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
(start[1] * args[3].stencil->stride[1]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2]);
p_a[3] = (char *)args[3].data_d + base3;
int base4 = args[4].dat->base_offset +
dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
(start[1] * args[4].stencil->stride[1]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
args[4].dat->size[1] *
(start[2] * args[4].stencil->stride[2]);
p_a[4] = (char *)args[4].data_d + base4;
int base5 = args[5].dat->base_offset +
dat5 * 1 * (start[0] * args[5].stencil->stride[0]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
(start[1] * args[5].stencil->stride[1]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
args[5].dat->size[1] *
(start[2] * args[5].stencil->stride[2]);
p_a[5] = (char *)args[5].data_d + base5;
int base6 = args[6].dat->base_offset +
dat6 * 1 * (start[0] * args[6].stencil->stride[0]);
base6 = base6+ dat6 *
args[6].dat->size[0] *
(start[1] * args[6].stencil->stride[1]);
base6 = base6+ dat6 *
args[6].dat->size[0] *
args[6].dat->size[1] *
(start[2] * args[6].stencil->stride[2]);
p_a[6] = (char *)args[6].data_d + base6;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 8);
ops_halo_exchanges(args,8,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[17].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
hipLaunchKernelGGL(( ops_update_halo_kernel1_r2), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],
(double *)p_a[4], (double *)p_a[5],
(double *)p_a[6], (int *)arg7.data_d,x_size, y_size, z_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags>1) {
// Synchronize only when diagnostics are on, so timing is accurate.
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[17].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 8);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
ops_set_halo_dirtybit3(&args[2],range);
ops_set_halo_dirtybit3(&args[3],range);
ops_set_halo_dirtybit3(&args[4],range);
ops_set_halo_dirtybit3(&args[5],range);
ops_set_halo_dirtybit3(&args[6],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[17].mpi_time += t2-t1;
OPS_kernels[17].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[17].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[17].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[17].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[17].transfer += ops_compute_transfer(dim, start, end, &arg4);
OPS_kernels[17].transfer += ops_compute_transfer(dim, start, end, &arg5);
OPS_kernels[17].transfer += ops_compute_transfer(dim, start, end, &arg6);
}
}
#ifdef OPS_LAZY
// Lazy front-end: builds a kernel descriptor (with a djb2-style hash of the
// kernel index, range and dat indices) and queues it; the execute function
// above runs it later. The fields array (arg7) is deep-copied because the
// caller's buffer may change before execution.
void ops_par_loop_update_halo_kernel1_r2(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 17;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 17;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 8;
desc->args = (ops_arg*)malloc(8*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
desc->args[4] = arg4;
desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index;
desc->args[5] = arg5;
desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index;
desc->args[6] = arg6;
desc->hash = ((desc->hash << 5) + desc->hash) + arg6.dat->index;
desc->args[7] = arg7;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg7.data,NUM_FIELDS*sizeof(int));
desc->args[7].data = tmp;
desc->function = ops_par_loop_update_halo_kernel1_r2_execute;
if (OPS_diags > 1) {
ops_timing_realloc(17,"update_halo_kernel1_r2");
}
ops_enqueue_kernel(desc);
}
#endif
| eefa62fa7cc843b4adc4686ac2ca790cacaee441.cu | //
// auto-generated by ops.py
//
// Per-argument dat pitches kept in device __constant__ memory; the *_h host
// shadows cache the last value pushed so the host stub can skip redundant
// cudaMemcpyToSymbol calls (-1 == never set).
__constant__ int xdim0_update_halo_kernel1_r2;
int xdim0_update_halo_kernel1_r2_h = -1;
__constant__ int ydim0_update_halo_kernel1_r2;
int ydim0_update_halo_kernel1_r2_h = -1;
__constant__ int xdim1_update_halo_kernel1_r2;
int xdim1_update_halo_kernel1_r2_h = -1;
__constant__ int ydim1_update_halo_kernel1_r2;
int ydim1_update_halo_kernel1_r2_h = -1;
__constant__ int xdim2_update_halo_kernel1_r2;
int xdim2_update_halo_kernel1_r2_h = -1;
__constant__ int ydim2_update_halo_kernel1_r2;
int ydim2_update_halo_kernel1_r2_h = -1;
__constant__ int xdim3_update_halo_kernel1_r2;
int xdim3_update_halo_kernel1_r2_h = -1;
__constant__ int ydim3_update_halo_kernel1_r2;
int ydim3_update_halo_kernel1_r2_h = -1;
__constant__ int xdim4_update_halo_kernel1_r2;
int xdim4_update_halo_kernel1_r2_h = -1;
__constant__ int ydim4_update_halo_kernel1_r2;
int ydim4_update_halo_kernel1_r2_h = -1;
__constant__ int xdim5_update_halo_kernel1_r2;
int xdim5_update_halo_kernel1_r2_h = -1;
__constant__ int ydim5_update_halo_kernel1_r2;
int ydim5_update_halo_kernel1_r2_h = -1;
__constant__ int xdim6_update_halo_kernel1_r2;
int xdim6_update_halo_kernel1_r2_h = -1;
__constant__ int ydim6_update_halo_kernel1_r2;
int ydim6_update_halo_kernel1_r2_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#undef OPS_ACC6
// 3-D flattening macros: index (x,y,z) using the per-dat pitches declared above.
#define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel1_r2*(y)+xdim0_update_halo_kernel1_r2*ydim0_update_halo_kernel1_r2*(z))
#define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel1_r2*(y)+xdim1_update_halo_kernel1_r2*ydim1_update_halo_kernel1_r2*(z))
#define OPS_ACC2(x,y,z) (x+xdim2_update_halo_kernel1_r2*(y)+xdim2_update_halo_kernel1_r2*ydim2_update_halo_kernel1_r2*(z))
#define OPS_ACC3(x,y,z) (x+xdim3_update_halo_kernel1_r2*(y)+xdim3_update_halo_kernel1_r2*ydim3_update_halo_kernel1_r2*(z))
#define OPS_ACC4(x,y,z) (x+xdim4_update_halo_kernel1_r2*(y)+xdim4_update_halo_kernel1_r2*ydim4_update_halo_kernel1_r2*(z))
#define OPS_ACC5(x,y,z) (x+xdim5_update_halo_kernel1_r2*(y)+xdim5_update_halo_kernel1_r2*ydim5_update_halo_kernel1_r2*(z))
#define OPS_ACC6(x,y,z) (x+xdim6_update_halo_kernel1_r2*(y)+xdim6_update_halo_kernel1_r2*ydim6_update_halo_kernel1_r2*(z))
//user function
// User function: for every field whose flag is set in `fields`, copy the value
// from 3 points inside the domain (x offset -3) into the current halo point.
__device__
inline void update_halo_kernel1_r2_gpu(double *density0, double *density1,
double *energy0, double *energy1,
double *pressure, double *viscosity,
double *soundspeed , const int* fields) {
if(fields[FIELD_DENSITY0] == 1) density0[OPS_ACC0(0,0,0)] = density0[OPS_ACC0(-3,0,0)];
if(fields[FIELD_DENSITY1] == 1) density1[OPS_ACC1(0,0,0)] = density1[OPS_ACC1(-3,0,0)];
if(fields[FIELD_ENERGY0] == 1) energy0[OPS_ACC2(0,0,0)] = energy0[OPS_ACC2(-3,0,0)];
if(fields[FIELD_ENERGY1] == 1) energy1[OPS_ACC3(0,0,0)] = energy1[OPS_ACC3(-3,0,0)];
if(fields[FIELD_PRESSURE] == 1) pressure[OPS_ACC4(0,0,0)] = pressure[OPS_ACC4(-3,0,0)];
if(fields[FIELD_VISCOSITY] == 1) viscosity[OPS_ACC5(0,0,0)] = viscosity[OPS_ACC5(-3,0,0)];
if(fields[FIELD_SOUNDSPEED] == 1) soundspeed[OPS_ACC6(0,0,0)] = soundspeed[OPS_ACC6(-3,0,0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#undef OPS_ACC6
// Kernel wrapper: one thread per (x,y,z) point of the iteration range.
// Offsets each argument pointer to the thread's element, then calls the
// user function. size0/size1/size2 bound the valid range in each dimension.
__global__ void ops_update_halo_kernel1_r2(
double* __restrict arg0,
double* __restrict arg1,
double* __restrict arg2,
double* __restrict arg3,
double* __restrict arg4,
double* __restrict arg5,
double* __restrict arg6,
const int* __restrict arg7,
int size0,
int size1,
int size2 ){
// Global 3-D thread coordinates within the launch grid.
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
// Advance each pointer by (x, y*pitch_x, z*pitch_x*pitch_y); pitches come
// from the __constant__ xdim*/ydim* values pushed by the host stub.
arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_update_halo_kernel1_r2 + idx_z * 1*1 * xdim0_update_halo_kernel1_r2 * ydim0_update_halo_kernel1_r2;
arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_update_halo_kernel1_r2 + idx_z * 1*1 * xdim1_update_halo_kernel1_r2 * ydim1_update_halo_kernel1_r2;
arg2 += idx_x * 1*1 + idx_y * 1*1 * xdim2_update_halo_kernel1_r2 + idx_z * 1*1 * xdim2_update_halo_kernel1_r2 * ydim2_update_halo_kernel1_r2;
arg3 += idx_x * 1*1 + idx_y * 1*1 * xdim3_update_halo_kernel1_r2 + idx_z * 1*1 * xdim3_update_halo_kernel1_r2 * ydim3_update_halo_kernel1_r2;
arg4 += idx_x * 1*1 + idx_y * 1*1 * xdim4_update_halo_kernel1_r2 + idx_z * 1*1 * xdim4_update_halo_kernel1_r2 * ydim4_update_halo_kernel1_r2;
arg5 += idx_x * 1*1 + idx_y * 1*1 * xdim5_update_halo_kernel1_r2 + idx_z * 1*1 * xdim5_update_halo_kernel1_r2 * ydim5_update_halo_kernel1_r2;
arg6 += idx_x * 1*1 + idx_y * 1*1 * xdim6_update_halo_kernel1_r2 + idx_z * 1*1 * xdim6_update_halo_kernel1_r2 * ydim6_update_halo_kernel1_r2;
// Bounds guard: the grid may overshoot the range in each dimension.
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel1_r2_gpu(arg0, arg1, arg2, arg3,
arg4, arg5, arg6, arg7);
}
}
// host stub function
// Computes the locally-owned iteration range, refreshes the device
// __constant__ pitches when the dat sizes changed, stages the `fields`
// flag array into OPS constant storage, offsets the device base pointers
// to the range start, and launches the kernel wrapper.
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel1_r2(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3,
ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7) {
#else
// Lazy path: the same body, driven by a queued kernel descriptor.
void ops_par_loop_update_halo_kernel1_r2_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
ops_arg arg4 = desc->args[4];
ops_arg arg5 = desc->args[5];
ops_arg arg6 = desc->args[6];
ops_arg arg7 = desc->args[7];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[8] = { arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,8,range,17)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(17,"update_halo_kernel1_r2");
OPS_kernels[17].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
// Intersect the requested range with this rank's decomposition slab.
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<3; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0];
int ydim3 = args[3].dat->size[1];
int xdim4 = args[4].dat->size[0];
int ydim4 = args[4].dat->size[1];
int xdim5 = args[5].dat->size[0];
int ydim5 = args[5].dat->size[1];
int xdim6 = args[6].dat->size[0];
int ydim6 = args[6].dat->size[1];
// Push pitches to device __constant__ memory only when they changed
// (the *_h shadows cache the last pushed values).
if (xdim0 != xdim0_update_halo_kernel1_r2_h || ydim0 != ydim0_update_halo_kernel1_r2_h || xdim1 != xdim1_update_halo_kernel1_r2_h || ydim1 != ydim1_update_halo_kernel1_r2_h || xdim2 != xdim2_update_halo_kernel1_r2_h || ydim2 != ydim2_update_halo_kernel1_r2_h || xdim3 != xdim3_update_halo_kernel1_r2_h || ydim3 != ydim3_update_halo_kernel1_r2_h || xdim4 != xdim4_update_halo_kernel1_r2_h || ydim4 != ydim4_update_halo_kernel1_r2_h || xdim5 != xdim5_update_halo_kernel1_r2_h || ydim5 != ydim5_update_halo_kernel1_r2_h || xdim6 != xdim6_update_halo_kernel1_r2_h || ydim6 != ydim6_update_halo_kernel1_r2_h) {
cudaMemcpyToSymbol( xdim0_update_halo_kernel1_r2, &xdim0, sizeof(int) );
xdim0_update_halo_kernel1_r2_h = xdim0;
cudaMemcpyToSymbol( ydim0_update_halo_kernel1_r2, &ydim0, sizeof(int) );
ydim0_update_halo_kernel1_r2_h = ydim0;
cudaMemcpyToSymbol( xdim1_update_halo_kernel1_r2, &xdim1, sizeof(int) );
xdim1_update_halo_kernel1_r2_h = xdim1;
cudaMemcpyToSymbol( ydim1_update_halo_kernel1_r2, &ydim1, sizeof(int) );
ydim1_update_halo_kernel1_r2_h = ydim1;
cudaMemcpyToSymbol( xdim2_update_halo_kernel1_r2, &xdim2, sizeof(int) );
xdim2_update_halo_kernel1_r2_h = xdim2;
cudaMemcpyToSymbol( ydim2_update_halo_kernel1_r2, &ydim2, sizeof(int) );
ydim2_update_halo_kernel1_r2_h = ydim2;
cudaMemcpyToSymbol( xdim3_update_halo_kernel1_r2, &xdim3, sizeof(int) );
xdim3_update_halo_kernel1_r2_h = xdim3;
cudaMemcpyToSymbol( ydim3_update_halo_kernel1_r2, &ydim3, sizeof(int) );
ydim3_update_halo_kernel1_r2_h = ydim3;
cudaMemcpyToSymbol( xdim4_update_halo_kernel1_r2, &xdim4, sizeof(int) );
xdim4_update_halo_kernel1_r2_h = xdim4;
cudaMemcpyToSymbol( ydim4_update_halo_kernel1_r2, &ydim4, sizeof(int) );
ydim4_update_halo_kernel1_r2_h = ydim4;
cudaMemcpyToSymbol( xdim5_update_halo_kernel1_r2, &xdim5, sizeof(int) );
xdim5_update_halo_kernel1_r2_h = xdim5;
cudaMemcpyToSymbol( ydim5_update_halo_kernel1_r2, &ydim5, sizeof(int) );
ydim5_update_halo_kernel1_r2_h = ydim5;
cudaMemcpyToSymbol( xdim6_update_halo_kernel1_r2, &xdim6, sizeof(int) );
xdim6_update_halo_kernel1_r2_h = xdim6;
cudaMemcpyToSymbol( ydim6_update_halo_kernel1_r2, &ydim6, sizeof(int) );
ydim6_update_halo_kernel1_r2_h = ydim6;
}
int *arg7h = (int *)arg7.data;
// Ceiling-divide the range by the configured block size.
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
// Stage the NUM_FIELDS flag array into OPS constant storage and mirror it to device.
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg7.data = OPS_consts_h + consts_bytes;
arg7.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg7.data)[d] = arg7h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size);
int dat6 = (OPS_soa ? args[6].dat->type_size : args[6].dat->elem_size);
char *p_a[8];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
(start[1] * args[2].stencil->stride[1]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
(start[1] * args[3].stencil->stride[1]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2]);
p_a[3] = (char *)args[3].data_d + base3;
int base4 = args[4].dat->base_offset +
dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
(start[1] * args[4].stencil->stride[1]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
args[4].dat->size[1] *
(start[2] * args[4].stencil->stride[2]);
p_a[4] = (char *)args[4].data_d + base4;
int base5 = args[5].dat->base_offset +
dat5 * 1 * (start[0] * args[5].stencil->stride[0]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
(start[1] * args[5].stencil->stride[1]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
args[5].dat->size[1] *
(start[2] * args[5].stencil->stride[2]);
p_a[5] = (char *)args[5].data_d + base5;
int base6 = args[6].dat->base_offset +
dat6 * 1 * (start[0] * args[6].stencil->stride[0]);
base6 = base6+ dat6 *
args[6].dat->size[0] *
(start[1] * args[6].stencil->stride[1]);
base6 = base6+ dat6 *
args[6].dat->size[0] *
args[6].dat->size[1] *
(start[2] * args[6].stencil->stride[2]);
p_a[6] = (char *)args[6].data_d + base6;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 8);
ops_halo_exchanges(args,8,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[17].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
ops_update_halo_kernel1_r2<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],
(double *)p_a[4], (double *)p_a[5],
(double *)p_a[6], (int *)arg7.data_d,x_size, y_size, z_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags>1) {
// Synchronize only when diagnostics are on, so timing is accurate.
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[17].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 8);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
ops_set_halo_dirtybit3(&args[2],range);
ops_set_halo_dirtybit3(&args[3],range);
ops_set_halo_dirtybit3(&args[4],range);
ops_set_halo_dirtybit3(&args[5],range);
ops_set_halo_dirtybit3(&args[6],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[17].mpi_time += t2-t1;
OPS_kernels[17].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[17].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[17].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[17].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[17].transfer += ops_compute_transfer(dim, start, end, &arg4);
OPS_kernels[17].transfer += ops_compute_transfer(dim, start, end, &arg5);
OPS_kernels[17].transfer += ops_compute_transfer(dim, start, end, &arg6);
}
}
#ifdef OPS_LAZY
// Lazy-execution entry point (generated by the OPS code generator): instead of
// running the halo-update loop immediately, package the arguments into an
// ops_kernel_descriptor and hand it to the runtime queue for deferred
// execution. The djb2-style hash over loop index, range and dat indices lets
// the runtime identify identical loop instances.
void ops_par_loop_update_halo_kernel1_r2(char const *name, ops_block block, int dim, int* range,
 ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7) {
  // Descriptor is heap-allocated; ownership passes to the OPS runtime queue.
  ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
  desc->name = name;
  desc->block = block;
  desc->dim = dim;
  desc->device = 1;
  desc->index = 17;
  // djb2 hash seeded with 5381, folded over the kernel index first.
  desc->hash = 5381;
  desc->hash = ((desc->hash << 5) + desc->hash) + 17;
  // 3-D loop => 6 range entries (start/end per dimension).
  for ( int i=0; i<6; i++ ){
    desc->range[i] = range[i];
    desc->orig_range[i] = range[i];
    desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
  }
  desc->nargs = 8;
  desc->args = (ops_arg*)malloc(8*sizeof(ops_arg));
  desc->args[0] = arg0;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
  desc->args[1] = arg1;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
  desc->args[2] = arg2;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
  desc->args[3] = arg3;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
  desc->args[4] = arg4;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index;
  desc->args[5] = arg5;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index;
  desc->args[6] = arg6;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg6.dat->index;
  desc->args[7] = arg7;
  // arg7's payload is deep-copied so the enqueued descriptor keeps its own
  // snapshot of the data, independent of the caller's buffer.
  char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
  memcpy(tmp, arg7.data,NUM_FIELDS*sizeof(int));
  desc->args[7].data = tmp;
  desc->function = ops_par_loop_update_halo_kernel1_r2_execute;
  if (OPS_diags > 1) {
    ops_timing_realloc(17,"update_halo_kernel1_r2");
  }
  ops_enqueue_kernel(desc);
}
#endif
|
36fcab725998ebf69d950cca2c0f247f46c9479c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright 2015 Stanford University, NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "realm_saxpy.h"
// SAXPY kernel: z[i] = alpha * x[i] + y[i] for every i < num_elements.
// One element per thread; the 1-D grid must cover num_elements, and threads
// past the end simply do nothing.
__global__
void gpu_saxpy(const float alpha, const int num_elements,
               const float *x, const float *y, float *z)
{
  const int idx = blockDim.x * blockIdx.x + threadIdx.x;
  if (idx < num_elements) {
    z[idx] = alpha * x[idx] + y[idx];
  }
}
__host__
// Realm task body: unpack SaxpyArgs, obtain raw device pointers for the three
// region instances, and launch gpu_saxpy. The launch is deliberately left
// asynchronous (no sync) — see the trailing comment from the original author.
void gpu_saxpy_task(const void *args, size_t arglen,
                    const void *userdata, size_t userlen, Processor p)
{
  assert(arglen == sizeof(SaxpyArgs));
  const SaxpyArgs *saxpy_args = (const SaxpyArgs*)args;
  printf("Running GPU Saxpy Task\n\n");
  Rect<1> actual_bounds;
  ByteOffset offsets;
  // These are all device pointers
  const float *x_ptr = (const float*)saxpy_args->x_inst.get_accessor().
    raw_dense_ptr<1>(saxpy_args->bounds, actual_bounds, offsets);
  assert(actual_bounds == saxpy_args->bounds);
  const float *y_ptr = (const float*)saxpy_args->y_inst.get_accessor().
    raw_dense_ptr<1>(saxpy_args->bounds, actual_bounds, offsets);
  assert(actual_bounds == saxpy_args->bounds);
  float *z_ptr = (float*)saxpy_args->z_inst.get_accessor().
    raw_dense_ptr<1>(saxpy_args->bounds, actual_bounds, offsets);
  // BUG FIX: z was the only instance whose returned bounds were not
  // validated; keep the check consistent with x and y.
  assert(actual_bounds == saxpy_args->bounds);
  size_t num_elements = actual_bounds.volume();
  size_t cta_threads = 256;
  // Ceil-divide so the final partial block is covered.
  size_t total_ctas = (num_elements + (cta_threads-1))/cta_threads;
  hipLaunchKernelGGL(( gpu_saxpy), dim3(total_ctas), dim3(cta_threads), 0, 0, saxpy_args->alpha, num_elements,
                     x_ptr, y_ptr, z_ptr);
  // LOOK: NO WAIT! :)
}
| 36fcab725998ebf69d950cca2c0f247f46c9479c.cu | /* Copyright 2015 Stanford University, NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "realm_saxpy.h"
/*
 * z = alpha*x + y, one element per thread. Threads in the final block that
 * land past num_elements fall through the guard and exit immediately.
 */
__global__
void gpu_saxpy(const float alpha, const int num_elements,
               const float *x, const float *y, float *z)
{
  const int element = threadIdx.x + blockIdx.x * blockDim.x;
  if (element >= num_elements)
    return;
  const float scaled = alpha * x[element];
  z[element] = scaled + y[element];
}
__host__
// Realm task body: unpack SaxpyArgs, obtain raw device pointers for the three
// region instances, and launch gpu_saxpy. The launch is deliberately left
// asynchronous (no sync) — see the trailing comment from the original author.
void gpu_saxpy_task(const void *args, size_t arglen,
                    const void *userdata, size_t userlen, Processor p)
{
  assert(arglen == sizeof(SaxpyArgs));
  const SaxpyArgs *saxpy_args = (const SaxpyArgs*)args;
  printf("Running GPU Saxpy Task\n\n");
  Rect<1> actual_bounds;
  ByteOffset offsets;
  // These are all device pointers
  const float *x_ptr = (const float*)saxpy_args->x_inst.get_accessor().
    raw_dense_ptr<1>(saxpy_args->bounds, actual_bounds, offsets);
  assert(actual_bounds == saxpy_args->bounds);
  const float *y_ptr = (const float*)saxpy_args->y_inst.get_accessor().
    raw_dense_ptr<1>(saxpy_args->bounds, actual_bounds, offsets);
  assert(actual_bounds == saxpy_args->bounds);
  float *z_ptr = (float*)saxpy_args->z_inst.get_accessor().
    raw_dense_ptr<1>(saxpy_args->bounds, actual_bounds, offsets);
  // BUG FIX: z was the only instance whose returned bounds were not
  // validated; keep the check consistent with x and y.
  assert(actual_bounds == saxpy_args->bounds);
  size_t num_elements = actual_bounds.volume();
  size_t cta_threads = 256;
  // Ceil-divide so the final partial block is covered.
  size_t total_ctas = (num_elements + (cta_threads-1))/cta_threads;
  gpu_saxpy<<<total_ctas, cta_threads>>>(saxpy_args->alpha, num_elements,
                                         x_ptr, y_ptr, z_ptr);
  // LOOK: NO WAIT! :)
}
|
bad4ea5fd659b0dcdf780e9bee90c7dea38e706f.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "convolutionZ_63_Kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *d_Dst = NULL;
hipMalloc(&d_Dst, XSIZE*YSIZE);
float *d_Src = NULL;
hipMalloc(&d_Src, XSIZE*YSIZE);
int imageW = 1;
int imageH = 1;
int imageD = 1;
int outofbounds = 1;
float outofboundsvalue = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
convolutionZ_63_Kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, d_Dst,d_Src,imageW,imageH,imageD,outofbounds,outofboundsvalue);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
convolutionZ_63_Kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, d_Dst,d_Src,imageW,imageH,imageD,outofbounds,outofboundsvalue);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
convolutionZ_63_Kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, d_Dst,d_Src,imageW,imageH,imageD,outofbounds,outofboundsvalue);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | bad4ea5fd659b0dcdf780e9bee90c7dea38e706f.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "convolutionZ_63_Kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated benchmark harness: for each requested matrix size and each
// candidate block shape, time 1000 launches of convolutionZ_63_Kernel (after
// a warm-up) and print [microseconds, (BLOCKX,BLOCKY), (XSIZE,YSIZE)].
int main(int argc, char **argv) {
cudaSetDevice(0);
// Robustness: the original dereferenced argv[1] unconditionally.
if (argc < 2) {
fprintf(stderr, "usage: %s <matrix_len>\n", argv[0]);
return 1;
}
char* p;
int matrix_len = strtol(argv[1], &p, 10);
for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
for (int block_looper = 0; block_looper < 20; block_looper++) {
int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
// BUG FIX: the original allocated XSIZE*YSIZE *bytes*; the kernel indexes
// these buffers as float*, so allocate XSIZE*YSIZE floats.
float *d_Dst = NULL;
cudaMalloc(&d_Dst, XSIZE * YSIZE * sizeof(float));
float *d_Src = NULL;
cudaMalloc(&d_Src, XSIZE * YSIZE * sizeof(float));
int imageW = 1;
int imageH = 1;
int imageD = 1;
int outofbounds = 1;
float outofboundsvalue = 1;
// Round the launch domain up to a multiple of the block shape.
int iXSIZE = XSIZE;
int iYSIZE = YSIZE;
while (iXSIZE % BLOCKX != 0) {
iXSIZE++;
}
while (iYSIZE % BLOCKY != 0) {
iYSIZE++;
}
dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0); // force context creation before any timing
convolutionZ_63_Kernel<<<gridBlock,threadBlock>>>(d_Dst,d_Src,imageW,imageH,imageD,outofbounds,outofboundsvalue);
cudaDeviceSynchronize();
// Warm-up launches (not timed).
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
convolutionZ_63_Kernel<<<gridBlock,threadBlock>>>(d_Dst,d_Src,imageW,imageH,imageD,outofbounds,outofboundsvalue);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
convolutionZ_63_Kernel<<<gridBlock,threadBlock>>>(d_Dst,d_Src,imageW,imageH,imageD,outofbounds,outofboundsvalue);
}
// BUG FIX: kernel launches are asynchronous; without a sync the timer only
// measured launch overhead, not kernel execution.
cudaDeviceSynchronize();
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
// BUG FIX: the buffers were previously leaked on every configuration.
cudaFree(d_Dst);
cudaFree(d_Src);
}
}} |
a5ad8167503bccd2268460a6963dd2a7be697052.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include <math.h>
#include <string>
#include "process_image.h"
// Blur one channel of one pixel with a 3x3 binomial (Gaussian-like) filter.
// Arr is interleaved BGR (3 bytes per pixel, row-major); K selects the
// channel. The weight sum is accumulated over in-bounds neighbors only, so
// border pixels remain correctly normalized. The divisor is nonzero as long
// as (Row, Col) itself is in bounds, which the caller's loops guarantee.
__device__
int
AccessPixel(
    _In_ uchar* Arr,
    _In_ int Col,
    _In_ int Row,
    _In_ int K,
    _In_ int Width,
    _In_ int Height
    )
{
    // Symmetric 3x3 weights, so row/column lookup order does not matter.
    const int weights[3][3] = { { 1, 2, 1 },
                                { 2, 4, 2 },
                                { 1, 2, 1 } };
    int weighted = 0;
    int total = 0;
    for (int dy = -1; dy <= 1; dy++)
    {
        for (int dx = -1; dx <= 1; dx++)
        {
            const int r = Row + dy;
            const int c = Col + dx;
            if (r < 0 || r >= Height || c < 0 || c >= Width)
            {
                continue; // neighbor outside the image: skip it
            }
            const int w = weights[dy + 1][dx + 1];
            weighted += w * Arr[(r * Width + c) * 3 + K];
            total += w;
        }
    }
    return weighted / total;
}
// Kernel: each thread blurs whole image rows (striding by the total thread
// count), writing all three channels of every pixel in its rows into Res.
// Img and Res are interleaved 3-byte-per-pixel buffers of Width x Height.
__global__
void CudaPixelWorker(
    _In_ unsigned char* Img,
    _Out_ unsigned char* Res,
    _In_ int Width,
    _In_ int Height
    )
{
    // blockDim.x - dimension of a block (256; must be multiple of 32)
    // blockIdx.x - current block index
    // threadIdx.x - current id of the thread in the 1D array (nvidia GRID)
    // gridDim.x - total blocks
    int startIndex = blockDim.x * blockIdx.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    // Stride over rows so any grid size covers the whole image.
    for (int row = startIndex; row < Height; row += stride)
    {
        for (int col = 0; col < Width; col++)
        {
            // k = channel index (interleaved BGR layout).
            for (int k = 0; k < 3; k++)
            {
                Res[3 * row * Width + 3 * col + k] = AccessPixel(Img, col, row, k, Width, Height);
            }
        }
    }
}
// Read FileName with OpenCV, blur it on the GPU via managed memory, and write
// the result to "output.bmp". Exits with code 2 if the image cannot be read.
void
GaussianBlur2D(
    _In_ const std::string& FileName
    )
{
    cv::Mat3b img = cv::imread(FileName, cv::IMREAD_COLOR);
    cv::Mat3b out(img.rows, img.cols);
    uchar *buffer;
    uchar *buffer2;
    if (!img.data) {
        printf("Failed to read image\n");
        exit(2);
    }
    const size_t imageBytes = (size_t)img.rows * img.cols * 3 * sizeof(uchar);
    // BUG FIX: the original stored both hipMallocManaged results in one
    // variable (named identically to the type!), so the first status was
    // discarded, and execution continued even on failure. Check each call
    // and bail out instead of launching with invalid pointers.
    hipError_t err = hipMallocManaged(&buffer, imageBytes);
    if (err != hipSuccess)
    {
        std::cout << "[Error] - Cuda Error!" << std::endl;
        return;
    }
    err = hipMallocManaged(&buffer2, imageBytes);
    if (err != hipSuccess)
    {
        std::cout << "[Error] - Cuda Error!" << std::endl;
        hipFree(buffer);
        return;
    }
    // NOTE(review): re-pointing cv::Mat::data at managed buffers leaks the
    // Mats' original allocations and assumes cv::Mat never frees these
    // pointers; preserved as-is to keep behavior, but worth replacing with
    // explicit copies in/out.
    out.data = buffer2;
    hipMemcpyAsync(buffer, img.data, img.rows * img.cols * 3, hipMemcpyHostToDevice);
    img.data = buffer;
    int threadsPerBlock = 256;
    // Ceil-divide over rows (the kernel strides over rows internally).
    int totalBlocks = (img.rows + threadsPerBlock - 1) / threadsPerBlock;
    hipLaunchKernelGGL(( CudaPixelWorker), dim3(totalBlocks), dim3(threadsPerBlock), 0, 0, img.data, out.data, img.cols, img.rows);
    hipDeviceSynchronize();
    std::cout << "Trying to save image to output file" << std::endl;
    cv::imwrite("output.bmp", out);
    hipFree(buffer);
    hipFree(buffer2);
}
// Program entry point: announce startup, parse the input file name from the
// command line via Usage(), and run the GPU blur on it.
int main(int argc, char** argv)
{
    std::cout << "Cuda started" << std::endl;
    std::string inputFile;
    Usage(argc, argv, inputFile);
    std::cout << "Filename: " << inputFile << std::endl;
    GaussianBlur2D(inputFile);
    return 0;
} | a5ad8167503bccd2268460a6963dd2a7be697052.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include <math.h>
#include <string>
#include "process_image.h"
__device__
int
AccessPixel(
_In_ uchar* Arr,
_In_ int Col,
_In_ int Row,
_In_ int K,
_In_ int Width,
_In_ int Height
)
{
int kernel[3][3] = { 1, 2, 1,
2, 4, 2,
1, 2, 1 };
int sum = 0;
int sumKernel = 0;
for (int j = -1; j <= 1; j++)
{
for (int i = -1; i <= 1; i++)
{
if ((Row + j) >= 0 && (Row + j) < Height && (Col + i) >= 0 && (Col + i) < Width)
{
int color = Arr[(Row + j) * 3 * Width + (Col + i) * 3 + K];
sum += color * kernel[i + 1][j + 1];
sumKernel += kernel[i + 1][j + 1];
}
}
}
return sum / sumKernel;
}
// Kernel: threads stride over image rows; each thread blurs every pixel of
// its rows, writing all three interleaved channels into Res.
__global__
void CudaPixelWorker(
    _In_ unsigned char* Img,
    _Out_ unsigned char* Res,
    _In_ int Width,
    _In_ int Height
    )
{
    // Row-stride loop: any 1-D grid size covers the full image height.
    const int firstRow = blockDim.x * blockIdx.x + threadIdx.x;
    const int rowStep = blockDim.x * gridDim.x;
    for (int y = firstRow; y < Height; y += rowStep)
    {
        for (int x = 0; x < Width; x++)
        {
            const int pixelBase = 3 * (y * Width + x);
            for (int c = 0; c < 3; c++)
            {
                Res[pixelBase + c] = AccessPixel(Img, x, y, c, Width, Height);
            }
        }
    }
}
// Read FileName with OpenCV, blur it on the GPU via managed memory, and write
// the result to "output.bmp". Exits with code 2 if the image cannot be read.
void
GaussianBlur2D(
    _In_ const std::string& FileName
    )
{
    cv::Mat3b img = cv::imread(FileName, cv::IMREAD_COLOR);
    cv::Mat3b out(img.rows, img.cols);
    uchar *buffer;
    uchar *buffer2;
    if (!img.data) {
        printf("Failed to read image\n");
        exit(2);
    }
    const size_t imageBytes = (size_t)img.rows * img.cols * 3 * sizeof(uchar);
    // BUG FIX: the original overwrote the first cudaMallocManaged status
    // before inspecting it and continued even on failure. Check each call
    // and bail out instead of launching with invalid pointers.
    cudaError_t cudaError = cudaMallocManaged(&buffer, imageBytes);
    if (cudaError != cudaSuccess)
    {
        std::cout << "[Error] - Cuda Error!" << std::endl;
        return;
    }
    cudaError = cudaMallocManaged(&buffer2, imageBytes);
    if (cudaError != cudaSuccess)
    {
        std::cout << "[Error] - Cuda Error!" << std::endl;
        cudaFree(buffer);
        return;
    }
    // NOTE(review): re-pointing cv::Mat::data at managed buffers leaks the
    // Mats' original allocations and assumes cv::Mat never frees these
    // pointers; preserved as-is to keep behavior, but worth replacing with
    // explicit copies in/out.
    out.data = buffer2;
    cudaMemcpyAsync(buffer, img.data, img.rows * img.cols * 3, cudaMemcpyHostToDevice);
    img.data = buffer;
    int threadsPerBlock = 256;
    // Ceil-divide over rows (the kernel strides over rows internally).
    int totalBlocks = (img.rows + threadsPerBlock - 1) / threadsPerBlock;
    CudaPixelWorker<<<totalBlocks, threadsPerBlock>>>(img.data, out.data, img.cols, img.rows);
    cudaDeviceSynchronize();
    std::cout << "Trying to save image to output file" << std::endl;
    cv::imwrite("output.bmp", out);
    cudaFree(buffer);
    cudaFree(buffer2);
}
// Program entry point: parse the input file name from the command line via
// Usage(), then run the GPU blur on it.
int main(int argc, char** argv)
{
    std::string fileName;
    std::cout << "Cuda started" << std::endl;
    // Usage() fills fileName from argv (declared in process_image.h).
    Usage(argc, argv, fileName);
    std::cout << "Filename: " << fileName << std::endl;
    GaussianBlur2D(fileName);
    return 0;
} |
1927ca6e9baf00d108a4274eb4ec9f3211aad910.hip | // !!! This is a file automatically generated by hipify!!!
/*********************************************************************************
* File: BP_CUDA.cu
* Description: Belief Propagation for Ising Model, CUDA parallelized version
* by BreakDS, @ University of Wisconsin-Madison, Fri Aug 31 15:25:19 CDT 2012
*********************************************************************************/
#include "BP_CUDA.h"
#include <hip/hip_runtime.h>
#include "../utils/cuda_aux.hpp"
int host_patch_side;
namespace optimize_cuda
{
__constant__ int g_inc[4];
__constant__ float g_coeff[6];
__constant__ int g_patch_side;
// This function calculate the distance between vector
// ( a0, ..., a5 ) and ( b0, ..., b5 )
// direction should be specified
// __device__ inline float dist_device( const float a0, const float a1,
// const float a2, const float a3,
// const float a4, const float a5,
// const float b0, const float b1,
// const float b2, const float b3,
// const float b4, const float b5,
// int direction )
// {
// float tmp;
// // [0] = image index
// tmp = fabsf( b0 - a0 );
// if ( tmp > 0.5 ) return 150000.0;
// // [1], [2] = dy, dx (rotation representation)
// tmp = fabsf( a1 - b1 ) + fabsf( a2 - b2 );
// if ( tmp > 1.0 ) {
// return 150000.0;
// }
// // [4],[5] = spatial distance
// // Should be compensated by the rotation (dy,dx)
// // Note that dy = b1 dx = b2 by definition
// float ay(a4), ax(a5);
// if ( 0 == direction ) {
// ay -= b2;
// ax -= b1;
// } else if ( 1 == direction ) {
// ay += b1;
// ax -= b2;
// } else if ( 2 == direction ) {
// ay += b2;
// ax += b1;
// } else if ( 3 == direction ) {
// ay -= b1;
// ax += b2;
// }
// tmp = fabsf( ay - b4 ) + fabsf( ax - b5 );
// if ( tmp > g_patch_side ) {
// return 150000.0;
// }
// // [1],[2] = spatial distance
// float sum = tmp * g_coeff[4];
// sum += fabsf( b1 - a1 ) * g_coeff[1];
// sum += fabsf( b2 - a2 ) * g_coeff[2];
// sum += fabsf( b3 - a3 ) * g_coeff[3];
// return sum;
// }
// // The host version of dist_device()
// inline float dist_host( const float a0, const float a1,
// const float a2, const float a3,
// const float a4, const float a5,
// const float b0, const float b1,
// const float b2, const float b3,
// const float b4, const float b5,
// int direction )
// {
// const float coeff[6] = { 0.0, 30.0, 30.0, 10.0, 1.0, 1.0 };
// float tmp;
// // [0] = image index
// tmp = fabsf( b0 - a0 );
// if ( tmp > 1.0 ) return 150000.0;
// // [1], [2] = dy, dx (rotation representation)
// tmp = fabsf( a1 - b1 ) + fabsf( a2 - b2 );
// if ( tmp > 1.0 ) {
// return 150000.0;
// }
// // [4],[5] = spatial distance
// // Should be compensated by the rotation (dy,dx)
// // Note that dy = b1 dx = b2 by definition
// float ay(a4), ax(a5);
// if ( 0 == direction ) {
// ay -= b2;
// ax -= b1;
// } else if ( 1 == direction ) {
// ay += b1;
// ax -= b2;
// } else if ( 2 == direction ) {
// ay += b2;
// ax += b1;
// } else if ( 3 == direction ) {
// ay -= b1;
// ax += b2;
// }
// tmp = fabsf( ay - b4 ) + fabsf( ax - b5 );
// if ( tmp > host_patch_side ) {
// return 150000.0;
// }
// // [1],[2] = spatial distance
// float sum = tmp * coeff[4];
// sum += fabsf( b1 - a1 ) * coeff[1];
// sum += fabsf( b2 - a2 ) * coeff[2];
// sum += fabsf( b3 - a3 ) * coeff[3];
// return sum;
// }
  // Device-side distance between two 6-float labels a = (a0..a5) and
  // b = (b0..b5), weighted by the __constant__ g_coeff array.
  // Returns 500.0 as an "infinite" penalty when the labels come from
  // different images (|b0-a0| > 0.5) or their (dy,dx) rotation components
  // differ by more than 1.5 in L1; otherwise a g_coeff-weighted L1 distance
  // over components 1, 2, 4 and 5. Components a3/b3 (and g_coeff[0],
  // g_coeff[3]) are intentionally unused in this variant — compare with the
  // commented-out older implementation above.
  __device__ inline float dist_device( const float a0, const float a1,
                                       const float a2, const float a3,
                                       const float a4, const float a5,
                                       const float b0, const float b1,
                                       const float b2, const float b3,
                                       const float b4, const float b5 )
  {
    float tmp;
    // [0] = image index: reject cross-image pairs early.
    tmp = fabsf( b0 - a0 );
    if ( tmp > 0.5 ) return 500.0;
    // [1],[2] = rotation (dy,dx): reject large mismatches early.
    tmp = fabsf( a1 - b1 ) + fabsf( a2 - b2 );
    if ( tmp > 1.5 ) {
      return 500.0;
    }
    // Weighted L1 over rotation and spatial-offset components.
    tmp = 0.0f;
    tmp += fabsf( b1 - a1 ) * g_coeff[1];
    tmp += fabsf( b2 - a2 ) * g_coeff[2];
    tmp += fabsf( b4 - a4 ) * g_coeff[4];
    tmp += fabsf( b5 - a5 ) * g_coeff[5];
    return tmp;
  }
// The host version of dist_device()
inline float dist_host( const float a0, const float a1,
const float a2, const float a3,
const float a4, const float a5,
const float b0, const float b1,
const float b2, const float b3,
const float b4, const float b5 )
{
const float coeff[6] = { 0.0, 30.0, 30.0, 10.0, 1.0, 1.0 };
float tmp;
// [0] = image index
tmp = fabsf( b0 - a0 );
if ( tmp > 1.0 ) return 500.0;
tmp = fabsf( a1 - b1 ) + fabsf( a2 - b2 );
if ( tmp > 1.5 ) {
return 500.0;
}
tmp = 0.0f;
tmp += fabsf( b1 - a1 ) * coeff[1];
tmp += fabsf( b2 - a2 ) * coeff[2];
tmp += fabsf( b4 - a4 ) * coeff[4];
tmp += fabsf( b5 - b5 ) * coeff[5];
return tmp;
}
// normalize messages of a node (pixel) with the same direction (sum to 0)
// agentNum: 4 * #pixel (4 = number of directions)
// msg: the message array [dir(4), pixel(#pixel), k(K)]
// K: # of candidate labels
__global__ void NormalizeMsg_device( const int agentNum,
float *msg,
int K )
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if ( idx < agentNum ) {
float offset = 0.0f;
for ( int k=0; k<K; k++ ) {
offset += msg[ idx * K + k ];
}
offset /= K;
for ( int k=0; k<K; k++ ) {
msg[ idx * K + k ] -= offset;
}
}
}
  // Update the result array (one candidate label id for each pixel).
  // For each pixel, pick the label k minimizing its belief:
  //   belief(k) = D[pixel][k] + sum over the 4 directions of msg[dir][pixel][k]
  // agentNum: # of pixels
  // D: self distance matrix [pixel(# of pixels), k(K)]
  // msg: the message array [dir(4), pixel(# of pixels), k(K)]
  // result: the result array [pixel(# of pixels)], receives the argmin index
  // K: # of labels per pixel
  __global__ void UpdateResult_device( const int agentNum, const float *D,
                                       const float* msg,
                                       int *result, int K )
  {
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if ( idx < agentNum ) {
      result[idx] = 0;
      float min = 0.0f;
      for ( int k=0; k<K; k++ ) {
        // belief for label k: data term plus all incoming messages
        float sum = D[idx*K+k];
        for ( int dir=0; dir<4; dir++ ) {
          sum += msg[ ( dir * agentNum + idx ) * K + k ];
        }
        // running argmin; k == 0 just seeds min (result is already 0)
        if ( 0 == k ) {
          min = sum;
        } else if ( sum < min ) {
          min = sum;
          result[idx] = k;
        }
      }
    }
  }
  // Host-side total MRF energy for the current labeling:
  //   sum over pixels of D[pixel][result[pixel]]
  //   + lambda * sum over UP and LEFT neighbor pairs of dist_host(...)
  // Only UP/LEFT edges are visited so each undirected edge is counted once.
  // label is laid out [pixel, k(K), component(dim)]; result holds the chosen
  // label index per pixel.
  double UpdateEnergy( const float *D, const float *label, int height, int width, int K,
                       int dim, float lambda, int *result )
  {
    // constants: pixel-index and label-array offsets for the 4 directions
    // (UP, LEFT, DOWN, RIGHT)
    const int inc[4] = { -width, -1, width, 1 };
    const int incDim[4] = {-width*K*dim,-K*dim,width*K*dim,K*dim};
    double energy = 0.0;
    int i = 0;
    // labelp tracks the label block of pixel i (advances K*dim per pixel)
    const float *labelp = label;
    for ( int y=0; y<height; y++ ) {
      for ( int x=0; x<width; x++ ) {
        // data term for the chosen label of pixel i
        energy += D[i*K+result[i]];
        // UP:
        int d = 0;
        if ( y > 0 ) {
          const float *lp0 = labelp + result[i] * dim;
          const float *lp1 = labelp + incDim[d] + result[i+inc[d]] * dim;
          energy += dist_host( lp0[0], lp0[1], lp0[2], lp0[3], lp0[4], lp0[5],
                               lp1[0], lp1[1], lp1[2], lp1[3], lp1[4], lp1[5] ) * lambda;
        }
        // LEFT:
        d = 1;
        if ( x > 0 ) {
          const float *lp0 = labelp + result[i] * dim;
          const float *lp1 = labelp + incDim[d] + result[i+inc[d]] * dim;
          energy += dist_host( lp0[0], lp0[1], lp0[2], lp0[3], lp0[4], lp0[5],
                               lp1[0], lp1[1], lp1[2], lp1[3], lp1[4], lp1[5] ) * lambda;
        }
        i++;
        labelp += K * dim;
      }
    }
    return energy;
  }
  // Main Kernel Function, which updates messages
  // Currently this function takes 82.3% of the total cuda computation time
  //
  // Each (threadIdx.x, threadIdx.y) pair handles one scanline agent and one
  // candidate label k; the agent walks its row/column in propagation order,
  // computing for each pixel the outgoing min-sum message
  //   msg[dir][i+inc][k] = min over k0 of
  //     ( lambda * distance[k0*K+k][dir][i] + h[k0] )
  // where h[k0] = D[i][k0] + incoming messages from all directions except the
  // opposite one. h lives in dynamic shared memory, one row per label.
  //
  // agentNum: columns or rows, depending on the direction
  // D: self distance matrix [pixel(# of pixels), k(K)];
  // msg: the message array [dir(4), pixel(#pixel), k(K)]
  // distance: inter-pixel label distance matrix [k(K), dir(4), pixel(# of pixels)]
  // begins: array [pixel(agentNum)], where begins[i] is the first pixel of agent i
  // ends: array [pixel(agentNum)], where ends[i] is the last (excluded) pixel of agent i
  // K: # of candidate labels
  // lambda: coefficient of the inter-pixel label distance term
  // dim: dimension of a label (unused here)
  // dir: current propagation direction
  // inc: pixel increment in this direction
  // incK: pixel increment, K times as larger as inc
  // area: # of pixels
  //
  // NOTE(review): the __syncthreads() calls below sit inside the
  // "if ( idx < agentNum )" guard. Threads of the same block with
  // idx >= agentNum never reach the barrier, which is undefined behavior
  // whenever agentNum is not a multiple of blockDim.x — confirm the launch
  // configuration, or hoist the guard so all threads hit the barriers.
  __global__ void UpdateMessage_device( int agentNum, // number of agents needes
                                        float *D,
                                        float* msg,
                                        float* distance,
                                        int *begins,
                                        int *ends,
                                        int K,
                                        float lambda,
                                        int dim,
                                        int dir,
                                        int inc,
                                        int incK,
                                        int area )
  {
    extern __shared__ float buf[];
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if ( idx < agentNum ) {
      int k = threadIdx.y;
      float *h = buf;
      // direction opposite to dir (its message is excluded from h)
      int opp = (dir+2) & 3;
      float *Dp = D + begins[idx] * K;
      int end = ends[idx];
      for ( int i=begins[idx]; i!=end; i+=inc, Dp+=incK ) {
        // h[k][agent] = data term + incoming messages except from opposite
        h[ k * blockDim.x + threadIdx.x ] = Dp[k];
        for ( int j=0; j<4; j++ ) {
          if ( j != opp ) {
            h[ k * blockDim.x + threadIdx.x ] += msg[ j * area * K + i * K + k ];
          }
        }
        __syncthreads();
        // min-sum over source labels k0 (the k0 == 0 term seeds min)
        float min = distance[ ( k * 4 + dir ) * area + i ] * lambda + h[ threadIdx.x ];
        for ( int k0=1; k0<K; k0++ ) {
          __syncthreads();
          float value = distance[ ( ( k0 * K + k ) * 4 + dir ) * area + i ] * lambda +
            h[ k0 * blockDim.x + threadIdx.x];
          if ( value < min ) min = value;
        }
        // write the message to the next pixel along the propagation path
        msg[ dir * area * K + ( i + inc ) * K + k ] = min;
      } // end for i
    }
  }
  // Precompute the distance matrix that will be heavily used later.
  // One thread per (pixel, dir, k0, k1): blockIdx.y selects the direction,
  // threadIdx.y/threadIdx.z select the two candidate-label indices.
  // area: # of pixels, also serves as the agentNum
  // label: the label matrix [pixel(area), k(K), component(dim)]
  // distance: inter-pixel label distance matrix [k0*K+k1(K*K), dir(4), pixel(area)]
  // dim: dimension of each label
  // K: # of candidate labels per pixel
  __global__ void Precomputing_agent_float( int area,
                                            float *label,
                                            float *distance,
                                            int dim,
                                            int K )
  {
    int pixel = blockDim.x * blockIdx.x + threadIdx.x;
    if ( pixel < area ) {
      int dir = blockIdx.y;
      // int k0 = threadIdx.y;
      // int k1 = threadIdx.z;
      int idx = ( ( threadIdx.y * K + threadIdx.z ) * 4 + dir ) * area + pixel;
      // overflow checking: skip neighbors that fall outside the pixel array.
      // NOTE(review): for horizontal directions (g_inc = ±1) this only checks
      // the flat array bound, so column-0 / last-column pixels pair with the
      // adjacent row's edge pixel — confirm this wrap is intended.
      if ( pixel + g_inc[dir] < 0 || pixel + g_inc[dir] >= area ) {
        return ;
      }
      // lp0 = label (k0) of this pixel; lp1 = label (k1) of the dir-neighbor
      float *lp0 = label + ( pixel * K + threadIdx.y ) * dim;
      float *lp1 = label + ( ( pixel + g_inc[dir] ) * K + threadIdx.z ) * dim;
      distance[idx] = dist_device( lp0[0], lp0[1], lp0[2], lp0[3], lp0[4], lp0[5],
                                   lp1[0], lp1[1], lp1[2], lp1[3], lp1[4], lp1[5] );
    }
  }
double LoopyBP( const float *D,
const float *label,
const int height,
const int width,
const int K,
const int dim,
int *result,
Options options, float* msgBuf )
{
const float coeff[6] = { 0.0, 30.0, 30.0, 10.0, 1.0, 1.0 };
HANDLE_ERROR( hipMemcpyToSymbol( optimize_cuda::g_coeff, &coeff, sizeof(float) * 6, 0, hipMemcpyHostToDevice ) );
int patchSide = 17;
HANDLE_ERROR( hipMemcpyToSymbol( optimize_cuda::g_patch_side, &patchSide, sizeof(int), 0, hipMemcpyHostToDevice ) );
host_patch_side = patchSide;
const int inc[4] = { -width, -1, width, 1 };
HANDLE_ERROR( hipMemcpyToSymbol( optimize_cuda::g_inc, inc, sizeof(int) * 4, 0, hipMemcpyHostToDevice ) );
const int incK[4] = {-width*K,-K,width*K,K};
const int order[4] = {2,0,3,1}; // DOWN, UP, RIGHT, LEFT
const int area = height * width;
// Make sure that the message storage is allocated
float *buf = msgBuf;
if ( NULL == buf ) {
// Message buffer is not provided externally
buf = new float[4*width*height*K];
}
// Initialization of device memory
// Data term array
float *devD = NULL;
HANDLE_ERROR( hipMalloc( (void**) &devD, sizeof(float) * area * K ) );
HANDLE_ERROR( hipMemcpy( devD, D, sizeof(float) * area * K, hipMemcpyHostToDevice ) );
// Labels array
float *devLabel = NULL;
HANDLE_ERROR( hipMalloc( (void**) &devLabel, sizeof(float) * area * K * dim ) );
HANDLE_ERROR( hipMemcpy( devLabel, label, sizeof(float) * area * K * dim, hipMemcpyHostToDevice ) );
// Messages
// After these lines, msg[0] = up, msg[1] = left, msg[2] = right, msg[3] = down
float *devMsg = NULL;
HANDLE_ERROR( hipMalloc( (void**) &devMsg, sizeof(float) * area * K * 4 ) );
HANDLE_ERROR( hipMemset( devMsg, 0, sizeof(float) * area * K * 4 ) );
// begins and ends
int longer = height > width ? height : width;
int *devBegins = NULL;
int *devEnds = NULL;
int begins[longer];
int ends[longer];
HANDLE_ERROR( hipMalloc( (void**) &devBegins, sizeof(int) * longer ) );
HANDLE_ERROR( hipMalloc( (void**) &devEnds, sizeof(int) * longer ) );
// Result
int *devResult = NULL;
HANDLE_ERROR( hipMalloc( (void**) &devResult, sizeof(int) * area * K ) );
// Intermediate Distance Calculation
float *devDistance = NULL;
HANDLE_ERROR( hipMalloc( (void**) &devDistance, sizeof(float) * K * K * 4 * area ) );
dim3 precompute_grid( ( area + 1 ) / 8, 4 );
dim3 precompute_block( 8, K, K );
hipLaunchKernelGGL(( Precomputing_agent_float), dim3(precompute_grid), dim3(precompute_block), 0, 0, area,
devLabel,
devDistance,
dim,
K );
HANDLE_ERROR( hipDeviceSynchronize() );
double energy(0);
for ( int iter=0; iter<options.maxIter; iter++ ) {
for ( int dirID=0; dirID<4; dirID++ ) {
int dir = order[dirID];
// fill in begins and ends
int agentNum = ( 0 == ( dir & 1 ) ) ? width : height;
if ( 0 == dir ) {
// UP:
for ( int scan=0; scan<agentNum; scan++ ) {
begins[scan] = scan + width * ( height - 1 );
ends[scan] = scan;
}
} else if ( 1 == dir ) {
// LEFT:
for ( int scan=0; scan<agentNum; scan++ ) {
begins[scan] = width * scan + width - 1;
ends[scan] = width * scan;
}
} else if ( 2 == dir ) {
// DOWN:
for ( int scan=0; scan<agentNum; scan++ ) {
begins[scan] = scan;
ends[scan] = scan + width * ( height - 1 );
}
} else if ( 3 == dir ) {
// RIGHT:
for ( int scan=0; scan<agentNum; scan++ ) {
begins[scan] = scan * width;
ends[scan] = scan * width + width - 1;
}
}
HANDLE_ERROR( hipMemcpy( devBegins, begins, sizeof(int) * agentNum, hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( devEnds, ends, sizeof(int) * agentNum, hipMemcpyHostToDevice ) );
// call Kernel Function 1
int blockSize = 8;
dim3 blockDim( blockSize, K );
int shMemSizePerBlock= blockSize * K * sizeof(float);
hipLaunchKernelGGL(( UpdateMessage_device), dim3((agentNum/blockSize)+1),dim3(blockDim), shMemSizePerBlock, 0, agentNum,
devD,
devMsg,
devDistance,
devBegins,
devEnds,
K,
options.lambda,
dim, dir, inc[dir],
incK[dir],
area );
HANDLE_ERROR( hipDeviceSynchronize() );
hipLaunchKernelGGL(( NormalizeMsg_device), dim3((width*height*4)/128+1),dim3(128), 0, 0, width * height * 4,
devMsg,
K );
if ( 1 <= options.verbose ) {
hipLaunchKernelGGL(( UpdateResult_device), dim3((width*height+1)/64),dim3(64), 0, 0, width * height,
devD,
devMsg,
devResult,
K );
HANDLE_ERROR( hipMemcpy( result, devResult, sizeof(int) * width * height, hipMemcpyDeviceToHost ) );
energy = UpdateEnergy( D, label, height, width, K, dim, options.lambda, result );
printf( "Iteration %d: energy = %.5lf\n", iter, energy );
}
} // end for dir
}
hipLaunchKernelGGL(( UpdateResult_device), dim3((width*height)/64+1),dim3(64), 0, 0, width * height,
devD,
devMsg,
devResult,
K );
HANDLE_ERROR( hipMemcpy( result, devResult, sizeof(int) * width * height, hipMemcpyDeviceToHost ) );
energy = UpdateEnergy( D, label, height, width, K, dim, options.lambda, result );
printf( "Final energy = %.5lf\n", energy );
// Free Cuda Memory
if ( devD ) HANDLE_ERROR( hipFree( devD ) );
if ( devLabel ) HANDLE_ERROR( hipFree( devLabel ) );
if ( devMsg ) HANDLE_ERROR( hipFree( devMsg ) );
if ( devBegins ) HANDLE_ERROR( hipFree( devBegins ) );
if ( devEnds ) HANDLE_ERROR( hipFree( devEnds ) );
if ( devResult ) HANDLE_ERROR( hipFree( devResult ) );
if ( devDistance ) HANDLE_ERROR( hipFree( devDistance ) );
HANDLE_ERROR( hipDeviceSynchronize() );
return energy;
}
};
| 1927ca6e9baf00d108a4274eb4ec9f3211aad910.cu | /*********************************************************************************
* File: BP_CUDA.cu
* Description: Belief Propagation for Ising Model, CUDA parallelized version
* by BreakDS, @ University of Wisconsin-Madison, Fri Aug 31 15:25:19 CDT 2012
*********************************************************************************/
#include "BP_CUDA.h"
#include <cuda.h>
#include "../utils/cuda_aux.hpp"
int host_patch_side;
namespace optimize_cuda
{
__constant__ int g_inc[4];
__constant__ float g_coeff[6];
__constant__ int g_patch_side;
// This function calculate the distance between vector
// ( a0, ..., a5 ) and ( b0, ..., b5 )
// direction should be specified
// __device__ inline float dist_device( const float a0, const float a1,
// const float a2, const float a3,
// const float a4, const float a5,
// const float b0, const float b1,
// const float b2, const float b3,
// const float b4, const float b5,
// int direction )
// {
// float tmp;
// // [0] = image index
// tmp = fabsf( b0 - a0 );
// if ( tmp > 0.5 ) return 150000.0;
// // [1], [2] = dy, dx (rotation representation)
// tmp = fabsf( a1 - b1 ) + fabsf( a2 - b2 );
// if ( tmp > 1.0 ) {
// return 150000.0;
// }
// // [4],[5] = spatial distance
// // Should be compensated by the rotation (dy,dx)
// // Note that dy = b1 dx = b2 by definition
// float ay(a4), ax(a5);
// if ( 0 == direction ) {
// ay -= b2;
// ax -= b1;
// } else if ( 1 == direction ) {
// ay += b1;
// ax -= b2;
// } else if ( 2 == direction ) {
// ay += b2;
// ax += b1;
// } else if ( 3 == direction ) {
// ay -= b1;
// ax += b2;
// }
// tmp = fabsf( ay - b4 ) + fabsf( ax - b5 );
// if ( tmp > g_patch_side ) {
// return 150000.0;
// }
// // [1],[2] = spatial distance
// float sum = tmp * g_coeff[4];
// sum += fabsf( b1 - a1 ) * g_coeff[1];
// sum += fabsf( b2 - a2 ) * g_coeff[2];
// sum += fabsf( b3 - a3 ) * g_coeff[3];
// return sum;
// }
// // The host version of dist_device()
// inline float dist_host( const float a0, const float a1,
// const float a2, const float a3,
// const float a4, const float a5,
// const float b0, const float b1,
// const float b2, const float b3,
// const float b4, const float b5,
// int direction )
// {
// const float coeff[6] = { 0.0, 30.0, 30.0, 10.0, 1.0, 1.0 };
// float tmp;
// // [0] = image index
// tmp = fabsf( b0 - a0 );
// if ( tmp > 1.0 ) return 150000.0;
// // [1], [2] = dy, dx (rotation representation)
// tmp = fabsf( a1 - b1 ) + fabsf( a2 - b2 );
// if ( tmp > 1.0 ) {
// return 150000.0;
// }
// // [4],[5] = spatial distance
// // Should be compensated by the rotation (dy,dx)
// // Note that dy = b1 dx = b2 by definition
// float ay(a4), ax(a5);
// if ( 0 == direction ) {
// ay -= b2;
// ax -= b1;
// } else if ( 1 == direction ) {
// ay += b1;
// ax -= b2;
// } else if ( 2 == direction ) {
// ay += b2;
// ax += b1;
// } else if ( 3 == direction ) {
// ay -= b1;
// ax += b2;
// }
// tmp = fabsf( ay - b4 ) + fabsf( ax - b5 );
// if ( tmp > host_patch_side ) {
// return 150000.0;
// }
// // [1],[2] = spatial distance
// float sum = tmp * coeff[4];
// sum += fabsf( b1 - a1 ) * coeff[1];
// sum += fabsf( b2 - a2 ) * coeff[2];
// sum += fabsf( b3 - a3 ) * coeff[3];
// return sum;
// }
// Weighted L1 distance between two 6-component labels (device version).
// Reads the coefficient table from constant memory (g_coeff) and early-outs
// with a large penalty (500.0) when the labels belong to different images
// ([0] = image index) or their rotation components ([1],[2]) differ too much.
__device__ inline float dist_device( const float a0, const float a1,
                                     const float a2, const float a3,
                                     const float a4, const float a5,
                                     const float b0, const float b1,
                                     const float b2, const float b3,
                                     const float b4, const float b5 )
{
  // [0] = image index: labels from different images never match
  if ( fabsf( b0 - a0 ) > 0.5 ) return 500.0;
  // [1],[2] = rotation (dy,dx): reject large rotation differences
  const float rot_gap = fabsf( a1 - b1 ) + fabsf( a2 - b2 );
  if ( rot_gap > 1.5 ) return 500.0;
  // Weighted sum over rotation and spatial components (a3/b3 unused here)
  const float dist = fabsf( b1 - a1 ) * g_coeff[1]
                   + fabsf( b2 - a2 ) * g_coeff[2]
                   + fabsf( b4 - a4 ) * g_coeff[4]
                   + fabsf( b5 - a5 ) * g_coeff[5];
  return dist;
}
// The host version of dist_device()
// Host-side counterpart of dist_device(): weighted L1 distance between two
// 6-component labels (a0..a5) and (b0..b5), used by UpdateEnergy().
// Returns a large penalty (500.0) when the image indices ([0]) differ or the
// rotation components ([1],[2]) are too far apart.
inline float dist_host( const float a0, const float a1,
                        const float a2, const float a3,
                        const float a4, const float a5,
                        const float b0, const float b1,
                        const float b2, const float b3,
                        const float b4, const float b5 )
{
  const float coeff[6] = { 0.0, 30.0, 30.0, 10.0, 1.0, 1.0 };
  float tmp;
  // [0] = image index
  tmp = fabsf( b0 - a0 );
  if ( tmp > 1.0 ) return 500.0;
  // [1],[2] = rotation (dy,dx)
  tmp = fabsf( a1 - b1 ) + fabsf( a2 - b2 );
  if ( tmp > 1.5 ) {
    return 500.0;
  }
  tmp = 0.0f;
  tmp += fabsf( b1 - a1 ) * coeff[1];
  tmp += fabsf( b2 - a2 ) * coeff[2];
  tmp += fabsf( b4 - a4 ) * coeff[4];
  // BUG FIX: was fabsf( b5 - b5 ), which is always 0 and silently dropped
  // the 6th component from the host-side energy; dist_device() uses (b5 - a5).
  tmp += fabsf( b5 - a5 ) * coeff[5];
  return tmp;
}
// Zero-center the K messages of each agent (one agent = one (pixel, direction)
// pair, so agentNum = 4 * #pixels).
// msg: the message array [dir(4), pixel(#pixel), k(K)]
// K:   # of candidate labels
__global__ void NormalizeMsg_device( const int agentNum,
                                     float *msg,
                                     int K )
{
  const int agent = blockDim.x * blockIdx.x + threadIdx.x;
  if ( agent >= agentNum ) return;   // grid tail guard
  float *m = msg + agent * K;
  // Subtract the mean so the K entries sum to (approximately) zero.
  float mean = 0.0f;
  for ( int k = 0; k < K; k++ ) mean += m[k];
  mean /= K;
  for ( int k = 0; k < K; k++ ) m[k] -= mean;
}
// Select, for every pixel, the candidate label with the minimal belief and
// store its index:  belief(k) = D(pixel,k) + sum over the 4 directions of
// the incoming message.
// agentNum: # of pixels
// D:      self distance matrix [pixel(agentNum), k(K)]
// msg:    the message array [dir(4), pixel(agentNum), k(K)]
// result: the result array [pixel(agentNum)]
// K:      # of labels per pixel
__global__ void UpdateResult_device( const int agentNum, const float *D,
                                     const float* msg,
                                     int *result, int K )
{
  const int pixel = blockDim.x * blockIdx.x + threadIdx.x;
  if ( pixel >= agentNum ) return;
  int best_k = 0;
  float best = 0.0f;
  for ( int k = 0; k < K; k++ ) {
    float belief = D[ pixel * K + k ];
    for ( int dir = 0; dir < 4; dir++ ) {
      belief += msg[ ( dir * agentNum + pixel ) * K + k ];
    }
    // Strict '<' keeps the lowest index on ties, matching the original.
    if ( 0 == k || belief < best ) {
      best   = belief;
      best_k = k;
    }
  }
  result[pixel] = best_k;
}
// Host-side recomputation of the total energy for the current labeling:
// sum over pixels of the data term D plus lambda-weighted pairwise terms
// against the UP and LEFT neighbors (each undirected edge counted once).
// result[i] selects which of the K candidate labels pixel i currently uses;
// label is laid out as [pixel, k(K), component(dim)] — dist_host reads
// components 0..5, so dim is assumed >= 6 (TODO confirm at call sites).
double UpdateEnergy( const float *D, const float *label, int height, int width, int K,
                     int dim, float lambda, int *result )
{
  // constants
  const int inc[4] = { -width, -1, width, 1 };                     // pixel offset per direction (UP, LEFT, DOWN, RIGHT)
  const int incDim[4] = {-width*K*dim,-K*dim,width*K*dim,K*dim};   // float offset of the neighbor's label block
  double energy = 0.0;
  int i = 0;                    // linear pixel index
  const float *labelp = label;  // start of pixel i's K*dim label block
  for ( int y=0; y<height; y++ ) {
    for ( int x=0; x<width; x++ ) {
      // Data term for the currently selected label of pixel i.
      energy += D[i*K+result[i]];
      // UP:
      int d = 0;
      if ( y > 0 ) {
        const float *lp0 = labelp + result[i] * dim;
        const float *lp1 = labelp + incDim[d] + result[i+inc[d]] * dim;
        energy += dist_host( lp0[0], lp0[1], lp0[2], lp0[3], lp0[4], lp0[5],
                             lp1[0], lp1[1], lp1[2], lp1[3], lp1[4], lp1[5] ) * lambda;
      }
      // LEFT:
      d = 1;
      if ( x > 0 ) {
        const float *lp0 = labelp + result[i] * dim;
        const float *lp1 = labelp + incDim[d] + result[i+inc[d]] * dim;
        energy += dist_host( lp0[0], lp0[1], lp0[2], lp0[3], lp0[4], lp0[5],
                             lp1[0], lp1[1], lp1[2], lp1[3], lp1[4], lp1[5] ) * lambda;
      }
      i++;
      labelp += K * dim;   // advance to the next pixel's label block
    }
  }
  return energy;
}
// Main Kernel Function, which updates messages along one scanline direction.
// Currently this function takes 82.3% of the total cuda computation time.
//
// Each block-x thread handles one agent (a row or column, depending on dir);
// threadIdx.y indexes the K candidate labels.  The dynamic shared buffer
// `buf` holds blockDim.x * K floats: the per-label partial beliefs h(k).
//
// agentNum: columns or rows, depending on the direction
// D: self distance matrix [pixel(# of pixels), k(K)];
// msg: the message array [dir(4), pixel(#pixel), k(K)]
// distance: inter-pixel label distance matrix (indexed ((k0*K+k1)*4+dir)*area+pixel)
// begins: array [pixel(agentNum)], where begins[i] is the first pixel of agent i
// ends: array [pixel(agentNum)], where ends[i] is the last (excluded) pixel of agent i
// K: # of candidate labels
// lambda: coefficient of the inter-pixel label distance term
// dim: dimension of a label (unused in this kernel)
// dir: current propagation direction
// inc: pixel increment in this direction
// incK: pixel increment, K times as large as inc
// area: # of pixels
//
// NOTE(review): __syncthreads() is executed inside `if ( idx < agentNum )`.
// If agentNum is not a multiple of blockDim.x, tail threads of the last block
// skip the barrier while the others wait — undefined behavior. Confirm the
// launch configuration guarantees all threads of a block take the same path.
__global__ void UpdateMessage_device( int agentNum, // number of agents needes
                                      float *D,
                                      float* msg,
                                      float* distance,
                                      int *begins,
                                      int *ends,
                                      int K,
                                      float lambda,
                                      int dim,
                                      int dir,
                                      int inc,
                                      int incK,
                                      int area )
{
  extern __shared__ float buf[];
  int idx = blockDim.x * blockIdx.x + threadIdx.x;
  if ( idx < agentNum ) {
    int k = threadIdx.y;            // candidate label handled by this thread
    float *h = buf;
    int opp = (dir+2) & 3;          // opposite direction, excluded from h
    float *Dp = D + begins[idx] * K;
    int end = ends[idx];
    // Walk the scanline; Dp tracks the data-term block of pixel i.
    for ( int i=begins[idx]; i!=end; i+=inc, Dp+=incK ) {
      // h(k) = D(i,k) + sum of messages from every direction except `opp`.
      h[ k * blockDim.x + threadIdx.x ] = Dp[k];
      for ( int j=0; j<4; j++ ) {
        if ( j != opp ) {
          h[ k * blockDim.x + threadIdx.x ] += msg[ j * area * K + i * K + k ];
        }
      }
      __syncthreads();
      // Outgoing message to pixel i+inc:
      //   min over source labels k0 of  distance(k0, k) * lambda + h(k0)
      // (first term below is the k0 == 0 case).
      float min = distance[ ( k * 4 + dir ) * area + i ] * lambda + h[ threadIdx.x ];
      for ( int k0=1; k0<K; k0++ ) {
        __syncthreads();
        float value = distance[ ( ( k0 * K + k ) * 4 + dir ) * area + i ] * lambda +
                      h[ k0 * blockDim.x + threadIdx.x];
        if ( value < min ) min = value;
      }
      msg[ dir * area * K + ( i + inc ) * K + k ] = min;
    } // end for i
  }
}
// Precompute the pairwise label-distance matrix that will be heavily used later.
// area: # of pixels, also serves as the agentNum
// label: the label matrix [pixel(area), k(K), component(dim)]
// distance: inter-pixel label distance matrix, indexed as
//           ((k0 * K + k1) * 4 + dir) * area + pixel
// dim: dimension of each label (dist_device reads components 0..5, so dim >= 6
//      is assumed — TODO confirm at call sites)
// K: # of candidate labels per pixel
// Launch layout (per caller): grid = (#pixel-blocks, 4 directions),
//                             block = (pixels-per-block, K, K).
__global__ void Precomputing_agent_float( int area,
                                          float *label,
                                          float *distance,
                                          int dim,
                                          int K )
{
  int pixel = blockDim.x * blockIdx.x + threadIdx.x;
  if ( pixel < area ) {
    int dir = blockIdx.y;   // one grid-y slice per direction
    // int k0 = threadIdx.y;
    // int k1 = threadIdx.z;
    int idx = ( ( threadIdx.y * K + threadIdx.z ) * 4 + dir ) * area + pixel;
    // overflow checking: neighbor falls outside the pixel range.
    // NOTE(review): this checks the linear index only, so horizontal neighbors
    // can wrap across row boundaries — confirm this is intended.
    if ( pixel + g_inc[dir] < 0 || pixel + g_inc[dir] >= area ) {
      return ;
    }
    float *lp0 = label + ( pixel * K + threadIdx.y ) * dim;
    float *lp1 = label + ( ( pixel + g_inc[dir] ) * K + threadIdx.z ) * dim;
    distance[idx] = dist_device( lp0[0], lp0[1], lp0[2], lp0[3], lp0[4], lp0[5],
                                 lp1[0], lp1[1], lp1[2], lp1[3], lp1[4], lp1[5] );
  }
}
// Runs loopy belief propagation on the GPU and returns the final energy.
// D:       data term [pixel(area), k(K)]
// label:   candidate labels [pixel(area), k(K), component(dim)]
// result:  out — selected label index per pixel [area]
// msgBuf:  optional externally provided message buffer (may be NULL)
// Fixes vs. original: two under-launched kernel grids (ceil-division) and a
// leak of the locally allocated message buffer.
double LoopyBP( const float *D,
                const float *label,
                const int height,
                const int width,
                const int K,
                const int dim,
                int *result,
                Options options, float* msgBuf )
{
  // Mirror the distance coefficients and patch size into constant memory.
  const float coeff[6] = { 0.0, 30.0, 30.0, 10.0, 1.0, 1.0 };
  HANDLE_ERROR( cudaMemcpyToSymbol( optimize_cuda::g_coeff, &coeff, sizeof(float) * 6, 0, cudaMemcpyHostToDevice ) );
  int patchSide = 17;
  HANDLE_ERROR( cudaMemcpyToSymbol( optimize_cuda::g_patch_side, &patchSide, sizeof(int), 0, cudaMemcpyHostToDevice ) );
  host_patch_side = patchSide;
  const int inc[4] = { -width, -1, width, 1 };
  HANDLE_ERROR( cudaMemcpyToSymbol( optimize_cuda::g_inc, inc, sizeof(int) * 4, 0, cudaMemcpyHostToDevice ) );
  const int incK[4] = {-width*K,-K,width*K,K};
  const int order[4] = {2,0,3,1}; // DOWN, UP, RIGHT, LEFT
  const int area = height * width;
  // Make sure that the message storage is allocated
  float *buf = msgBuf;
  bool ownBuf = false;
  if ( NULL == buf ) {
    // Message buffer is not provided externally
    buf = new float[4*width*height*K];
    ownBuf = true;
  }
  // Initialization of device memory
  // Data term array
  float *devD = NULL;
  HANDLE_ERROR( cudaMalloc( (void**) &devD, sizeof(float) * area * K ) );
  HANDLE_ERROR( cudaMemcpy( devD, D, sizeof(float) * area * K, cudaMemcpyHostToDevice ) );
  // Labels array
  float *devLabel = NULL;
  HANDLE_ERROR( cudaMalloc( (void**) &devLabel, sizeof(float) * area * K * dim ) );
  HANDLE_ERROR( cudaMemcpy( devLabel, label, sizeof(float) * area * K * dim, cudaMemcpyHostToDevice ) );
  // Messages, zero-initialized.
  // After these lines, msg[0] = up, msg[1] = left, msg[2] = right, msg[3] = down
  float *devMsg = NULL;
  HANDLE_ERROR( cudaMalloc( (void**) &devMsg, sizeof(float) * area * K * 4 ) );
  HANDLE_ERROR( cudaMemset( devMsg, 0, sizeof(float) * area * K * 4 ) );
  // begins and ends (per-agent scanline endpoints)
  int longer = height > width ? height : width;
  int *devBegins = NULL;
  int *devEnds = NULL;
  int begins[longer];   // NOTE: variable-length arrays are a compiler extension
  int ends[longer];
  HANDLE_ERROR( cudaMalloc( (void**) &devBegins, sizeof(int) * longer ) );
  HANDLE_ERROR( cudaMalloc( (void**) &devEnds, sizeof(int) * longer ) );
  // Result (only `area` ints are used; kept over-allocated as in the original)
  int *devResult = NULL;
  HANDLE_ERROR( cudaMalloc( (void**) &devResult, sizeof(int) * area * K ) );
  // Intermediate Distance Calculation
  float *devDistance = NULL;
  HANDLE_ERROR( cudaMalloc( (void**) &devDistance, sizeof(float) * K * K * 4 * area ) );
  // BUG FIX: grid was (area + 1) / 8, which under-launches unless
  // area % 8 is 0 or 7; use proper ceiling division.
  dim3 precompute_grid( ( area + 7 ) / 8, 4 );
  dim3 precompute_block( 8, K, K );
  Precomputing_agent_float<<<precompute_grid, precompute_block>>>( area,
                                                                   devLabel,
                                                                   devDistance,
                                                                   dim,
                                                                   K );
  HANDLE_ERROR( cudaDeviceSynchronize() );
  double energy(0);
  for ( int iter=0; iter<options.maxIter; iter++ ) {
    for ( int dirID=0; dirID<4; dirID++ ) {
      int dir = order[dirID];
      // fill in begins and ends: one agent per column (vertical passes)
      // or per row (horizontal passes)
      int agentNum = ( 0 == ( dir & 1 ) ) ? width : height;
      if ( 0 == dir ) {
        // UP:
        for ( int scan=0; scan<agentNum; scan++ ) {
          begins[scan] = scan + width * ( height - 1 );
          ends[scan] = scan;
        }
      } else if ( 1 == dir ) {
        // LEFT:
        for ( int scan=0; scan<agentNum; scan++ ) {
          begins[scan] = width * scan + width - 1;
          ends[scan] = width * scan;
        }
      } else if ( 2 == dir ) {
        // DOWN:
        for ( int scan=0; scan<agentNum; scan++ ) {
          begins[scan] = scan;
          ends[scan] = scan + width * ( height - 1 );
        }
      } else if ( 3 == dir ) {
        // RIGHT:
        for ( int scan=0; scan<agentNum; scan++ ) {
          begins[scan] = scan * width;
          ends[scan] = scan * width + width - 1;
        }
      }
      HANDLE_ERROR( cudaMemcpy( devBegins, begins, sizeof(int) * agentNum, cudaMemcpyHostToDevice ) );
      HANDLE_ERROR( cudaMemcpy( devEnds, ends, sizeof(int) * agentNum, cudaMemcpyHostToDevice ) );
      // call Kernel Function 1 (message passing along the scanlines)
      int blockSize = 8;
      dim3 blockDim( blockSize, K );
      int shMemSizePerBlock= blockSize * K * sizeof(float);
      UpdateMessage_device<<<(agentNum/blockSize)+1,blockDim, shMemSizePerBlock>>>( agentNum,
                                                                                    devD,
                                                                                    devMsg,
                                                                                    devDistance,
                                                                                    devBegins,
                                                                                    devEnds,
                                                                                    K,
                                                                                    options.lambda,
                                                                                    dim, dir, inc[dir],
                                                                                    incK[dir],
                                                                                    area );
      HANDLE_ERROR( cudaDeviceSynchronize() );
      NormalizeMsg_device<<<(width*height*4)/128+1,128>>>( width * height * 4,
                                                           devMsg,
                                                           K );
      if ( 1 <= options.verbose ) {
        // BUG FIX: grid was (width*height+1)/64, which can miss tail pixels;
        // use the same ceiling division as the final call below.
        UpdateResult_device<<<(width*height)/64+1,64>>>( width * height,
                                                         devD,
                                                         devMsg,
                                                         devResult,
                                                         K );
        HANDLE_ERROR( cudaMemcpy( result, devResult, sizeof(int) * width * height, cudaMemcpyDeviceToHost ) );
        energy = UpdateEnergy( D, label, height, width, K, dim, options.lambda, result );
        printf( "Iteration %d: energy = %.5lf\n", iter, energy );
      }
    } // end for dir
  }
  // Final label selection and host-side energy evaluation.
  UpdateResult_device<<<(width*height)/64+1,64>>>( width * height,
                                                   devD,
                                                   devMsg,
                                                   devResult,
                                                   K );
  HANDLE_ERROR( cudaMemcpy( result, devResult, sizeof(int) * width * height, cudaMemcpyDeviceToHost ) );
  energy = UpdateEnergy( D, label, height, width, K, dim, options.lambda, result );
  printf( "Final energy = %.5lf\n", energy );
  // Free Cuda Memory
  if ( devD ) HANDLE_ERROR( cudaFree( devD ) );
  if ( devLabel ) HANDLE_ERROR( cudaFree( devLabel ) );
  if ( devMsg ) HANDLE_ERROR( cudaFree( devMsg ) );
  if ( devBegins ) HANDLE_ERROR( cudaFree( devBegins ) );
  if ( devEnds ) HANDLE_ERROR( cudaFree( devEnds ) );
  if ( devResult ) HANDLE_ERROR( cudaFree( devResult ) );
  if ( devDistance ) HANDLE_ERROR( cudaFree( devDistance ) );
  HANDLE_ERROR( cudaDeviceSynchronize() );
  // BUG FIX: the locally allocated message buffer was leaked.
  if ( ownBuf ) delete[] buf;
  return energy;
}
};
|
3c5c6344528927256398ed1a2796b90d206f9bed.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
// Includes
#include <stdio.h>
#include "../include/ContAcq-IntClk.h"
// includes, project
#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <hip/hip_runtime.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 60
#define ITERATIONS 40
// Variables
unsigned* h_A;
unsigned* h_B;
unsigned* h_C;
unsigned* d_A;
unsigned* d_B;
unsigned* d_C;
bool noprompt = false;
unsigned int my_timer;
// Functions
void CleanupResources(void);
void RandomInit(unsigned*, int);
void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(hipError_t err, const char *file, const int line )
{
if(hipSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling hipGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
hipError_t err = hipGetLastError();
if (hipSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
__global__ void PowerKernal2(const unsigned* A, const unsigned* B, unsigned* C, int N)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
unsigned Value1=0;
unsigned Value2=0;
unsigned Value3=0;
unsigned Value=0;
unsigned I1=A[i];
unsigned I2=B[i];
// Excessive Addition access
if( ((i%32)<=3) ){
for(unsigned k=0; k<ITERATIONS;k++) {
Value2= I1+I2;
Value3=I1-I2;
Value1-=Value2;
Value3+=Value1;
Value2-=Value3;
Value1+=Value3;
// Value2= I1+I2;
// Value3=I1-I2;
// Value1=I1-Value2;
// Value3+=Value1;
// Value2-=Value3;
// Value1+=Value3;
}
}
__syncthreads();
Value=Value1;
C[i]=Value;
__syncthreads();
}
int main()
{
printf("Power Microbenchmarks\n");
int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS;
size_t size = N * sizeof(unsigned);
// Allocate input vectors h_A and h_B in host memory
h_A = (unsigned*)malloc(size);
if (h_A == 0) CleanupResources();
h_B = (unsigned*)malloc(size);
if (h_B == 0) CleanupResources();
h_C = (unsigned*)malloc(size);
if (h_C == 0) CleanupResources();
// Initialize input vectors
RandomInit(h_A, N);
RandomInit(h_B, N);
// Allocate vectors in device memory
checkCudaErrors( hipMalloc((void**)&d_A, size) );
checkCudaErrors( hipMalloc((void**)&d_B, size) );
checkCudaErrors( hipMalloc((void**)&d_C, size) );
// Copy vectors from host memory to device memory
checkCudaErrors( hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice) );
checkCudaErrors( hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice) );
//VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
dim3 dimGrid2(1,1);
dim3 dimBlock2(1,1);
CUT_SAFE_CALL(cutCreateTimer(&my_timer));
TaskHandle taskhandle = LaunchDAQ();
CUT_SAFE_CALL(cutStartTimer(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
hipLaunchKernelGGL((
PowerKernal2), dim3(dimGrid),dim3(dimBlock), 0, 0, d_A, d_B, d_C, N);
CUDA_SAFE_CALL( hipDeviceSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
getLastCudaError("kernel launch failure");
CUDA_SAFE_CALL( hipDeviceSynchronize() );
CUT_SAFE_CALL(cutStopTimer(my_timer));
TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
CUT_SAFE_CALL(cutDeleteTimer(my_timer));
#ifdef _DEBUG
checkCudaErrors( hipDeviceSynchronize() );
#endif
// Copy result from device memory to host memory
// h_C contains the result in host memory
checkCudaErrors( hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost) );
CleanupResources();
return 0;
}
void CleanupResources(void)
{
// Free device memory
if (d_A)
hipFree(d_A);
if (d_B)
hipFree(d_B);
if (d_C)
hipFree(d_C);
// Free host memory
if (h_A)
free(h_A);
if (h_B)
free(h_B);
if (h_C)
free(h_C);
}
// Fills data[0..n-1] with pseudo-random unsigned values.
// Fixes two defects in the original:
//  - srand() was reseeded inside the loop with the same time(0) value, so
//    every element received the identical first value of the rand() sequence;
//  - rand() / RAND_MAX is integer division and yields 0 for (almost) every draw.
void RandomInit(unsigned* data, int n)
{
  srand((unsigned)time(0));        // seed once, before drawing
  for (int i = 0; i < n; ++i)
    data[i] = (unsigned)rand();    // keep full-range values
}
| 3c5c6344528927256398ed1a2796b90d206f9bed.cu | #include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
// Includes
#include <stdio.h>
#include "../include/ContAcq-IntClk.h"
// includes, project
#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <cuda_runtime.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 60
#define ITERATIONS 40
// Variables
unsigned* h_A;
unsigned* h_B;
unsigned* h_C;
unsigned* d_A;
unsigned* d_B;
unsigned* d_C;
bool noprompt = false;
unsigned int my_timer;
// Functions
void CleanupResources(void);
void RandomInit(unsigned*, int);
void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
// Abort the process with file/line context when a CUDA runtime call failed.
inline void __checkCudaErrors(cudaError err, const char *file, const int line )
{
  if (cudaSuccess == err) return;   // fast path: call succeeded
  fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
  exit(-1);
}
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
// Fetch-and-clear the pending CUDA error; on failure print the caller's
// message with file/line context and abort.
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
  cudaError_t err = cudaGetLastError();
  if (cudaSuccess == err) return;   // no pending error
  fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
  exit(-1);
}
// end of CUDA Helper Functions
// Power microbenchmark kernel: lanes 0..3 of each warp execute a chain of
// dependent integer add/sub operations for ITERATIONS rounds while the
// remaining lanes idle (deliberate divergence so only part of the ALUs are
// stressed).  The accumulated value is written to C so the work cannot be
// optimized away.  Parameter N is unused.
__global__ void PowerKernal2(const unsigned* A, const unsigned* B, unsigned* C, int N)
{
  int i = blockDim.x * blockIdx.x + threadIdx.x;
  //Do Some Computation
  unsigned Value1=0;
  unsigned Value2=0;
  unsigned Value3=0;
  unsigned Value=0;
  unsigned I1=A[i];
  unsigned I2=B[i];
  // Excessive Addition access
  // Only lanes with (i % 32) <= 3 run the arithmetic chain; each statement
  // depends on the previous one to serialize the ALU pipeline.
  if( ((i%32)<=3) ){
    for(unsigned k=0; k<ITERATIONS;k++) {
      Value2= I1+I2;
      Value3=I1-I2;
      Value1-=Value2;
      Value3+=Value1;
      Value2-=Value3;
      Value1+=Value3;
      // Value2= I1+I2;
      // Value3=I1-I2;
      // Value1=I1-Value2;
      // Value3+=Value1;
      // Value2-=Value3;
      // Value1+=Value3;
    }
  }
  __syncthreads();
  // Publish the result (zero for the idle lanes) to defeat dead-code removal.
  Value=Value1;
  C[i]=Value;
  __syncthreads();
}
// Driver for the power microbenchmark: allocates and randomizes two input
// vectors, launches PowerKernal2 once while a DAQ power-measurement session
// (LaunchDAQ/TurnOffDAQ, from ContAcq-IntClk.h) and a CUTIL timer run, then
// copies the result back and frees everything.
int main()
{
  printf("Power Microbenchmarks\n");
  int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS;
  size_t size = N * sizeof(unsigned);
  // Allocate input vectors h_A and h_B in host memory
  // NOTE(review): on malloc failure CleanupResources() is called but
  // execution continues with a NULL pointer — confirm intended.
  h_A = (unsigned*)malloc(size);
  if (h_A == 0) CleanupResources();
  h_B = (unsigned*)malloc(size);
  if (h_B == 0) CleanupResources();
  h_C = (unsigned*)malloc(size);
  if (h_C == 0) CleanupResources();
  // Initialize input vectors
  RandomInit(h_A, N);
  RandomInit(h_B, N);
  // Allocate vectors in device memory
  checkCudaErrors( cudaMalloc((void**)&d_A, size) );
  checkCudaErrors( cudaMalloc((void**)&d_B, size) );
  checkCudaErrors( cudaMalloc((void**)&d_C, size) );
  // Copy vectors from host memory to device memory
  checkCudaErrors( cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice) );
  checkCudaErrors( cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice) );
  //VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
  dim3 dimGrid(NUM_OF_BLOCKS,1);
  dim3 dimBlock(THREADS_PER_BLOCK,1);
  dim3 dimGrid2(1,1);      // unused secondary launch config
  dim3 dimBlock2(1,1);
  CUT_SAFE_CALL(cutCreateTimer(&my_timer));
  TaskHandle taskhandle = LaunchDAQ();        // start power data acquisition
  CUT_SAFE_CALL(cutStartTimer(my_timer));
  printf("execution time = %f\n", cutGetTimerValue(my_timer));
  PowerKernal2<<<dimGrid,dimBlock>>>(d_A, d_B, d_C, N);
  // cudaThreadSynchronize() is the deprecated spelling of
  // cudaDeviceSynchronize(); kept for the CUTIL-era toolchain this targets.
  CUDA_SAFE_CALL( cudaThreadSynchronize() );
  printf("execution time = %f\n", cutGetTimerValue(my_timer));
  getLastCudaError("kernel launch failure");
  CUDA_SAFE_CALL( cudaThreadSynchronize() );
  CUT_SAFE_CALL(cutStopTimer(my_timer));
  TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));   // stop power capture
  printf("execution time = %f\n", cutGetTimerValue(my_timer));
  CUT_SAFE_CALL(cutDeleteTimer(my_timer));
#ifdef _DEBUG
  checkCudaErrors( cudaDeviceSynchronize() );
#endif
  // Copy result from device memory to host memory
  // h_C contains the result in host memory
  checkCudaErrors( cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) );
  CleanupResources();
  return 0;
}
// Release every global device and host buffer; safe to call with any
// subset of them still unallocated (NULL pointers are skipped).
void CleanupResources(void)
{
  // Device-side buffers
  if (d_A) cudaFree(d_A);
  if (d_B) cudaFree(d_B);
  if (d_C) cudaFree(d_C);
  // Host-side buffers
  if (h_A) free(h_A);
  if (h_B) free(h_B);
  if (h_C) free(h_C);
}
// Fills data[0..n-1] with pseudo-random unsigned values.
// Fixes two defects in the original:
//  - srand() was reseeded inside the loop with the same time(0) value, so
//    every element received the identical first value of the rand() sequence;
//  - rand() / RAND_MAX is integer division and yields 0 for (almost) every draw.
void RandomInit(unsigned* data, int n)
{
  srand((unsigned)time(0));        // seed once, before drawing
  for (int i = 0; i < n; ++i)
    data[i] = (unsigned)rand();    // keep full-range values
}
|
7da2439c288092d3ce8764f34407cad0620834c4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "util.h"
__global__ void kmer(char *reference_str, char *reads, int reference_len, int read_len, int k, int *read_results) {
int i = blockIdx.x;
int j = threadIdx.x;
if (j + k <= read_len) {
int count = 0;
int loc = i * MAX_READ_LENGTH + j;
for (int p = 0; p <= reference_len - k; p++) {
int equal = 1;
for (int t = 0; t < k; t++)
if (reads[loc + t] != reference_str[p + t])
equal = 0;
count += equal;
}
read_results[loc] = count;
}
}
int main(int argc, char** argv)
{
if(argc != 5) {
printf("Wrong argments usage: ./kmer_parallel [REFERENCE_FILE] [READ_FILE] [k] [OUTPUT_FILE]\n" );
}
FILE *fp;
int k;
char *reference_str = (char*) malloc(MAX_REF_LENGTH * sizeof(char));
char *reference_filename, *read_filename, *output_filename;
reference_filename = argv[1];
read_filename = argv[2];
k = atoi(argv[3]);
output_filename = argv[4];
fp = fopen(reference_filename, "r");
if (fp == NULL) {
printf("Could not open file %s!\n",reference_filename);
return 1;
}
if (fgets(reference_str, MAX_REF_LENGTH, fp) == NULL) { //A single line only
printf("Problem in file format!\n");
return 1;
}
substring(reference_str, 0, strlen(reference_str)-1);
fclose(fp);
//Read queries
StringList queries;
initStringList(&queries, 3); // initially 3 elements
int success = read_file(read_filename, &queries);
int read_len = strlen(queries.array[0]) - 1;
int reference_len = strlen(reference_str);
char *d_reads;
char *d_reference_str;
int *d_read_results;
hipMalloc(&d_reads, queries.size * MAX_READ_LENGTH * sizeof(char));
hipMalloc(&d_read_results, queries.size * MAX_READ_LENGTH * sizeof(int));
hipMalloc(&d_reference_str, reference_len * sizeof(char));
char *reads_1d = (char*) malloc(queries.size * MAX_READ_LENGTH * sizeof(char));
for (int i = 0; i < queries.size; i++)
for (int j = 0; j < MAX_READ_LENGTH; j++)
reads_1d[i * MAX_READ_LENGTH + j] = queries.array[i][j];
hipMemcpy(d_reads, reads_1d, queries.size * MAX_READ_LENGTH * sizeof(char), hipMemcpyHostToDevice);
hipMemcpy(d_reference_str, reference_str, reference_len * sizeof(char), hipMemcpyHostToDevice);
int n_blocks = queries.used;
int n_threads = 256;
hipLaunchKernelGGL(( kmer), dim3(n_blocks),dim3(n_threads), 0, 0, d_reference_str, d_reads, reference_len, read_len, k, d_read_results);
hipDeviceSynchronize();
int *read_results = (int*) malloc(queries.size * MAX_READ_LENGTH * sizeof(int));
hipMemcpy(read_results, d_read_results, queries.size * MAX_READ_LENGTH * sizeof(int), hipMemcpyDeviceToHost);
fp = fopen(output_filename, "w");
for (int q = 0; q < queries.used; q++) {
int count = 0;
for (int i = 0; i < read_len; i++)
count += read_results[q * MAX_READ_LENGTH + i];
fprintf(fp, "%d\n", count);
}
// free allocated memory
hipFree(d_reads);
hipFree(d_read_results);
hipFree(d_reference_str);
freeStringList(&queries);
free(reference_str);
return 0;
}
| 7da2439c288092d3ce8764f34407cad0620834c4.cu | #include "util.h"
// For read blockIdx.x and offset threadIdx.x, count how many positions of the
// reference contain the k-mer starting at that offset of the read, and store
// the count at read_results[read * MAX_READ_LENGTH + offset].
// Launch layout: one block per read, one thread per read offset.
__global__ void kmer(char *reference_str, char *reads, int reference_len, int read_len, int k, int *read_results) {
  const int read_idx = blockIdx.x;
  const int offset = threadIdx.x;
  if (offset + k > read_len) return;   // k-mer would run past the end of the read
  const int base = read_idx * MAX_READ_LENGTH + offset;
  int matches = 0;
  // Slide the k-mer over every valid start position of the reference.
  for (int pos = 0; pos + k <= reference_len; pos++) {
    int same = 1;
    for (int t = 0; t < k; t++) {
      if (reads[base + t] != reference_str[pos + t]) same = 0;
    }
    matches += same;
  }
  read_results[base] = matches;
}
// Entry point: for every read, counts the occurrences in the reference of each
// k-mer of the read, then writes one total per read to the output file.
// Usage: ./kmer_parallel [REFERENCE_FILE] [READ_FILE] [k] [OUTPUT_FILE]
int main(int argc, char** argv)
{
  if(argc != 5) {
    printf("Wrong argments usage: ./kmer_parallel [REFERENCE_FILE] [READ_FILE] [k] [OUTPUT_FILE]\n" );
    // BUG FIX: previously fell through and dereferenced missing argv entries.
    return 1;
  }
  FILE *fp;
  int k;
  char *reference_str = (char*) malloc(MAX_REF_LENGTH * sizeof(char));
  char *reference_filename, *read_filename, *output_filename;
  reference_filename = argv[1];
  read_filename = argv[2];
  k = atoi(argv[3]);
  output_filename = argv[4];
  fp = fopen(reference_filename, "r");
  if (fp == NULL) {
    printf("Could not open file %s!\n",reference_filename);
    return 1;
  }
  if (fgets(reference_str, MAX_REF_LENGTH, fp) == NULL) { //A single line only
    printf("Problem in file format!\n");
    return 1;
  }
  // Drop the trailing newline from the reference line.
  substring(reference_str, 0, strlen(reference_str)-1);
  fclose(fp);
  //Read queries
  StringList queries;
  initStringList(&queries, 3); // initially 3 elements
  int success = read_file(read_filename, &queries);
  // NOTE(review): the original ignored this return value; verify read_file's
  // contract before acting on failure here.
  (void)success;
  int read_len = strlen(queries.array[0]) - 1;
  int reference_len = strlen(reference_str);
  char *d_reads;
  char *d_reference_str;
  int *d_read_results;
  cudaMalloc(&d_reads, queries.size * MAX_READ_LENGTH * sizeof(char));
  cudaMalloc(&d_read_results, queries.size * MAX_READ_LENGTH * sizeof(int));
  cudaMalloc(&d_reference_str, reference_len * sizeof(char));
  // Flatten the reads into one fixed-stride host buffer for a single copy.
  char *reads_1d = (char*) malloc(queries.size * MAX_READ_LENGTH * sizeof(char));
  for (int i = 0; i < queries.size; i++)
    for (int j = 0; j < MAX_READ_LENGTH; j++)
      reads_1d[i * MAX_READ_LENGTH + j] = queries.array[i][j];
  cudaMemcpy(d_reads, reads_1d, queries.size * MAX_READ_LENGTH * sizeof(char), cudaMemcpyHostToDevice);
  cudaMemcpy(d_reference_str, reference_str, reference_len * sizeof(char), cudaMemcpyHostToDevice);
  // One block per read, one thread per read offset.
  // NOTE(review): assumes read_len < 256 and MAX_READ_LENGTH >= 256 fits the
  // thread count — confirm against util.h.
  int n_blocks = queries.used;
  int n_threads = 256;
  kmer<<<n_blocks,n_threads>>>(d_reference_str, d_reads, reference_len, read_len, k, d_read_results);
  cudaDeviceSynchronize();
  int *read_results = (int*) malloc(queries.size * MAX_READ_LENGTH * sizeof(int));
  cudaMemcpy(read_results, d_read_results, queries.size * MAX_READ_LENGTH * sizeof(int), cudaMemcpyDeviceToHost);
  fp = fopen(output_filename, "w");
  if (fp == NULL) {
    // BUG FIX: output fopen was unchecked; fprintf on NULL is UB.
    printf("Could not open file %s!\n",output_filename);
    return 1;
  }
  // Per read: total matches summed over all k-mer start offsets.
  for (int q = 0; q < queries.used; q++) {
    int count = 0;
    for (int i = 0; i < read_len; i++)
      count += read_results[q * MAX_READ_LENGTH + i];
    fprintf(fp, "%d\n", count);
  }
  // BUG FIX: output file was never closed/flushed.
  fclose(fp);
  // free allocated memory
  cudaFree(d_reads);
  cudaFree(d_read_results);
  cudaFree(d_reference_str);
  freeStringList(&queries);
  free(reference_str);
  // BUG FIX: host staging buffers were leaked.
  free(reads_1d);
  free(read_results);
  return 0;
}
|
878d66687165702f4e8db3ac518b28de862e5d1e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <hipcub/hipcub.hpp>
#include <io/utilities/block_utils.cuh>
#include <rmm/cuda_stream_view.hpp>
#include "orc_common.h"
#include "orc_gpu.h"
namespace cudf {
namespace io {
namespace orc {
namespace gpu {
// Must be able to handle 512x 8-byte values. These values are base 128 encoded
// so 8 byte value is expanded to 10 bytes.
constexpr int bytestream_buffer_size = 512 * 8 * 2;
constexpr int bytestream_buffer_mask = (bytestream_buffer_size - 1) >> 2;
// TODO: Should be more efficient with 512 threads per block and circular queue for values
constexpr int num_warps = 32;
constexpr int block_size = 32 * num_warps;
// Add some margin to look ahead to future rows in case there are many zeroes
constexpr int row_decoder_buffer_size = block_size + 128;
inline __device__ uint8_t is_rlev1(uint8_t encoding_mode) { return encoding_mode < DIRECT_V2; }
inline __device__ uint8_t is_dictionary(uint8_t encoding_mode) { return encoding_mode & 1; }
static __device__ __constant__ int64_t kORCTimeToUTC =
1420070400; // Seconds from January 1st, 1970 to January 1st, 2015
struct int128_s {
uint64_t lo;
int64_t hi;
};
struct orc_bytestream_s {
const uint8_t *base;
uint32_t pos;
uint32_t len;
uint32_t fill_pos;
uint32_t fill_count;
union {
uint8_t u8[bytestream_buffer_size];
uint32_t u32[bytestream_buffer_size >> 2];
uint2 u64[bytestream_buffer_size >> 3];
} buf;
};
struct orc_rlev1_state_s {
uint32_t num_runs;
uint32_t num_vals;
int32_t run_data[num_warps * 12]; // (delta << 24) | (count << 16) | (first_val)
};
struct orc_rlev2_state_s {
uint32_t num_runs;
uint32_t num_vals;
union {
uint32_t u32[num_warps];
uint64_t u64[num_warps];
} baseval;
uint16_t m2_pw_byte3[num_warps];
int64_t delta[num_warps];
uint16_t runs_loc[block_size];
};
struct orc_byterle_state_s {
uint32_t num_runs;
uint32_t num_vals;
uint32_t runs_loc[num_warps];
uint32_t runs_pos[num_warps];
};
struct orc_rowdec_state_s {
uint32_t nz_count;
uint32_t row[row_decoder_buffer_size]; // 0=skip, >0: row position relative to cur_row
};
struct orc_strdict_state_s {
DictionaryEntry *local_dict;
uint32_t dict_pos;
uint32_t dict_len;
};
struct orc_datadec_state_s {
uint32_t cur_row; // starting row of current batch
uint32_t end_row; // ending row of this chunk (start_row + num_rows)
uint32_t max_vals; // max # of non-zero values to decode in this batch
uint32_t nrows; // # of rows in current batch (up to block_size)
uint32_t buffered_count; // number of buffered values in the secondary data stream
int64_t utc_epoch; // kORCTimeToUTC - gmtOffset
RowGroup index;
};
// Aggregate per-block decoder state (lives in shared memory).
// The `top` and `u` unions are safe because the null-decode, dictionary-decode
// and data-decode phases never run concurrently for the same block.
struct orcdec_state_s {
  ColumnDesc chunk;       // column chunk descriptor (copied from global memory)
  orc_bytestream_s bs;    // primary data stream
  orc_bytestream_s bs2;   // secondary data stream
  int is_string;
  union {
    orc_strdict_state_s dict;
    uint32_t nulls_desc_row;  // number of rows processed for nulls.
    orc_datadec_state_s data;
  } top;
  union {
    orc_rlev1_state_s rlev1;
    orc_rlev2_state_s rlev2;
    orc_byterle_state_s rle8;
    orc_rowdec_state_s rowdec;
  } u;
  // Decoded-value staging buffer, viewed at different widths
  union values {
    uint8_t u8[block_size * 8];
    uint32_t u32[block_size * 2];
    int32_t i32[block_size * 2];
    uint64_t u64[block_size];
    int64_t i64[block_size];
    double f64[block_size];
  } vals;
};
/**
 * @brief Initializes a byte stream so that the read pointer stays 8-byte aligned.
 *
 * The base pointer is rounded down to the previous 8-byte boundary and the
 * length padded up to a multiple of 8, so refills always move whole 8-byte
 * words. Assumes the address range [start_address & ~7, (start_address + len - 1) | 7]
 * is valid.
 *
 * @param[in,out] bs Byte stream input
 * @param[in] base Pointer to raw byte stream data
 * @param[in] len Stream length in bytes
 */
static __device__ void bytestream_init(volatile orc_bytestream_s *bs,
                                       const uint8_t *base,
                                       uint32_t len)
{
  // Distance of `base` from the previous 8-byte boundary
  uint32_t const align_ofs = static_cast<uint32_t>(reinterpret_cast<size_t>(base) & 7);
  // Length including the alignment prefix, rounded up to an 8-byte multiple
  uint32_t const padded_len = (len + align_ofs + 7) & ~7;
  bs->base = base - align_ofs;
  bs->pos = (len != 0) ? align_ofs : 0;
  bs->len = padded_len;
  bs->fill_pos = 0;
  // Initial fill covers the whole buffer (or the whole stream if shorter), in 8-byte slots
  bs->fill_count = min(padded_len, bytestream_buffer_size) >> 3;
}
/**
 * @brief Increment the read position and record how much of the circular
 * buffer must be refilled (stored into fill_pos/fill_count, consumed by
 * bytestream_fill).
 *
 * @param[in] bs Byte stream input
 * @param[in] bytes_consumed Number of bytes that were consumed
 */
static __device__ void bytestream_flush_bytes(volatile orc_bytestream_s *bs,
                                              uint32_t bytes_consumed)
{
  uint32_t pos = bs->pos;
  uint32_t len = bs->len;
  // Advance the read position, clamped to the stream length
  uint32_t pos_new = min(pos + bytes_consumed, len);
  bs->pos = pos_new;
  // Window ends one buffer-length ahead of the old/new positions (clamped);
  // the bytes between the two window ends are what needs refilling
  pos = min(pos + bytestream_buffer_size, len);
  pos_new = min(pos_new + bytestream_buffer_size, len);
  bs->fill_pos = pos;
  bs->fill_count = (pos_new >> 3) - (pos >> 3);  // in 8-byte slots
}
/**
 * @brief Refill the byte stream buffer
 *
 * Each of the first fill_count threads copies one 8-byte slot from the raw
 * stream into the circular buffer (destination index wraps at buffer size).
 *
 * @param[in] bs Byte stream input
 * @param[in] t thread id
 */
static __device__ void bytestream_fill(orc_bytestream_s *bs, int t)
{
  auto const count = bs->fill_count;
  if (t < count) {
    auto const pos8 = (bs->fill_pos >> 3) + t;  // 8-byte slot index in the stream
    memcpy(&bs->buf.u64[pos8 & ((bytestream_buffer_size >> 3) - 1)],
           &bs->base[pos8 * sizeof(uint2)],
           sizeof(uint2));
  }
}
/**
 * @brief Read a single byte from the byte stream (byte aligned)
 *
 * @param[in] bs Byte stream input
 * @param[in] pos Position in byte stream
 * @return byte at `pos` (position wraps around the circular buffer)
 */
inline __device__ uint8_t bytestream_readbyte(volatile orc_bytestream_s *bs, int pos)
{
  // Wrap the position into the circular staging buffer
  int const byte_idx = pos & (bytestream_buffer_size - 1);
  return bs->buf.u8[byte_idx];
}
/**
 * @brief Read 32 bits from a byte stream (little endian, byte aligned)
 *
 * Loads the two aligned words straddling `pos` and funnel-shifts to extract
 * the (possibly unaligned) 32-bit value; each word index wraps independently.
 *
 * @param[in] bs Byte stream input
 * @param[in] pos Position in byte stream
 * @result bits
 */
inline __device__ uint32_t bytestream_readu32(volatile orc_bytestream_s *bs, int pos)
{
  uint32_t a = bs->buf.u32[(pos & (bytestream_buffer_size - 1)) >> 2];
  uint32_t b = bs->buf.u32[((pos + 4) & (bytestream_buffer_size - 1)) >> 2];
  return __funnelshift_r(a, b, (pos & 3) * 8);  // shift out the sub-word byte offset
}
/**
 * @brief Read 64 bits from a byte stream (little endian, byte aligned)
 *
 * Same technique as bytestream_readu32, using three aligned words to cover
 * any byte offset within a word.
 *
 * @param[in] bs Byte stream input
 * @param[in] pos Position in byte stream
 * @return bits
 */
inline __device__ uint64_t bytestream_readu64(volatile orc_bytestream_s *bs, int pos)
{
  uint32_t a = bs->buf.u32[(pos & (bytestream_buffer_size - 1)) >> 2];
  uint32_t b = bs->buf.u32[((pos + 4) & (bytestream_buffer_size - 1)) >> 2];
  uint32_t c = bs->buf.u32[((pos + 8) & (bytestream_buffer_size - 1)) >> 2];
  uint32_t lo32 = __funnelshift_r(a, b, (pos & 3) * 8);
  uint32_t hi32 = __funnelshift_r(b, c, (pos & 3) * 8);
  uint64_t v = hi32;
  v <<= 32;
  v |= lo32;
  return v;
}
/**
 * @brief Read up to 32-bits from a byte stream (big endian)
 *
 * Byte-reverses the two words straddling the bit position (__byte_perm with
 * selector 0x0123) so the funnel shift extracts a big-endian bit field.
 *
 * @param[in] bs Byte stream input
 * @param[in] bitpos Position in byte stream (in bits)
 * @param[in] numbits number of bits (1..32; behavior for 0 relies on shift semantics)
 * @return decoded value
 */
inline __device__ uint32_t bytestream_readbits(volatile orc_bytestream_s *bs,
                                               int bitpos,
                                               uint32_t numbits)
{
  int idx = bitpos >> 5;  // 32-bit word index
  uint32_t a = __byte_perm(bs->buf.u32[(idx + 0) & bytestream_buffer_mask], 0, 0x0123);
  uint32_t b = __byte_perm(bs->buf.u32[(idx + 1) & bytestream_buffer_mask], 0, 0x0123);
  return __funnelshift_l(b, a, bitpos & 0x1f) >> (32 - numbits);
}
/**
 * @brief Read up to 64-bits from a byte stream (big endian)
 *
 * 64-bit variant of bytestream_readbits: three byte-reversed words cover any
 * bit offset, and the final right shift drops the unused low bits.
 *
 * @param[in] bs Byte stream input
 * @param[in] bitpos Position in byte stream (in bits)
 * @param[in] numbits number of bits (1..64)
 * @return decoded value
 */
inline __device__ uint64_t bytestream_readbits64(volatile orc_bytestream_s *bs,
                                                 int bitpos,
                                                 uint32_t numbits)
{
  int idx = bitpos >> 5;
  uint32_t a = __byte_perm(bs->buf.u32[(idx + 0) & bytestream_buffer_mask], 0, 0x0123);
  uint32_t b = __byte_perm(bs->buf.u32[(idx + 1) & bytestream_buffer_mask], 0, 0x0123);
  uint32_t c = __byte_perm(bs->buf.u32[(idx + 2) & bytestream_buffer_mask], 0, 0x0123);
  uint32_t hi32 = __funnelshift_l(b, a, bitpos & 0x1f);
  uint32_t lo32 = __funnelshift_l(c, b, bitpos & 0x1f);
  uint64_t v = hi32;
  v <<= 32;
  v |= lo32;
  v >>= (64 - numbits);  // keep only the requested bit count
  return v;
}
/**
 * @brief Decode a big-endian unsigned 32-bit value
 *
 * @param[in] bs Byte stream input
 * @param[in] bitpos Position in byte stream (in bits)
 * @param[in] numbits number of bits
 * @param[out] result decoded value
 */
inline __device__ void bytestream_readbe(volatile orc_bytestream_s *bs,
                                         int bitpos,
                                         uint32_t numbits,
                                         uint32_t &result)
{
  // Unsigned: the raw bit field is the value
  uint32_t const v = bytestream_readbits(bs, bitpos, numbits);
  result = v;
}
/**
 * @brief Decode a big-endian signed (zigzag-encoded) 32-bit value
 *
 * @param[in] bs Byte stream input
 * @param[in] bitpos Position in byte stream (in bits)
 * @param[in] numbits number of bits
 * @param[out] result decoded value
 */
inline __device__ void bytestream_readbe(volatile orc_bytestream_s *bs,
                                         int bitpos,
                                         uint32_t numbits,
                                         int32_t &result)
{
  uint32_t const zz = bytestream_readbits(bs, bitpos, numbits);
  // Zigzag decode: even -> n/2, odd -> -(n+1)/2 (i.e. bitwise NOT of n/2)
  int32_t const magnitude = static_cast<int32_t>(zz >> 1);
  result = (zz & 1) ? ~magnitude : magnitude;
}
/**
 * @brief Decode a big-endian unsigned 64-bit value
 *
 * @param[in] bs Byte stream input
 * @param[in] bitpos Position in byte stream (in bits)
 * @param[in] numbits number of bits
 * @param[out] result decoded value
 */
inline __device__ void bytestream_readbe(volatile orc_bytestream_s *bs,
                                         int bitpos,
                                         uint32_t numbits,
                                         uint64_t &result)
{
  // Unsigned: the raw bit field is the value
  uint64_t const v = bytestream_readbits64(bs, bitpos, numbits);
  result = v;
}
/**
 * @brief Decode a big-endian signed (zigzag-encoded) 64-bit value
 *
 * @param[in] bs Byte stream input
 * @param[in] bitpos Position in byte stream (in bits)
 * @param[in] numbits number of bits
 * @param[out] result decoded value
 */
inline __device__ void bytestream_readbe(volatile orc_bytestream_s *bs,
                                         int bitpos,
                                         uint32_t numbits,
                                         int64_t &result)
{
  uint64_t const zz = bytestream_readbits64(bs, bitpos, numbits);
  // Zigzag decode: even -> n/2, odd -> -(n+1)/2 (i.e. bitwise NOT of n/2)
  int64_t const magnitude = static_cast<int64_t>(zz >> 1);
  result = (zz & 1) ? ~magnitude : magnitude;
}
/**
 * @brief Return the length of a base-128 varint
 *
 * Scans for the first byte with the continuation (0x80) bit clear; the
 * 0x80808080 mask + __ffs trick finds that byte four at a time.
 *
 * @tparam T target integer type; its size bounds how many bytes are scanned
 * @param[in] bs Byte stream input
 * @param[in] pos Position in circular byte stream buffer
 * @return length of varint in bytes
 */
template <class T>
inline __device__ uint32_t varint_length(volatile orc_bytestream_s *bs, int pos)
{
  if (bytestream_readbyte(bs, pos) > 0x7f) {
    // First continuation byte present: look at the next 4 bytes at once
    uint32_t next32 = bytestream_readu32(bs, pos + 1);
    uint32_t zbit = __ffs((~next32) & 0x80808080);  // 1-based bit of first stop byte, 0 if none
    if (sizeof(T) <= 4 || zbit) {
      return 1 + (zbit >> 3);  // up to 5x7 bits
    } else {
      next32 = bytestream_readu32(bs, pos + 5);
      zbit = __ffs((~next32) & 0x80808080);
      if (zbit) {
        return 5 + (zbit >> 3);  // up to 9x7 bits
      } else if ((sizeof(T) <= 8) || (bytestream_readbyte(bs, pos + 9) <= 0x7f)) {
        return 10;  // up to 70 bits
      } else {
        // Only reached for >8-byte targets (e.g. int128 decimals)
        uint64_t next64 = bytestream_readu64(bs, pos + 10);
        zbit = __ffsll((~next64) & 0x8080808080808080ull);
        if (zbit) {
          return 10 + (zbit >> 3);  // Up to 18x7 bits (126)
        } else {
          return 19;  // Up to 19x7 bits (133)
        }
      }
    }
  } else {
    return 1;  // single-byte varint
  }
}
/**
 * @brief Decodes a base-128 varint
 *
 * Bytes are accumulated 7 bits at a time (little-endian groups); each nesting
 * level handles one additional continuation byte. For 64-bit targets, the
 * low 32 bits are saved after byte 5 and accumulation restarts for the high
 * half in the same 32-bit register `v`.
 *
 * @param[in] bs Byte stream input
 * @param[in] pos Position in circular byte stream buffer
 * @param[in] result Unpacked value
 * @return new position in byte stream buffer
 */
template <class T>
inline __device__ int decode_base128_varint(volatile orc_bytestream_s *bs, int pos, T &result)
{
  uint32_t v = bytestream_readbyte(bs, pos++);
  if (v > 0x7f) {
    uint32_t b = bytestream_readbyte(bs, pos++);
    v = (v & 0x7f) | (b << 7);
    if (b > 0x7f) {
      b = bytestream_readbyte(bs, pos++);
      v = (v & 0x3fff) | (b << 14);
      if (b > 0x7f) {
        b = bytestream_readbyte(bs, pos++);
        v = (v & 0x1fffff) | (b << 21);
        if (b > 0x7f) {
          b = bytestream_readbyte(bs, pos++);
          v = (v & 0x0fffffff) | (b << 28);
          if (sizeof(T) > 4) {
            // 64-bit target: bits 0-31 are complete, continue with the high half
            uint32_t lo = v;
            uint64_t hi;
            v = b >> 4;  // bits 32-34 come from the top of byte 5
            if (b > 0x7f) {
              b = bytestream_readbyte(bs, pos++);
              v = (v & 7) | (b << 3);
              if (b > 0x7f) {
                b = bytestream_readbyte(bs, pos++);
                v = (v & 0x3ff) | (b << 10);
                if (b > 0x7f) {
                  b = bytestream_readbyte(bs, pos++);
                  v = (v & 0x1ffff) | (b << 17);
                  if (b > 0x7f) {
                    b = bytestream_readbyte(bs, pos++);
                    v = (v & 0xffffff) | (b << 24);
                    if (b > 0x7f) {
                      pos++;  // last bit is redundant (extra byte implies bit63 is 1)
                    }
                  }
                }
              }
            }
            hi = v;
            hi <<= 32;
            result = hi | lo;
            return pos;
          }
        }
      }
    }
  }
  result = v;
  return pos;
}
/**
 * @brief Decodes a signed int128 encoded as base-128 varint (used for decimals)
 *
 * The first byte's bit 0 is the zigzag sign; `sign_mask` is all-ones for
 * negative values so the final XOR performs the zigzag inversion on both halves.
 */
inline __device__ int128_s decode_varint128(volatile orc_bytestream_s *bs, int pos)
{
  uint32_t b = bytestream_readbyte(bs, pos++);
  int64_t sign_mask = -(int32_t)(b & 1);  // 0 or ~0 depending on zigzag sign bit
  uint64_t v = (b >> 1) & 0x3f;           // 6 payload bits from the first byte
  uint32_t bitpos = 6;
  uint64_t lo = v;
  uint64_t hi = 0;
  while (b > 0x7f && bitpos < 128) {
    b = bytestream_readbyte(bs, pos++);
    v |= ((uint64_t)(b & 0x7f)) << (bitpos & 0x3f);
    if (bitpos == 62) {  // 6 + 7 * 8 = 62; low word full, start accumulating the high word
      lo = v;
      v = (b & 0x7f) >> 2;  // 64 - 62
    }
    bitpos += 7;
  }
  if (bitpos >= 64) {
    hi = v;
  } else {
    lo = v;
  }
  return {(uint64_t)(lo ^ sign_mask), (int64_t)(hi ^ sign_mask)};
}
/**
 * @brief Decodes an unsigned 32-bit varint
 *
 * @return new position in the byte stream buffer
 */
inline __device__ int decode_varint(volatile orc_bytestream_s *bs, int pos, uint32_t &result)
{
  // No sign handling needed; delegate directly to the generic decoder
  return decode_base128_varint<uint32_t>(bs, pos, result);
}
/**
 * @brief Decodes an unsigned 64-bit varint
 *
 * @return new position in the byte stream buffer
 */
inline __device__ int decode_varint(volatile orc_bytestream_s *bs, int pos, uint64_t &result)
{
  // No sign handling needed; delegate directly to the generic decoder
  return decode_base128_varint<uint64_t>(bs, pos, result);
}
/**
 * @brief Signed (zigzag-encoded) version of 32-bit decode_varint
 *
 * @return new position in the byte stream buffer
 */
inline __device__ int decode_varint(volatile orc_bytestream_s *bs, int pos, int32_t &result)
{
  uint32_t zz;
  pos = decode_base128_varint<uint32_t>(bs, pos, zz);
  // Zigzag decode: even -> n/2, odd -> -(n+1)/2 (i.e. bitwise NOT of n/2)
  int32_t const magnitude = static_cast<int32_t>(zz >> 1);
  result = (zz & 1) ? ~magnitude : magnitude;
  return pos;
}
/**
 * @brief Signed (zigzag-encoded) version of 64-bit decode_varint
 *
 * @return new position in the byte stream buffer
 */
inline __device__ int decode_varint(volatile orc_bytestream_s *bs, int pos, int64_t &result)
{
  uint64_t zz;
  pos = decode_base128_varint<uint64_t>(bs, pos, zz);
  // Zigzag decode: even -> n/2, odd -> -(n+1)/2 (i.e. bitwise NOT of n/2)
  int64_t const magnitude = static_cast<int64_t>(zz >> 1);
  result = (zz & 1) ? ~magnitude : magnitude;
  return pos;
}
/**
 * @brief In-place inclusive prefix sum: converts per-value lengths into end positions
 *
 * Block-cooperative: must be called by all threads of the block with the same
 * `numvals`. log2(numvals) sweep passes, each separated by a block barrier.
 *
 * @param[in] vals input values
 * @param[in] numvals number of values
 * @param[in] t thread id
 */
template <class T>
inline __device__ void lengths_to_positions(volatile T *vals, uint32_t numvals, unsigned int t)
{
  uint32_t stride = 1;
  while (stride < numvals) {
    // Barrier outside the divergent branch: all threads reach it each pass
    __syncthreads();
    bool const adds_this_pass = (t & stride) != 0;
    if (adds_this_pass && (t < numvals)) {
      // Partner is the last element of the preceding `stride`-sized group
      uint32_t const partner = (t & ~stride) | (stride - 1);
      vals[t] += vals[partner];
    }
    stride <<= 1;
  }
}
/**
 * @brief ORC Integer RLEv1 decoding
 *
 * Three phases: (1) thread 0 scans the stream, storing each value's varint
 * position in `vals` and packing run descriptors; (2) one warp per run fans
 * out positions+deltas to all values of the run; (3) one thread per value
 * decodes its varint and applies the delta.
 *
 * @param[in] bs input byte stream
 * @param[in] rle RLE state
 * @param[in] vals buffer for output values (uint32_t, int32_t, uint64_t or int64_t)
 * @param[in] maxvals maximum number of values to decode
 * @param[in] t thread id
 *
 * @return number of values decoded
 */
template <class T>
static __device__ uint32_t Integer_RLEv1(
  orc_bytestream_s *bs, volatile orc_rlev1_state_s *rle, volatile T *vals, uint32_t maxvals, int t)
{
  uint32_t numvals, numruns;
  if (t == 0) {
    // Only consume bytes that are guaranteed buffered (8-byte refill granularity)
    uint32_t maxpos = min(bs->len, bs->pos + (bytestream_buffer_size - 8u));
    uint32_t lastpos = bs->pos;
    numvals = numruns = 0;
    // Find the length and start location of each run
    while (numvals < maxvals && numruns < num_warps * 12) {
      uint32_t pos = lastpos;
      uint32_t n = bytestream_readbyte(bs, pos++);
      if (n <= 0x7f) {
        // Run: header byte is count-3, followed by a signed delta byte and a base varint
        int32_t delta;
        n = n + 3;
        if (numvals + n > maxvals) break;
        delta = bytestream_readbyte(bs, pos++);
        vals[numvals] = pos & 0xffff;  // position of the base value's varint
        pos += varint_length<T>(bs, pos);
        if (pos > maxpos) break;
        // delta occupies the top byte (sign-extended on extraction via >> 24)
        rle->run_data[numruns++] = (delta << 24) | (n << 16) | numvals;
        numvals += n;
      } else {
        // Literals: 0x100-n individual varints
        uint32_t i;
        n = 0x100 - n;
        if (numvals + n > maxvals) break;
        i = 0;
        do {
          vals[numvals + i] = pos & 0xffff;
          pos += varint_length<T>(bs, pos);
        } while (++i < n);
        if (pos > maxpos) break;
        numvals += n;
      }
      lastpos = pos;
    }
    rle->num_runs = numruns;
    rle->num_vals = numvals;
    bytestream_flush_bytes(bs, lastpos - bs->pos);
  }
  __syncthreads();
  // Expand the runs: each warp replicates (delta*i << 16 | pos) into the run's slots
  numruns = rle->num_runs;
  if (numruns > 0) {
    int r = t >> 5;    // warp index
    int tr = t & 0x1f; // lane index
    for (uint32_t run = r; run < numruns; run += num_warps) {
      int32_t run_data = rle->run_data[run];
      int n = (run_data >> 16) & 0xff;
      int delta = run_data >> 24;  // arithmetic shift recovers the signed delta
      uint32_t base = run_data & 0x3ff;  // assumes first_val index fits 10 bits -- TODO confirm maxvals bound
      uint32_t pos = vals[base] & 0xffff;
      for (int i = 1 + tr; i < n; i += 32) { vals[base + i] = ((delta * i) << 16) | pos; }
    }
    __syncthreads();
  }
  numvals = rle->num_vals;
  // Decode individual varints; upper 16 bits of the slot carry the accumulated delta
  if (t < numvals) {
    int32_t pos = vals[t];
    int32_t delta = pos >> 16;
    T v;
    decode_varint(bs, pos, v);
    vals[t] = v + delta;
  }
  __syncthreads();
  return numvals;
}
/**
 * @brief Maps the RLEv2 5-bit length code to 6-bit length (bit width 1..64)
 */
static const __device__ __constant__ uint8_t kRLEv2_W[32] = {
  1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 16,
  17, 18, 19, 20, 21, 22, 23, 24, 26, 28, 30, 32, 40, 48, 56, 64};
/**
 * @brief Maps the RLEv2 patch size (pw + pgw) to number of bits
 *
 * Patch size (in bits) is only allowed to be from the below set. If `pw + pgw == 34` then the size
 * of the patch in the file is the smallest size in the set that can fit 34 bits i.e.
 * `ClosestFixedBitsMap[34] == 40`
 *
 * @see https://github.com/apache/orc/commit/9faf7f5147a7bc69
 */
static const __device__ __constant__ uint8_t ClosestFixedBitsMap[65] = {
  1,  1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
  22, 23, 24, 26, 26, 28, 28, 30, 30, 32, 32, 40, 40, 40, 40, 40, 40, 40, 40, 48, 48, 48,
  48, 48, 48, 48, 48, 56, 56, 56, 56, 56, 56, 56, 56, 64, 64, 64, 64, 64, 64, 64, 64};
/**
 * @brief ORC Integer RLEv2 decoding
 *
 * Phase 1: thread 0 scans run headers to find each run's start position and
 * value count. Phase 2: one warp per run; lane 0 re-parses the header and
 * broadcasts the parameters, then all lanes decode values in parallel,
 * followed by patch application (mode 2) and delta prefix-sum (mode 3).
 *
 * @param[in] bs input byte stream
 * @param[in] rle RLE state
 * @param[in] vals buffer for output values (uint32_t, int32_t, uint64_t or int64_t)
 * @param[in] maxvals maximum number of values to decode
 * @param[in] t thread id
 *
 * @return number of values decoded
 */
template <class T>
static __device__ uint32_t Integer_RLEv2(
  orc_bytestream_s *bs, volatile orc_rlev2_state_s *rle, volatile T *vals, uint32_t maxvals, int t)
{
  uint32_t numvals, numruns;
  int r, tr;
  if (t == 0) {
    // Only consume bytes that are guaranteed buffered (8-byte refill granularity)
    uint32_t maxpos = min(bs->len, bs->pos + (bytestream_buffer_size - 8u));
    uint32_t lastpos = bs->pos;
    numvals = numruns = 0;
    // Find the length and start location of each run
    while (numvals < maxvals) {
      uint32_t pos = lastpos;
      uint32_t byte0 = bytestream_readbyte(bs, pos++);
      uint32_t n, l;
      int mode = byte0 >> 6;  // top 2 bits select the sub-encoding
      rle->runs_loc[numruns] = numvals;
      vals[numvals] = lastpos;  // stash the run's header position for phase 2
      if (mode == 0) {
        // 00lllnnn: short repeat encoding
        l = 1 + ((byte0 >> 3) & 7); // 1 to 8 bytes
        n = 3 + (byte0 & 7);        // 3 to 10 values
      } else {
        l = kRLEv2_W[(byte0 >> 1) & 0x1f];
        n = 1 + ((byte0 & 1) << 8) + bytestream_readbyte(bs, pos++);
        if (mode == 1) {
          // 01wwwwwn.nnnnnnnn: direct encoding
          l = (l * n + 7) >> 3;
        } else if (mode == 2) {
          // 10wwwwwn.nnnnnnnn.xxxxxxxx.yyyyyyyy: patched base encoding
          uint32_t byte2 = bytestream_readbyte(bs, pos++);
          uint32_t byte3 = bytestream_readbyte(bs, pos++);
          uint32_t bw = 1 + (byte2 >> 5);        // base value width, 1 to 8 bytes
          uint32_t pw = kRLEv2_W[byte2 & 0x1f];  // patch width, 1 to 64 bits
          uint32_t pgw = 1 + (byte3 >> 5);       // patch gap width, 1 to 8 bits
          uint32_t pgw_pw_len = ClosestFixedBitsMap[min(pw + pgw, 64u)];  // ceiled patch width
          uint32_t pll = byte3 & 0x1f;           // patch list length
          l = (l * n + 7) >> 3;
          l += bw;
          l += (pll * (pgw_pw_len) + 7) >> 3;
        } else {
          // 11wwwwwn.nnnnnnnn.<base>.<delta>: delta encoding
          uint32_t deltapos = varint_length<T>(bs, pos);       // base varint
          deltapos += varint_length<T>(bs, pos + deltapos);    // delta varint
          // first two values come from base+delta; remaining n-2 are w-bit packed
          l = (l > 1 && n > 2) ? (l * (n - 2) + 7) >> 3 : 0;
          l += deltapos;
        }
      }
      if (numvals + n > maxvals) break;
      pos += l;
      if (pos > maxpos) break;
      lastpos = pos;
      numvals += n;
      numruns++;
    }
    rle->num_vals = numvals;
    rle->num_runs = numruns;
    bytestream_flush_bytes(bs, lastpos - bs->pos);
  }
  __syncthreads();
  // Process the runs, 1 warp per run
  numruns = rle->num_runs;
  r = t >> 5;    // warp index
  tr = t & 0x1f; // lane index
  for (uint32_t run = r; run < numruns; run += num_warps) {
    uint32_t base, pos, w, n;
    int mode;
    if (tr == 0) {
      // Lane 0 re-parses the run header and stores per-warp state
      uint32_t byte0;
      base = rle->runs_loc[run];
      pos = vals[base];
      byte0 = bytestream_readbyte(bs, pos++);
      mode = byte0 >> 6;
      if (mode == 0) {
        T baseval;
        // 00lllnnn: short repeat encoding
        w = 8 + (byte0 & 0x38); // 8 to 64 bits
        n = 3 + (byte0 & 7);    // 3 to 10 values
        bytestream_readbe(bs, pos * 8, w, baseval);
        if (sizeof(T) <= 4) {
          rle->baseval.u32[r] = baseval;
        } else {
          rle->baseval.u64[r] = baseval;
        }
      } else {
        w = kRLEv2_W[(byte0 >> 1) & 0x1f];
        n = 1 + ((byte0 & 1) << 8) + bytestream_readbyte(bs, pos++);
        if (mode > 1) {
          if (mode == 2) {
            // Patched base
            uint32_t byte2 = bytestream_readbyte(bs, pos++);
            uint32_t byte3 = bytestream_readbyte(bs, pos++);
            uint32_t bw = 1 + (byte2 >> 5);        // base value width, 1 to 8 bytes
            uint32_t pw = kRLEv2_W[byte2 & 0x1f];  // patch width, 1 to 64 bits
            // Base value is sign-magnitude encoded (top bit of the bw-byte field)
            if (sizeof(T) <= 4) {
              uint32_t baseval, mask;
              bytestream_readbe(bs, pos * 8, bw * 8, baseval);
              mask = (1 << (bw * 8 - 1)) - 1;
              rle->baseval.u32[r] = (baseval > mask) ? (-(int32_t)(baseval & mask)) : baseval;
            } else {
              uint64_t baseval, mask;
              bytestream_readbe(bs, pos * 8, bw * 8, baseval);
              mask = 1;
              mask <<= (bw * 8) - 1;
              mask -= 1;
              rle->baseval.u64[r] = (baseval > mask) ? (-(int64_t)(baseval & mask)) : baseval;
            }
            rle->m2_pw_byte3[r] = (pw << 8) | byte3;
            pos += bw;
          } else {
            T baseval;
            int64_t delta;
            // Delta
            pos = decode_varint(bs, pos, baseval);
            if (sizeof(T) <= 4) {
              rle->baseval.u32[r] = baseval;
            } else {
              rle->baseval.u64[r] = baseval;
            }
            pos = decode_varint(bs, pos, delta);
            rle->delta[r] = delta;
          }
        }
      }
    }
    // Broadcast run parameters from lane 0 to the whole warp
    base = shuffle(base);
    mode = shuffle(mode);
    pos = shuffle(pos);
    n = shuffle(n);
    w = shuffle(w);
    for (uint32_t i = tr; i < n; i += 32) {
      if (sizeof(T) <= 4) {
        if (mode == 0) {
          vals[base + i] = rle->baseval.u32[r];
        } else if (mode == 1) {
          T v;
          bytestream_readbe(bs, pos * 8 + i * w, w, v);
          vals[base + i] = v;
        } else if (mode == 2) {
          uint32_t ofs = bytestream_readbits(bs, pos * 8 + i * w, w);
          vals[base + i] = rle->baseval.u32[r] + ofs;
        } else {
          // Delta mode: store raw per-value deltas here; prefix-summed below
          int64_t delta = rle->delta[r];
          if (w > 1 && i > 1) {
            int32_t delta_s = (delta < 0) ? -1 : 0;
            vals[base + i] =
              (bytestream_readbits(bs, pos * 8 + (i - 2) * w, w) ^ delta_s) - delta_s;
          } else {
            vals[base + i] = (i == 0) ? 0 : static_cast<uint32_t>(delta);
          }
        }
      } else {
        if (mode == 0) {
          vals[base + i] = rle->baseval.u64[r];
        } else if (mode == 1) {
          T v;
          bytestream_readbe(bs, pos * 8 + i * w, w, v);
          vals[base + i] = v;
        } else if (mode == 2) {
          uint64_t ofs = bytestream_readbits64(bs, pos * 8 + i * w, w);
          vals[base + i] = rle->baseval.u64[r] + ofs;
        } else {
          int64_t delta = rle->delta[r], ofs;
          if (w > 1 && i > 1) {
            int64_t delta_s = (delta < 0) ? -1 : 0;
            ofs = (bytestream_readbits64(bs, pos * 8 + (i - 2) * w, w) ^ delta_s) - delta_s;
          } else {
            ofs = (i == 0) ? 0 : delta;
          }
          vals[base + i] = ofs;
        }
      }
    }
    __syncwarp();
    // Patch values
    if (mode == 2) {
      uint32_t pw_byte3 = rle->m2_pw_byte3[r];
      uint32_t pw = pw_byte3 >> 8;
      uint32_t pgw = 1 + ((pw_byte3 >> 5) & 7);  // patch gap width, 1 to 8 bits
      uint32_t pll = pw_byte3 & 0x1f;            // patch list length
      if (pll != 0) {
        uint32_t pgw_pw_len = ClosestFixedBitsMap[min(pw + pgw, 64u)];
        // Patch list starts after the byte-aligned end of the packed values
        uint64_t patch_pos64 =
          (tr < pll) ? bytestream_readbits64(
                         bs, pos * 8 + ((n * w + 7) & ~7) + tr * (pgw_pw_len), pgw_pw_len)
                     : 0;
        uint32_t patch_pos;
        T patch = 1;
        patch <<= pw;
        patch = (patch - 1) & (T)patch_pos64;  // low pw bits: patch value
        patch <<= w;                           // patch supplies bits above width w
        patch_pos = (uint32_t)(patch_pos64 >> pw);  // upper bits: gap to next patch
        // Warp-level prefix sum of gaps to get absolute patch positions
        for (uint32_t k = 1; k < pll; k <<= 1) {
          uint32_t tmp = shuffle(patch_pos, (tr & ~k) | (k - 1));
          patch_pos += (tr & k) ? tmp : 0;
        }
        if (tr < pll && patch_pos < n) { vals[base + patch_pos] += patch; }
      }
    }
    __syncwarp();
    if (mode == 3) {
      // Delta mode: prefix-sum the stored deltas, then add the base value
      T baseval;
      for (uint32_t i = 1; i < n; i <<= 1) {
        __syncwarp();
        for (uint32_t j = tr; j < n; j += 32) {
          if (j & i) vals[base + j] += vals[base + ((j & ~i) | (i - 1))];
        }
      }
      if (sizeof(T) <= 4)
        baseval = rle->baseval.u32[r];
      else
        baseval = rle->baseval.u64[r];
      for (uint32_t j = tr; j < n; j += 32) { vals[base + j] += baseval; }
    }
  }
  __syncthreads();
  return rle->num_vals;
}
/**
 * @brief Reads 32 booleans as a packed 32-bit value
 *
 * Bytes are reversed (__byte_perm 0x0123) so the funnel shift extracts bits
 * in big-endian bit order; the final __brev makes result bit i equal to input
 * bit (bitpos + i). Note: reads one word past bitpos/32, caller must ensure
 * vals[] is large enough.
 *
 * @param[in] vals 32-bit array of values (little-endian)
 * @param[in] bitpos bit position
 *
 * @return 32-bit value
 */
inline __device__ uint32_t rle8_read_bool32(volatile uint32_t *vals, uint32_t bitpos)
{
  uint32_t a = vals[(bitpos >> 5) + 0];
  uint32_t b = vals[(bitpos >> 5) + 1];
  a = __byte_perm(a, 0, 0x0123);
  b = __byte_perm(b, 0, 0x0123);
  return __brev(__funnelshift_l(b, a, bitpos));
}
/**
 * @brief ORC Byte RLE decoding
 *
 * Phase 1: thread 0 scans run headers (repeat runs: header<=0x7f -> count-3
 * followed by one value byte; literal runs: 0x100-header bytes follow).
 * Phase 2: one warp per run expands it into `vals`.
 *
 * @param[in] bs Input byte stream
 * @param[in] rle RLE state
 * @param[in] vals output buffer for decoded 8-bit values
 * @param[in] maxvals Maximum number of values to decode
 * @param[in] t thread id
 *
 * @return number of values decoded
 */
static __device__ uint32_t Byte_RLE(orc_bytestream_s *bs,
                                    volatile orc_byterle_state_s *rle,
                                    volatile uint8_t *vals,
                                    uint32_t maxvals,
                                    int t)
{
  uint32_t numvals, numruns;
  int r, tr;
  if (t == 0) {
    // Only consume bytes that are guaranteed buffered (8-byte refill granularity)
    uint32_t maxpos = min(bs->len, bs->pos + (bytestream_buffer_size - 8u));
    uint32_t lastpos = bs->pos;
    numvals = numruns = 0;
    // Find the length and start location of each run
    while (numvals < maxvals && numruns < num_warps) {
      uint32_t pos = lastpos, n;
      rle->runs_pos[numruns] = pos;
      rle->runs_loc[numruns] = numvals;
      n = bytestream_readbyte(bs, pos++);
      if (n <= 0x7f) {
        // Run: one repeated value byte follows
        n = n + 3;
        pos++;
      } else {
        // Literals: n bytes follow
        n = 0x100 - n;
        pos += n;
      }
      if (pos > maxpos || numvals + n > maxvals) { break; }
      numruns++;
      numvals += n;
      lastpos = pos;
    }
    rle->num_runs = numruns;
    rle->num_vals = numvals;
    bytestream_flush_bytes(bs, lastpos - bs->pos);
  }
  __syncthreads();
  numruns = rle->num_runs;
  r = t >> 5;    // warp index
  tr = t & 0x1f; // lane index
  for (int run = r; run < numruns; run += num_warps) {
    uint32_t pos = rle->runs_pos[run];
    uint32_t loc = rle->runs_loc[run];
    uint32_t n = bytestream_readbyte(bs, pos++);
    uint32_t literal_mask;
    if (n <= 0x7f) {
      literal_mask = 0;  // repeat run: every lane reads the same byte
      n += 3;
    } else {
      literal_mask = ~0;  // literal run: lane i reads byte pos+i
      n = 0x100 - n;
    }
    for (uint32_t i = tr; i < n; i += 32) {
      vals[loc + i] = bytestream_readbyte(bs, pos + (i & literal_mask));
    }
  }
  __syncthreads();
  return rle->num_vals;
}
/**
 * @brief Powers of 10 (used for decimal-to-float64 scaling; callers clamp the index to 39)
 */
static const __device__ __constant__ double kPow10[40] = {
  1.0,   1.e1,  1.e2,  1.e3,  1.e4,  1.e5,  1.e6,  1.e7,  1.e8,  1.e9,  1.e10, 1.e11, 1.e12, 1.e13,
  1.e14, 1.e15, 1.e16, 1.e17, 1.e18, 1.e19, 1.e20, 1.e21, 1.e22, 1.e23, 1.e24, 1.e25, 1.e26, 1.e27,
  1.e28, 1.e29, 1.e30, 1.e31, 1.e32, 1.e33, 1.e34, 1.e35, 1.e36, 1.e37, 1.e38, 1.e39,
};
// Powers of 5 (5^0 .. 5^27); combined with shifts to scale by powers of 10
// without overflowing int64 (callers clamp the exponent to 27)
static const __device__ __constant__ int64_t kPow5i[28] = {1,
                                                           5,
                                                           25,
                                                           125,
                                                           625,
                                                           3125,
                                                           15625,
                                                           78125,
                                                           390625,
                                                           1953125,
                                                           9765625,
                                                           48828125,
                                                           244140625,
                                                           1220703125,
                                                           6103515625ll,
                                                           30517578125ll,
                                                           152587890625ll,
                                                           762939453125ll,
                                                           3814697265625ll,
                                                           19073486328125ll,
                                                           95367431640625ll,
                                                           476837158203125ll,
                                                           2384185791015625ll,
                                                           11920928955078125ll,
                                                           59604644775390625ll,
                                                           298023223876953125ll,
                                                           1490116119384765625ll,
                                                           7450580596923828125ll};
/**
 * @brief ORC Decimal decoding (unbounded base-128 varints)
 *
 * Loops over buffer refills: thread 0 records each varint's start position in
 * vals.i64, then one thread per value decodes the int128 and rescales it to
 * either float64 or a scaled int64 depending on col_scale's
 * orc_decimal2float64_scale flag.
 *
 * @param[in] bs Input byte stream
 * @param[in,out] vals on input: scale from secondary stream, on output: value
 * @param[in] numvals Number of values to decode
 * @param[in] t thread id
 *
 * @return number of values decoded
 */
static __device__ int Decode_Decimals(orc_bytestream_s *bs,
                                      volatile orc_byterle_state_s *scratch,
                                      volatile orcdec_state_s::values &vals,
                                      int val_scale,
                                      int numvals,
                                      int col_scale,
                                      int t)
{
  uint32_t num_vals_read = 0;
  // Iterates till `numvals` are read or there is nothing to read once the
  // stream has reached its end, and can't read anything more.
  while (num_vals_read != numvals) {
    if (t == 0) {
      // Scan varint start positions for as many values as the buffer covers
      uint32_t maxpos = min(bs->len, bs->pos + (bytestream_buffer_size - 8u));
      uint32_t lastpos = bs->pos;
      uint32_t n;
      for (n = num_vals_read; n < numvals; n++) {
        uint32_t pos = lastpos;
        pos += varint_length<uint4>(bs, pos);  // uint4: allow full 128-bit varints
        if (pos > maxpos) break;
        vals.i64[n] = lastpos;
        lastpos = pos;
      }
      scratch->num_vals = n;
      bytestream_flush_bytes(bs, lastpos - bs->pos);
    }
    __syncthreads();
    uint32_t num_vals_to_read = scratch->num_vals;
    if (t >= num_vals_read and t < num_vals_to_read) {
      auto const pos = static_cast<int>(vals.i64[t]);
      int128_s v = decode_varint128(bs, pos);
      if (col_scale & orc_decimal2float64_scale) {
        // Convert to double, scaled by 10^-val_scale
        double f = Int128ToDouble_rn(v.lo, v.hi);
        int32_t scale = (t < numvals) ? val_scale : 0;
        if (scale >= 0)
          vals.f64[t] = f / kPow10[min(scale, 39)];
        else
          vals.f64[t] = f * kPow10[min(-scale, 39)];
      } else {
        // Rescale to the column's fixed scale: multiply/divide by 10^scale,
        // expressed as 5^scale combined with a shift by scale
        int32_t scale = (t < numvals) ? (col_scale & ~orc_decimal2float64_scale) - val_scale : 0;
        if (scale >= 0) {
          scale = min(scale, 27);
          vals.i64[t] = ((int64_t)v.lo * kPow5i[scale]) << scale;
        } else // if (scale < 0)
        {
          bool is_negative = (v.hi < 0);
          uint64_t hi = v.hi, lo = v.lo;
          scale = min(-scale, 27);
          if (is_negative) {
            // Negate the 128-bit value so the division operates on the magnitude
            hi = (~hi) + (lo == 0);
            lo = (~lo) + 1;
          }
          lo = (lo >> (uint32_t)scale) | ((uint64_t)hi << (64 - scale));
          hi >>= (int32_t)scale;
          if (hi != 0) {
            // Use intermediate float
            lo = __double2ull_rn(Int128ToDouble_rn(lo, hi) / __ll2double_rn(kPow5i[scale]));
            hi = 0;
          } else {
            lo /= kPow5i[scale];
          }
          vals.i64[t] = (is_negative) ? -(int64_t)lo : (int64_t)lo;
        }
      }
    }
    // There is nothing to read, so break
    if (num_vals_read == num_vals_to_read) break;
    // Update number of values read (This contains values of previous iteration)
    num_vals_read = num_vals_to_read;
    // Have to wait till all threads have copied data
    __syncthreads();
    if (num_vals_read != numvals) {
      bytestream_fill(bs, t);
      __syncthreads();
      if (t == 0) {
        // Needs to be reset since bytestream has been filled
        bs->fill_count = 0;
      }
    }
    // Adding to get all threads in sync before next read
    __syncthreads();
  }
  return num_vals_read;
}
/**
 * @brief Decoding NULLs and builds string dictionary index tables
 *
 * Grid layout: blockIdx.x = column, blockIdx.y in [0, 2*num_stripes):
 * rows >= num_stripes decode the null (PRESENT) stream, rows below
 * num_stripes build the string dictionary for their stripe.
 *
 * @param[in] chunks ColumnDesc device array [stripe][column]
 * @param[in] global_dictionary Global dictionary device array
 * @param[in] num_columns Number of columns
 * @param[in] num_stripes Number of stripes
 * @param[in] max_num_rows Maximum number of rows to load
 * @param[in] first_row Crop all rows below first_row
 */
// blockDim {block_size,1,1}
template <int block_size>
__global__ void __launch_bounds__(block_size)
  gpuDecodeNullsAndStringDictionaries(ColumnDesc *chunks,
                                      DictionaryEntry *global_dictionary,
                                      uint32_t num_columns,
                                      uint32_t num_stripes,
                                      size_t max_num_rows,
                                      size_t first_row)
{
  __shared__ __align__(16) orcdec_state_s state_g;
  using warp_reduce = hipcub::WarpReduce<uint32_t>;
  using block_reduce = hipcub::BlockReduce<uint32_t, block_size>;
  __shared__ union {
    typename warp_reduce::TempStorage wr_storage[block_size / 32];
    typename block_reduce::TempStorage bk_storage;
  } temp_storage;
  orcdec_state_s *const s = &state_g;
  bool is_nulldec = (blockIdx.y >= num_stripes);
  uint32_t column = blockIdx.x;
  uint32_t stripe = (is_nulldec) ? blockIdx.y - num_stripes : blockIdx.y;
  uint32_t chunk_id = stripe * num_columns + column;
  int t = threadIdx.x;
  if (t == 0) s->chunk = chunks[chunk_id];
  __syncthreads();
  if (is_nulldec) {
    uint32_t null_count = 0;
    // Decode NULLs
    if (t == 0) {
      s->chunk.skip_count = 0;
      s->top.nulls_desc_row = 0;
      bytestream_init(&s->bs, s->chunk.streams[CI_PRESENT], s->chunk.strm_len[CI_PRESENT]);
    }
    __syncthreads();
    if (s->chunk.strm_len[CI_PRESENT] == 0) {
      // No present stream: all rows are valid
      s->vals.u32[t] = ~0;
    }
    // Process up to blockDim.x*32 validity bits per iteration
    while (s->top.nulls_desc_row < s->chunk.num_rows) {
      uint32_t nrows_max = min(s->chunk.num_rows - s->top.nulls_desc_row, blockDim.x * 32);
      uint32_t nrows;
      size_t row_in;
      bytestream_fill(&s->bs, t);
      __syncthreads();
      if (s->chunk.strm_len[CI_PRESENT] > 0) {
        // Present stream is byte-RLE, one bit per row
        uint32_t nbytes = Byte_RLE(&s->bs, &s->u.rle8, s->vals.u8, (nrows_max + 7) >> 3, t);
        nrows = min(nrows_max, nbytes * 8u);
        if (!nrows) {
          // Error: mark all remaining rows as null
          nrows = nrows_max;
          if (t * 32 < nrows) { s->vals.u32[t] = 0; }
        }
      } else {
        nrows = nrows_max;
      }
      __syncthreads();
      row_in = s->chunk.start_row + s->top.nulls_desc_row;
      if (row_in + nrows > first_row && row_in < first_row + max_num_rows &&
          s->chunk.valid_map_base != NULL) {
        int64_t dst_row = row_in - first_row;
        int64_t dst_pos = max(dst_row, (int64_t)0);
        uint32_t startbit = -static_cast<int32_t>(min(dst_row, (int64_t)0));
        uint32_t nbits = nrows - min(startbit, nrows);
        uint32_t *valid = s->chunk.valid_map_base + (dst_pos >> 5);
        uint32_t bitpos = static_cast<uint32_t>(dst_pos) & 0x1f;
        if ((size_t)(dst_pos + nbits) > max_num_rows) {
          nbits = static_cast<uint32_t>(max_num_rows - min((size_t)dst_pos, max_num_rows));
        }
        // Store bits up to the next 32-bit aligned boundary
        if (bitpos != 0) {
          uint32_t n = min(32u - bitpos, nbits);
          if (t == 0) {
            // Atomics: another block may own the other bits of this word
            uint32_t mask = ((1 << n) - 1) << bitpos;
            uint32_t bits = (rle8_read_bool32(s->vals.u32, startbit) << bitpos) & mask;
            atomicAnd(valid, ~mask);
            atomicOr(valid, bits);
            null_count += __popc((~bits) & mask);
          }
          nbits -= n;
          startbit += n;
          valid++;
        }
        // Store bits aligned
        if (t * 32 + 32 <= nbits) {
          uint32_t bits = rle8_read_bool32(s->vals.u32, startbit + t * 32);
          valid[t] = bits;
          null_count += __popc(~bits);
        } else if (t * 32 < nbits) {
          // Partial trailing word: merge with atomics
          uint32_t n = nbits - t * 32;
          uint32_t mask = (1 << n) - 1;
          uint32_t bits = rle8_read_bool32(s->vals.u32, startbit + t * 32) & mask;
          atomicAnd(valid + t, ~mask);
          atomicOr(valid + t, bits);
          null_count += __popc((~bits) & mask);
        }
        __syncthreads();
      }
      // We may have some valid values that are not decoded below first_row -> count these in
      // skip_count, so that subsequent kernel can infer the correct row position
      if (row_in < first_row && t < 32) {
        uint32_t skippedrows = min(static_cast<uint32_t>(first_row - row_in), nrows);
        uint32_t skip_count = 0;
        for (uint32_t i = t * 32; i < skippedrows; i += 32 * 32) {
          // Need to arrange the bytes to apply mask properly.
          uint32_t bits = (i + 32 <= skippedrows) ? s->vals.u32[i >> 5]
                                                  : (__byte_perm(s->vals.u32[i >> 5], 0, 0x0123) &
                                                     (0xffffffffu << (0x20 - skippedrows + i)));
          skip_count += __popc(bits);
        }
        skip_count = warp_reduce(temp_storage.wr_storage[t / 32]).Sum(skip_count);
        if (t == 0) { s->chunk.skip_count += skip_count; }
      }
      __syncthreads();
      if (t == 0) { s->top.nulls_desc_row += nrows; }
      __syncthreads();
    }
    __syncthreads();
    // Sum up the valid counts and infer null_count
    null_count = block_reduce(temp_storage.bk_storage).Sum(null_count);
    if (t == 0) {
      chunks[chunk_id].null_count = null_count;
      chunks[chunk_id].skip_count = s->chunk.skip_count;
    }
  } else {
    // Decode string dictionary
    int encoding_kind = s->chunk.encoding_kind;
    if ((encoding_kind == DICTIONARY || encoding_kind == DICTIONARY_V2) &&
        (s->chunk.dict_len > 0)) {
      if (t == 0) {
        s->top.dict.dict_len = s->chunk.dict_len;
        s->top.dict.local_dict = global_dictionary + s->chunk.dictionary_start; // Local dictionary
        s->top.dict.dict_pos = 0;
        // CI_DATA2 contains the LENGTH stream coding the length of individual dictionary entries
        bytestream_init(&s->bs, s->chunk.streams[CI_DATA2], s->chunk.strm_len[CI_DATA2]);
      }
      __syncthreads();
      // Decode up to blockDim.x entry lengths per iteration, converting
      // lengths into (offset, length) dictionary entries
      while (s->top.dict.dict_len > 0) {
        uint32_t numvals = min(s->top.dict.dict_len, blockDim.x), len;
        volatile uint32_t *vals = s->vals.u32;
        bytestream_fill(&s->bs, t);
        __syncthreads();
        if (is_rlev1(s->chunk.encoding_kind)) {
          numvals = Integer_RLEv1(&s->bs, &s->u.rlev1, vals, numvals, t);
        } else // RLEv2
        {
          numvals = Integer_RLEv2(&s->bs, &s->u.rlev2, vals, numvals, t);
        }
        __syncthreads();
        len = (t < numvals) ? vals[t] : 0;
        lengths_to_positions(vals, numvals, t);
        __syncthreads();
        if (numvals == 0) {
          // This is an error (ran out of data)
          numvals = min(s->top.dict.dict_len, blockDim.x);
          vals[t] = 0;
        }
        if (t < numvals) {
          // vals[t] is now the inclusive prefix sum; subtract len to get the start offset
          s->top.dict.local_dict[t] = {s->top.dict.dict_pos + vals[t] - len, len};
        }
        __syncthreads();
        if (t == 0) {
          s->top.dict.dict_pos += vals[numvals - 1];
          s->top.dict.dict_len -= numvals;
          s->top.dict.local_dict += numvals;
        }
        __syncthreads();
      }
    }
  }
}
/**
* @brief Decode row positions from valid bits
*
* @param[in,out] s Column chunk decoder state
* @param[in] first_row crop all rows below first rows
* @param[in] t thread id
* @param[in] temp_storage shared memory storage to perform block reduce
*/
template <typename Storage>
static __device__ void DecodeRowPositions(orcdec_state_s *s,
                                          size_t first_row,
                                          int t,
                                          Storage &temp_storage)
{
  using block_reduce = hipcub::BlockReduce<uint32_t, block_size>;

  // Rows still pending from a previous skip (below first_row) fill the front of
  // the batch; their row[] entries stay 0, which marks them as "do not store".
  if (t == 0) {
    if (s->chunk.skip_count != 0) {
      s->u.rowdec.nz_count = min(min(s->chunk.skip_count, s->top.data.max_vals), blockDim.x);
      s->chunk.skip_count -= s->u.rowdec.nz_count;
      s->top.data.nrows = s->u.rowdec.nz_count;
    } else {
      s->u.rowdec.nz_count = 0;
    }
  }
  __syncthreads();
  if (t < s->u.rowdec.nz_count) {
    s->u.rowdec.row[t] = 0;  // Skipped values (below first_row)
  }
  // Keep consuming rows until we have max_vals non-null positions or run out of rows.
  while (s->u.rowdec.nz_count < s->top.data.max_vals &&
         s->top.data.cur_row + s->top.data.nrows < s->top.data.end_row) {
    uint32_t nrows = min(s->top.data.end_row - (s->top.data.cur_row + s->top.data.nrows),
                         min((row_decoder_buffer_size - s->u.rowdec.nz_count) * 2, blockDim.x));
    if (s->chunk.strm_len[CI_PRESENT] > 0) {
      // We have a present stream: gather each thread's validity bit, then
      // prefix-sum those bits (as uint16) to compute compacted output slots.
      uint32_t rmax = s->top.data.end_row - min((uint32_t)first_row, s->top.data.end_row);
      uint32_t r = (uint32_t)(s->top.data.cur_row + s->top.data.nrows + t - first_row);
      uint32_t valid = (t < nrows && r < rmax)
                         ? (((const uint8_t *)s->chunk.valid_map_base)[r >> 3] >> (r & 7)) & 1
                         : 0;
      volatile uint16_t *row_ofs_plus1 =
        (volatile uint16_t *)&s->u.rowdec.row[s->u.rowdec.nz_count];
      uint32_t nz_pos, row_plus1, nz_count = s->u.rowdec.nz_count, last_row;
      if (t < nrows) { row_ofs_plus1[t] = valid; }
      lengths_to_positions<uint16_t>(row_ofs_plus1, nrows, t);
      if (t < nrows) {
        nz_count += row_ofs_plus1[t];
        row_plus1 = s->top.data.nrows + t + 1;  // 1-based; 0 is reserved for "skip"
      } else {
        row_plus1 = 0;
      }
      if (t == nrows - 1) { s->u.rowdec.nz_count = min(nz_count, s->top.data.max_vals); }
      __syncthreads();
      // TBD: Brute-forcing this, there might be a more efficient way to find the thread with the
      // last row
      last_row = (nz_count == s->u.rowdec.nz_count) ? row_plus1 : 0;
      last_row = block_reduce(temp_storage).Reduce(last_row, hipcub::Max());
      nz_pos = (valid) ? nz_count : 0;
      if (t == 0) { s->top.data.nrows = last_row; }
      if (valid && nz_pos - 1 < s->u.rowdec.nz_count) { s->u.rowdec.row[nz_pos - 1] = row_plus1; }
      __syncthreads();
    } else {
      // All values are valid: positions are just consecutive 1-based row numbers.
      nrows = min(nrows, s->top.data.max_vals - s->u.rowdec.nz_count);
      if (t < nrows) { s->u.rowdec.row[s->u.rowdec.nz_count + t] = s->top.data.nrows + t + 1; }
      __syncthreads();
      if (t == 0) {
        s->top.data.nrows += nrows;
        s->u.rowdec.nz_count += nrows;
      }
      __syncthreads();
    }
  }
}
/**
 * @brief Scale factors for decoding timestamp nanoseconds: the low 3 bits of the
 * encoded value select a power-of-ten multiplier for the remaining bits
 * (the writer strips trailing decimal zeroes; see `(nanos >> 3) * kTimestampNanoScale[nanos & 7]`)
 */
static const __device__ __constant__ uint32_t kTimestampNanoScale[8] = {
  1, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000};
/**
* @brief Decodes column data
*
* @param[in] chunks ColumnDesc device array
* @param[in] global_dictionary Global dictionary device array
* @param[in] tz_table Timezone translation table
* @param[in] row_groups Optional row index data
* @param[in] max_num_rows Maximum number of rows to load
* @param[in] first_row Crop all rows below first_row
 * @param[in] num_columns Number of columns (blocks are indexed per column/chunk)
* @param[in] num_rowgroups Number of row groups in row index data
* @param[in] rowidx_stride Row index stride
*/
// blockDim {block_size,1,1}
template <int block_size>
__global__ void __launch_bounds__(block_size)
  gpuDecodeOrcColumnData(ColumnDesc *chunks,
                         DictionaryEntry *global_dictionary,
                         timezone_table_view tz_table,
                         const RowGroup *row_groups,
                         size_t max_num_rows,
                         size_t first_row,
                         uint32_t num_columns,
                         uint32_t num_rowgroups,
                         uint32_t rowidx_stride)
{
  __shared__ __align__(16) orcdec_state_s state_g;
  __shared__ typename hipcub::BlockReduce<uint32_t, block_size>::TempStorage temp_storage;

  orcdec_state_s *const s = &state_g;
  uint32_t chunk_id;
  int t = threadIdx.x;

  // With row-index data, block (x,y) maps to (column, rowgroup); otherwise x is the chunk id.
  if (num_rowgroups > 0) {
    if (t == 0) s->top.data.index = row_groups[blockIdx.y * num_columns + blockIdx.x];
    __syncthreads();
    chunk_id = s->top.data.index.chunk_id;
  } else {
    chunk_id = blockIdx.x;
  }
  if (t == 0) s->chunk = chunks[chunk_id];
  __syncthreads();
  if (t == 0) {
    // If we have an index, seek to the initial run and update row positions
    if (num_rowgroups > 0) {
      uint32_t ofs0 = min(s->top.data.index.strm_offset[0], s->chunk.strm_len[CI_DATA]);
      uint32_t ofs1 = min(s->top.data.index.strm_offset[1], s->chunk.strm_len[CI_DATA2]);
      uint32_t rowgroup_rowofs;
      s->chunk.streams[CI_DATA] += ofs0;
      s->chunk.strm_len[CI_DATA] -= ofs0;
      s->chunk.streams[CI_DATA2] += ofs1;
      s->chunk.strm_len[CI_DATA2] -= ofs1;
      rowgroup_rowofs = min((blockIdx.y - min(s->chunk.rowgroup_id, blockIdx.y)) * rowidx_stride,
                            s->chunk.num_rows);
      s->chunk.start_row += rowgroup_rowofs;
      s->chunk.num_rows -= rowgroup_rowofs;
    }
    s->is_string = (s->chunk.type_kind == STRING || s->chunk.type_kind == BINARY ||
                    s->chunk.type_kind == VARCHAR || s->chunk.type_kind == CHAR);
    s->top.data.cur_row =
      max(s->chunk.start_row, max((int32_t)(first_row - s->chunk.skip_count), 0));
    s->top.data.end_row = s->chunk.start_row + s->chunk.num_rows;
    s->top.data.buffered_count = 0;
    if (s->top.data.end_row > first_row + max_num_rows) {
      s->top.data.end_row = static_cast<uint32_t>(first_row + max_num_rows);
    }
    if (num_rowgroups > 0) {
      s->top.data.end_row = min(s->top.data.end_row, s->chunk.start_row + rowidx_stride);
    }
    if (!is_dictionary(s->chunk.encoding_kind)) { s->chunk.dictionary_start = 0; }
    s->top.data.utc_epoch = kORCTimeToUTC - tz_table.gmt_offset;
    bytestream_init(&s->bs, s->chunk.streams[CI_DATA], s->chunk.strm_len[CI_DATA]);
    bytestream_init(&s->bs2, s->chunk.streams[CI_DATA2], s->chunk.strm_len[CI_DATA2]);
  }
  __syncthreads();
  // Main loop: each iteration decodes one batch of up to blockDim.x values (2x for BOOLEAN).
  while (s->top.data.cur_row < s->top.data.end_row) {
    bytestream_fill(&s->bs, t);
    bytestream_fill(&s->bs2, t);
    __syncthreads();
    if (t == 0) {
      uint32_t max_vals = s->chunk.start_row + s->chunk.num_rows - s->top.data.cur_row;
      if (num_rowgroups > 0 && (s->is_string || s->chunk.type_kind == TIMESTAMP)) {
        max_vals +=
          s->top.data.index.run_pos[is_dictionary(s->chunk.encoding_kind) ? CI_DATA : CI_DATA2];
      }
      s->bs.fill_count = 0;
      s->bs2.fill_count = 0;
      s->top.data.nrows = 0;
      s->top.data.max_vals =
        min(max_vals, (s->chunk.type_kind == BOOLEAN) ? blockDim.x * 2 : blockDim.x);
    }
    __syncthreads();
    // Decode data streams
    {
      uint32_t numvals = s->top.data.max_vals, secondary_val;
      uint32_t vals_skipped = 0;
      if (s->is_string || s->chunk.type_kind == TIMESTAMP) {
        // For these data types, we have a secondary unsigned 32-bit data stream
        // (string lengths / dictionary indices, or timestamp nanoseconds)
        orc_bytestream_s *bs = (is_dictionary(s->chunk.encoding_kind)) ? &s->bs : &s->bs2;
        uint32_t ofs = 0;
        if (s->chunk.type_kind == TIMESTAMP) {
          // Restore buffered secondary stream values, if any
          ofs = s->top.data.buffered_count;
          if (ofs > 0) {
            __syncthreads();
            if (t == 0) { s->top.data.buffered_count = 0; }
          }
        }
        if (numvals > ofs) {
          if (is_rlev1(s->chunk.encoding_kind)) {
            numvals = ofs + Integer_RLEv1(bs, &s->u.rlev1, &s->vals.u32[ofs], numvals - ofs, t);
          } else {
            numvals = ofs + Integer_RLEv2(bs, &s->u.rlev2, &s->vals.u32[ofs], numvals - ofs, t);
          }
          __syncthreads();
          if (numvals <= ofs && t >= ofs && t < s->top.data.max_vals) { s->vals.u32[t] = 0; }
        }
        // If we're using an index, we may have to drop values from the initial run
        if (num_rowgroups > 0) {
          int cid = is_dictionary(s->chunk.encoding_kind) ? CI_DATA : CI_DATA2;
          uint32_t run_pos = s->top.data.index.run_pos[cid];
          if (run_pos) {
            vals_skipped = min(numvals, run_pos);
            __syncthreads();
            if (t == 0) { s->top.data.index.run_pos[cid] = 0; }
            numvals -= vals_skipped;
            if (t < numvals) { secondary_val = s->vals.u32[vals_skipped + t]; }
            __syncthreads();
            if (t < numvals) { s->vals.u32[t] = secondary_val; }
          }
        }
        __syncthreads();
        // For strings with direct encoding, we need to convert the lengths into an offset;
        // secondary_val keeps this thread's raw length before the prefix sum.
        if (!is_dictionary(s->chunk.encoding_kind)) {
          secondary_val = (t < numvals) ? s->vals.u32[t] : 0;
          if (s->chunk.type_kind != TIMESTAMP) {
            lengths_to_positions(s->vals.u32, numvals, t);
            __syncthreads();
          }
        }
        // Adjust the maximum number of values
        if (numvals == 0 && vals_skipped == 0) {
          numvals = s->top.data.max_vals;  // Just so that we don't hang if the stream is corrupted
        }
        if (t == 0 && numvals < s->top.data.max_vals) { s->top.data.max_vals = numvals; }
      }
      __syncthreads();
      // Account for skipped values
      if (num_rowgroups > 0 && !s->is_string) {
        uint32_t run_pos = (s->chunk.type_kind == DECIMAL) ? s->top.data.index.run_pos[CI_DATA2]
                                                           : s->top.data.index.run_pos[CI_DATA];
        numvals =
          min(numvals + run_pos, (s->chunk.type_kind == BOOLEAN) ? blockDim.x * 2 : blockDim.x);
      }
      // Decode the primary data stream
      if (s->chunk.type_kind == INT || s->chunk.type_kind == DATE || s->chunk.type_kind == SHORT) {
        // Signed int32 primary data stream
        if (is_rlev1(s->chunk.encoding_kind)) {
          numvals = Integer_RLEv1(&s->bs, &s->u.rlev1, s->vals.i32, numvals, t);
        } else {
          numvals = Integer_RLEv2(&s->bs, &s->u.rlev2, s->vals.i32, numvals, t);
        }
        __syncthreads();
      } else if (s->chunk.type_kind == BYTE) {
        numvals = Byte_RLE(&s->bs, &s->u.rle8, s->vals.u8, numvals, t);
        __syncthreads();
      } else if (s->chunk.type_kind == BOOLEAN) {
        int n = ((numvals + 7) >> 3);
        if (n > s->top.data.buffered_count) {
          numvals = Byte_RLE(&s->bs,
                             &s->u.rle8,
                             &s->vals.u8[s->top.data.buffered_count],
                             n - s->top.data.buffered_count,
                             t) +
                    s->top.data.buffered_count;
        } else {
          numvals = s->top.data.buffered_count;
        }
        __syncthreads();
        if (t == 0) {
          s->top.data.buffered_count = 0;
          s->top.data.max_vals = min(s->top.data.max_vals, blockDim.x);
        }
        __syncthreads();
        // If the condition is false, then it means that s->top.data.max_vals is last set of values.
        // And as numvals is considered to be min(`max_vals+s->top.data.index.run_pos[CI_DATA]`,
        // blockDim.x*2) we have to return numvals >= s->top.data.index.run_pos[CI_DATA].
        auto const is_last_set = (s->top.data.max_vals >= s->top.data.index.run_pos[CI_DATA]);
        auto const max_vals = (is_last_set ? s->top.data.max_vals + 7 : blockDim.x) / 8;
        n = numvals - max_vals;
        if (t < n) {
          // Carry the extra decoded bytes over to the next batch via secondary_val
          secondary_val = s->vals.u8[max_vals + t];
          if (t == 0) { s->top.data.buffered_count = n; }
        }
        numvals = min(numvals * 8, is_last_set ? s->top.data.max_vals : blockDim.x);
      } else if (s->chunk.type_kind == LONG || s->chunk.type_kind == TIMESTAMP ||
                 s->chunk.type_kind == DECIMAL) {
        orc_bytestream_s *bs = (s->chunk.type_kind == DECIMAL) ? &s->bs2 : &s->bs;
        if (is_rlev1(s->chunk.encoding_kind)) {
          numvals = Integer_RLEv1<int64_t>(bs, &s->u.rlev1, s->vals.i64, numvals, t);
        } else {
          numvals = Integer_RLEv2<int64_t>(bs, &s->u.rlev2, s->vals.i64, numvals, t);
        }
        if (s->chunk.type_kind == DECIMAL) {
          // If we're using an index, we may have to drop values from the initial run
          uint32_t skip = 0;
          int val_scale;
          if (num_rowgroups > 0) {
            uint32_t run_pos = s->top.data.index.run_pos[CI_DATA2];
            if (run_pos) {
              skip = min(numvals, run_pos);
              __syncthreads();
              if (t == 0) { s->top.data.index.run_pos[CI_DATA2] = 0; }
              numvals -= skip;
            }
          }
          val_scale = (t < numvals) ? (int)s->vals.i64[skip + t] : 0;
          __syncthreads();
          numvals = Decode_Decimals(
            &s->bs, &s->u.rle8, s->vals, val_scale, numvals, s->chunk.decimal_scale, t);
        }
        __syncthreads();
      } else if (s->chunk.type_kind == FLOAT) {
        numvals = min(numvals, (bytestream_buffer_size - 8u) >> 2);
        if (t < numvals) { s->vals.u32[t] = bytestream_readu32(&s->bs, s->bs.pos + t * 4); }
        __syncthreads();
        if (t == 0) { bytestream_flush_bytes(&s->bs, numvals * 4); }
        __syncthreads();
      } else if (s->chunk.type_kind == DOUBLE) {
        numvals = min(numvals, (bytestream_buffer_size - 8u) >> 3);
        if (t < numvals) { s->vals.u64[t] = bytestream_readu64(&s->bs, s->bs.pos + t * 8); }
        __syncthreads();
        if (t == 0) { bytestream_flush_bytes(&s->bs, numvals * 8); }
        __syncthreads();
      }
      __syncthreads();
      if (numvals == 0 && vals_skipped != 0 && num_rowgroups > 0) {
        // Special case if the secondary streams produced fewer values than the primary stream's RLE
        // run, as a result of initial RLE run offset: keep vals_skipped as non-zero to ensure
        // proper buffered_count/max_vals update below.
      } else {
        vals_skipped = 0;
        if (num_rowgroups > 0) {
          uint32_t run_pos = s->top.data.index.run_pos[CI_DATA];
          if (run_pos) {
            vals_skipped = min(numvals, run_pos);
            numvals -= vals_skipped;
            __syncthreads();
            if (t == 0) { s->top.data.index.run_pos[CI_DATA] = 0; }
          }
        }
      }
      if (t == 0 && numvals + vals_skipped > 0 && numvals < s->top.data.max_vals) {
        if (s->chunk.type_kind == TIMESTAMP) {
          s->top.data.buffered_count = s->top.data.max_vals - numvals;
        }
        s->top.data.max_vals = numvals;
      }
      __syncthreads();
      // Use the valid bits to compute non-null row positions until we get a full batch of values to
      // decode
      DecodeRowPositions(s, first_row, t, temp_storage);
      if (!s->top.data.nrows && !s->u.rowdec.nz_count && !vals_skipped) {
        // This is a bug (could happen with bitstream errors with a bad run that would produce more
        // values than the number of remaining rows)
        return;
      }
      // Store decoded values to output
      if (t < min(min(s->top.data.max_vals, s->u.rowdec.nz_count), s->top.data.nrows) &&
          s->u.rowdec.row[t] != 0 &&
          s->top.data.cur_row + s->u.rowdec.row[t] - 1 < s->top.data.end_row) {
        size_t row = s->top.data.cur_row + s->u.rowdec.row[t] - 1 - first_row;
        if (row < max_num_rows) {
          void *data_out = s->chunk.column_data_base;
          switch (s->chunk.type_kind) {
            case FLOAT:
            case INT: static_cast<uint32_t *>(data_out)[row] = s->vals.u32[t + vals_skipped]; break;
            case DOUBLE:
            case LONG:
            case DECIMAL:
              static_cast<uint64_t *>(data_out)[row] = s->vals.u64[t + vals_skipped];
              break;
            case SHORT:
              static_cast<uint16_t *>(data_out)[row] =
                static_cast<uint16_t>(s->vals.u32[t + vals_skipped]);
              break;
            case BYTE: static_cast<uint8_t *>(data_out)[row] = s->vals.u8[t + vals_skipped]; break;
            case BOOLEAN:
              static_cast<uint8_t *>(data_out)[row] =
                (s->vals.u8[(t + vals_skipped) >> 3] >> ((~(t + vals_skipped)) & 7)) & 1;
              break;
            case DATE:
              if (s->chunk.dtype_len == 8) {
                // Convert from days to milliseconds by multiplying by 24*3600*1000
                static_cast<int64_t *>(data_out)[row] =
                  86400000ll * (int64_t)s->vals.i32[t + vals_skipped];
              } else {
                static_cast<uint32_t *>(data_out)[row] = s->vals.u32[t + vals_skipped];
              }
              break;
            case STRING:
            case BINARY:
            case VARCHAR:
            case CHAR: {
              nvstrdesc_s *strdesc = &static_cast<nvstrdesc_s *>(data_out)[row];
              void const *ptr = nullptr;
              uint32_t count = 0;
              if (is_dictionary(s->chunk.encoding_kind)) {
                auto const dict_idx = s->vals.u32[t + vals_skipped];
                if (dict_idx < s->chunk.dict_len) {
                  auto const &g_entry = global_dictionary[s->chunk.dictionary_start + dict_idx];
                  ptr = s->chunk.streams[CI_DICTIONARY] + g_entry.pos;
                  count = g_entry.len;
                }
              } else {
                // Direct encoding: vals.u32 holds end offsets (after lengths_to_positions);
                // secondary_val is this thread's string length.
                auto const dict_idx =
                  s->chunk.dictionary_start + s->vals.u32[t + vals_skipped] - secondary_val;
                // BUGFIX: bounds check must cover the whole string (start + length);
                // previously this compared `dict_idx + count` with `count` always zero here,
                // so a string ending past the CI_DATA stream could be emitted out-of-bounds.
                if (dict_idx + secondary_val <= s->chunk.strm_len[CI_DATA]) {
                  ptr = s->chunk.streams[CI_DATA] + dict_idx;
                  count = secondary_val;
                }
              }
              strdesc->ptr = static_cast<char const *>(ptr);
              strdesc->count = count;
              break;
            }
            case TIMESTAMP: {
              int64_t seconds = s->vals.i64[t + vals_skipped] + s->top.data.utc_epoch;
              uint32_t nanos = secondary_val;
              nanos = (nanos >> 3) * kTimestampNanoScale[nanos & 7];
              if (!tz_table.ttimes.empty()) {
                seconds += get_gmt_offset(tz_table.ttimes, tz_table.offsets, seconds);
              }
              if (seconds < 0 && nanos != 0) { seconds -= 1; }
              if (s->chunk.ts_clock_rate)
                static_cast<int64_t *>(data_out)[row] =
                  seconds * s->chunk.ts_clock_rate +
                  (nanos + (499999999 / s->chunk.ts_clock_rate)) /
                    (1000000000 / s->chunk.ts_clock_rate);  // Output to desired clock rate
              else
                static_cast<int64_t *>(data_out)[row] = seconds * 1000000000 + nanos;
              break;
            }
          }
        }
      }
      __syncthreads();
      // Buffer secondary stream values
      if (s->chunk.type_kind == TIMESTAMP) {
        int buffer_pos = s->top.data.max_vals;
        if (t >= buffer_pos && t < buffer_pos + s->top.data.buffered_count) {
          s->vals.u32[t - buffer_pos] = secondary_val;
        }
      } else if (s->chunk.type_kind == BOOLEAN && t < s->top.data.buffered_count) {
        s->vals.u8[t] = secondary_val;
      }
    }
    __syncthreads();
    if (t == 0) {
      s->top.data.cur_row += s->top.data.nrows;
      if (s->is_string && !is_dictionary(s->chunk.encoding_kind) && s->top.data.max_vals > 0) {
        s->chunk.dictionary_start += s->vals.u32[s->top.data.max_vals - 1];
      }
    }
    __syncthreads();
  }
}
/**
* @brief Launches kernel for decoding NULLs and building string dictionary index tables
*
* @param[in] chunks ColumnDesc device array [stripe][column]
* @param[in] global_dictionary Global dictionary device array
* @param[in] num_columns Number of columns
* @param[in] num_stripes Number of stripes
 * @param[in] max_num_rows Maximum number of rows to load
* @param[in] first_row Crop all rows below first_row
* @param[in] stream CUDA stream to use, default 0
*/
void __host__ DecodeNullsAndStringDictionaries(ColumnDesc *chunks,
                                               DictionaryEntry *global_dictionary,
                                               uint32_t num_columns,
                                               uint32_t num_stripes,
                                               size_t max_num_rows,
                                               size_t first_row,
                                               rmm::cuda_stream_view stream)
{
  // One column per grid x slot; 2 * num_stripes blocks in y (1024 threads per chunk).
  dim3 const block_dim(block_size, 1);
  dim3 const grid_dim(num_columns, num_stripes * 2);
  hipLaunchKernelGGL(( gpuDecodeNullsAndStringDictionaries<block_size>), dim3(grid_dim), dim3(block_dim), 0, stream.value(),
    chunks, global_dictionary, num_columns, num_stripes, max_num_rows, first_row);
}
/**
* @brief Launches kernel for decoding column data
*
* @param[in] chunks ColumnDesc device array [stripe][column]
* @param[in] global_dictionary Global dictionary device array
* @param[in] num_columns Number of columns
* @param[in] num_stripes Number of stripes
 * @param[in] max_num_rows Maximum number of rows to load
* @param[in] first_row Crop all rows below first_row
* @param[in] tz_table Timezone translation table
* @param[in] row_groups Optional row index data
* @param[in] num_rowgroups Number of row groups in row index data
* @param[in] rowidx_stride Row index stride
* @param[in] stream CUDA stream to use, default 0
*/
void __host__ DecodeOrcColumnData(ColumnDesc *chunks,
                                  DictionaryEntry *global_dictionary,
                                  uint32_t num_columns,
                                  uint32_t num_stripes,
                                  size_t max_num_rows,
                                  size_t first_row,
                                  timezone_table_view tz_table,
                                  const RowGroup *row_groups,
                                  uint32_t num_rowgroups,
                                  uint32_t rowidx_stride,
                                  rmm::cuda_stream_view stream)
{
  uint32_t const num_chunks = num_columns * num_stripes;
  bool const use_index      = (num_rowgroups > 0);
  // With row-index data: one block per (column, rowgroup); otherwise one block per chunk.
  dim3 const block_dim(block_size, 1);  // 1024 threads per chunk
  dim3 const grid_dim(use_index ? num_columns : num_chunks, use_index ? num_rowgroups : 1);
  hipLaunchKernelGGL(( gpuDecodeOrcColumnData<block_size>), dim3(grid_dim), dim3(block_dim), 0, stream.value(),
    chunks,
    global_dictionary,
    tz_table,
    row_groups,
    max_num_rows,
    first_row,
    num_columns,
    num_rowgroups,
    rowidx_stride);
}
} // namespace gpu
} // namespace orc
} // namespace io
} // namespace cudf
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cub/cub.cuh>
#include <io/utilities/block_utils.cuh>
#include <rmm/cuda_stream_view.hpp>
#include "orc_common.h"
#include "orc_gpu.h"
namespace cudf {
namespace io {
namespace orc {
namespace gpu {
// Must be able to handle 512x 8-byte values. These values are base 128 encoded
// so 8 byte value is expanded to 10 bytes.
constexpr int bytestream_buffer_size = 512 * 8 * 2;
// Wrap mask for indexing the circular buffer in 32-bit words
constexpr int bytestream_buffer_mask = (bytestream_buffer_size - 1) >> 2;

// TODO: Should be more efficient with 512 threads per block and circular queue for values
constexpr int num_warps  = 32;
constexpr int block_size = 32 * num_warps;
// Add some margin to look ahead to future rows in case there are many zeroes
constexpr int row_decoder_buffer_size = block_size + 128;

// Encodings numbered below DIRECT_V2 use RLEv1 integer coding
inline __device__ uint8_t is_rlev1(uint8_t encoding_mode) { return encoding_mode < DIRECT_V2; }

// Dictionary-based encodings have the low bit set in this enum ordering
inline __device__ uint8_t is_dictionary(uint8_t encoding_mode) { return encoding_mode & 1; }

static __device__ __constant__ int64_t kORCTimeToUTC =
  1420070400;  // Seconds from January 1st, 1970 to January 1st, 2015
// Little-endian signed 128-bit value (used by the DECIMAL varint decoder)
struct int128_s {
  uint64_t lo;  // low 64 bits
  int64_t hi;   // high 64 bits (carries the sign)
};

// Circular byte-stream buffer backed by an 8-byte-aligned window of the input stream
struct orc_bytestream_s {
  const uint8_t *base;  // 8-byte-aligned start of the stream in global memory
  uint32_t pos;         // current read position, in bytes from base
  uint32_t len;         // stream length, rounded up to a multiple of 8 bytes
  uint32_t fill_pos;    // byte position from which the buffer gets refilled next
  uint32_t fill_count;  // number of 8-byte slots to fetch on the next bytestream_fill()
  union {
    uint8_t u8[bytestream_buffer_size];
    uint32_t u32[bytestream_buffer_size >> 2];
    uint2 u64[bytestream_buffer_size >> 3];
  } buf;  // circular buffer, accessed at byte/word/dword granularity
};

// State for the ORC integer RLEv1 decoder
struct orc_rlev1_state_s {
  uint32_t num_runs;  // number of runs found in the current batch
  uint32_t num_vals;  // number of values covered by those runs
  int32_t run_data[num_warps * 12];  // (delta << 24) | (count << 16) | (first_val)
};

// State for the ORC integer RLEv2 decoder
struct orc_rlev2_state_s {
  uint32_t num_runs;
  uint32_t num_vals;
  union {
    uint32_t u32[num_warps];
    uint64_t u64[num_warps];
  } baseval;  // per-warp base values
  uint16_t m2_pw_byte3[num_warps];
  int64_t delta[num_warps];
  uint16_t runs_loc[block_size];
};

// State for the byte-oriented RLE decoder
struct orc_byterle_state_s {
  uint32_t num_runs;
  uint32_t num_vals;
  uint32_t runs_loc[num_warps];
  uint32_t runs_pos[num_warps];
};

// Non-null row positions for the current batch
struct orc_rowdec_state_s {
  uint32_t nz_count;  // number of non-null positions gathered so far
  uint32_t row[row_decoder_buffer_size];  // 0=skip, >0: row position relative to cur_row
};

// Progress through the local string dictionary
struct orc_strdict_state_s {
  DictionaryEntry *local_dict;  // next dictionary entries to fill
  uint32_t dict_pos;            // byte offset of the next entry's string data
  uint32_t dict_len;            // number of entries left to decode
};

// Per-chunk data-decoding progress
struct orc_datadec_state_s {
  uint32_t cur_row;         // starting row of current batch
  uint32_t end_row;         // ending row of this chunk (start_row + num_rows)
  uint32_t max_vals;        // max # of non-zero values to decode in this batch
  uint32_t nrows;           // # of rows in current batch (up to block_size)
  uint32_t buffered_count;  // number of buffered values in the secondary data stream
  int64_t utc_epoch;        // kORCTimeToUTC - gmtOffset
  RowGroup index;
};

// Complete per-block decoder state kept in shared memory
struct orcdec_state_s {
  ColumnDesc chunk;
  orc_bytestream_s bs;   // primary data stream
  orc_bytestream_s bs2;  // secondary data stream
  int is_string;
  union {
    orc_strdict_state_s dict;
    uint32_t nulls_desc_row;  // number of rows processed for nulls.
    orc_datadec_state_s data;
  } top;  // phase-specific top-level state
  union {
    orc_rlev1_state_s rlev1;
    orc_rlev2_state_s rlev2;
    orc_byterle_state_s rle8;
    orc_rowdec_state_s rowdec;
  } u;  // decoder-specific scratch state
  union values {
    uint8_t u8[block_size * 8];
    uint32_t u32[block_size * 2];
    int32_t i32[block_size * 2];
    uint64_t u64[block_size];
    int64_t i64[block_size];
    double f64[block_size];
  } vals;  // decoded value staging area
};
/**
 * @brief Initializes byte stream, adjusting the start pointer and length so that
 * the read pointer stays 8-byte aligned.
 *
 * Assumes that the address range [start_address & ~7, (start_address + len - 1) | 7]
 * is valid.
 *
 * @param[in,out] bs Byte stream input
 * @param[in] base Pointer to raw byte stream data
 * @param[in] len Stream length in bytes
 */
static __device__ void bytestream_init(volatile orc_bytestream_s *bs,
                                       const uint8_t *base,
                                       uint32_t len)
{
  // Offset of `base` from the previous 8-byte boundary
  uint32_t const misalign   = static_cast<uint32_t>(7 & reinterpret_cast<size_t>(base));
  uint32_t const padded_len = (len + misalign + 7) & ~7;
  bs->base       = base - misalign;
  bs->pos        = (len > 0) ? misalign : 0;
  bs->len        = padded_len;
  bs->fill_pos   = 0;
  bs->fill_count = min(padded_len, bytestream_buffer_size) >> 3;
}
/**
 * @brief Advances the read position past consumed bytes and computes how many
 * 64-bit slots the next fill must fetch.
 *
 * @param[in] bs Byte stream input
 * @param[in] bytes_consumed Number of bytes that were consumed
 */
static __device__ void bytestream_flush_bytes(volatile orc_bytestream_s *bs,
                                              uint32_t bytes_consumed)
{
  uint32_t const total_len = bs->len;
  uint32_t const old_pos   = bs->pos;
  uint32_t const new_pos   = min(old_pos + bytes_consumed, total_len);
  bs->pos = new_pos;
  // The buffered window ends one buffer-size ahead of the read position,
  // clamped to the end of the stream.
  uint32_t const old_end = min(old_pos + bytestream_buffer_size, total_len);
  uint32_t const new_end = min(new_pos + bytestream_buffer_size, total_len);
  bs->fill_pos   = old_end;
  bs->fill_count = (new_end >> 3) - (old_end >> 3);
}
/**
 * @brief Refill the byte stream's circular buffer from global memory,
 * one 8-byte slot per participating thread.
 *
 * @param[in] bs Byte stream input
 * @param[in] t thread id
 */
static __device__ void bytestream_fill(orc_bytestream_s *bs, int t)
{
  if (t < static_cast<int>(bs->fill_count)) {
    constexpr uint32_t slot_mask = (bytestream_buffer_size >> 3) - 1;
    auto const slot              = (bs->fill_pos >> 3) + t;
    memcpy(&bs->buf.u64[slot & slot_mask], &bs->base[slot * sizeof(uint2)], sizeof(uint2));
  }
}
/**
 * @brief Read a byte from the byte stream (byte aligned)
 *
 * @param[in] bs Byte stream input
 * @param[in] pos Position in byte stream
 * @return byte
 */
inline __device__ uint8_t bytestream_readbyte(volatile orc_bytestream_s *bs, int pos)
{
  constexpr int wrap_mask = bytestream_buffer_size - 1;  // circular-buffer wrap
  return bs->buf.u8[pos & wrap_mask];
}
/**
 * @brief Read 32 bits from a byte stream (little endian, byte aligned)
 *
 * @param[in] bs Byte stream input
 * @param[in] pos Position in byte stream
 * @return decoded 32-bit value
 */
inline __device__ uint32_t bytestream_readu32(volatile orc_bytestream_s *bs, int pos)
{
  constexpr int wrap_mask = bytestream_buffer_size - 1;
  uint32_t const w0       = bs->buf.u32[(pos & wrap_mask) >> 2];
  uint32_t const w1       = bs->buf.u32[((pos + 4) & wrap_mask) >> 2];
  // Funnel-shift right by the sub-word byte offset, pulling bytes in from the next word
  return __funnelshift_r(w0, w1, (pos & 3) * 8);
}
/**
 * @brief Read 64 bits from a byte stream (little endian, byte aligned)
 *
 * @param[in] bs Byte stream input
 * @param[in] pos Position in byte stream
 * @return decoded 64-bit value
 */
inline __device__ uint64_t bytestream_readu64(volatile orc_bytestream_s *bs, int pos)
{
  constexpr int wrap_mask = bytestream_buffer_size - 1;
  int const shift         = (pos & 3) * 8;
  uint32_t const w0       = bs->buf.u32[(pos & wrap_mask) >> 2];
  uint32_t const w1       = bs->buf.u32[((pos + 4) & wrap_mask) >> 2];
  uint32_t const w2       = bs->buf.u32[((pos + 8) & wrap_mask) >> 2];
  // Assemble the two 32-bit halves, each funnel-shifted by the sub-word byte offset
  uint64_t const lo32 = __funnelshift_r(w0, w1, shift);
  uint64_t const hi32 = __funnelshift_r(w1, w2, shift);
  return (hi32 << 32) | lo32;
}
/**
 * @brief Read up to 32-bits from a byte stream (big endian)
 *
 * @param[in] bs Byte stream input
 * @param[in] bitpos Position in byte stream
 * @param[in] numbits number of bits
 * @return decoded value
 */
inline __device__ uint32_t bytestream_readbits(volatile orc_bytestream_s *bs,
                                               int bitpos,
                                               uint32_t numbits)
{
  int idx = bitpos >> 5;
  // __byte_perm with selector 0x0123 reverses byte order (little- to big-endian word)
  uint32_t a = __byte_perm(bs->buf.u32[(idx + 0) & bytestream_buffer_mask], 0, 0x0123);
  uint32_t b = __byte_perm(bs->buf.u32[(idx + 1) & bytestream_buffer_mask], 0, 0x0123);
  return __funnelshift_l(b, a, bitpos & 0x1f) >> (32 - numbits);
}

/**
 * @brief Read up to 64-bits from a byte stream (big endian)
 *
 * @param[in] bs Byte stream input
 * @param[in] bitpos Position in byte stream
 * @param[in] numbits number of bits
 * @return decoded value
 */
inline __device__ uint64_t bytestream_readbits64(volatile orc_bytestream_s *bs,
                                                 int bitpos,
                                                 uint32_t numbits)
{
  int idx = bitpos >> 5;
  // Byte-reverse three consecutive words, then funnel-shift to align the bit position
  uint32_t a = __byte_perm(bs->buf.u32[(idx + 0) & bytestream_buffer_mask], 0, 0x0123);
  uint32_t b = __byte_perm(bs->buf.u32[(idx + 1) & bytestream_buffer_mask], 0, 0x0123);
  uint32_t c = __byte_perm(bs->buf.u32[(idx + 2) & bytestream_buffer_mask], 0, 0x0123);
  uint32_t hi32 = __funnelshift_l(b, a, bitpos & 0x1f);
  uint32_t lo32 = __funnelshift_l(c, b, bitpos & 0x1f);
  uint64_t v = hi32;
  v <<= 32;
  v |= lo32;
  v >>= (64 - numbits);  // keep only the requested number of bits
  return v;
}

/**
 * @brief Decode a big-endian unsigned 32-bit value
 *
 * @param[in] bs Byte stream input
 * @param[in] bitpos Position in byte stream
 * @param[in] numbits number of bits
 * @param[out] result decoded value
 */
inline __device__ void bytestream_readbe(volatile orc_bytestream_s *bs,
                                         int bitpos,
                                         uint32_t numbits,
                                         uint32_t &result)
{
  result = bytestream_readbits(bs, bitpos, numbits);
}

/**
 * @brief Decode a big-endian zigzag-encoded signed 32-bit value
 *
 * @param[in] bs Byte stream input
 * @param[in] bitpos Position in byte stream
 * @param[in] numbits number of bits
 * @param[out] result decoded value
 */
inline __device__ void bytestream_readbe(volatile orc_bytestream_s *bs,
                                         int bitpos,
                                         uint32_t numbits,
                                         int32_t &result)
{
  uint32_t u = bytestream_readbits(bs, bitpos, numbits);
  // Zigzag decode: the LSB carries the sign
  result = (int32_t)((u >> 1u) ^ -(int32_t)(u & 1));
}

/**
 * @brief Decode a big-endian unsigned 64-bit value
 *
 * @param[in] bs Byte stream input
 * @param[in] bitpos Position in byte stream
 * @param[in] numbits number of bits
 * @param[out] result decoded value
 */
inline __device__ void bytestream_readbe(volatile orc_bytestream_s *bs,
                                         int bitpos,
                                         uint32_t numbits,
                                         uint64_t &result)
{
  result = bytestream_readbits64(bs, bitpos, numbits);
}

/**
 * @brief Decode a big-endian zigzag-encoded signed 64-bit value
 *
 * @param[in] bs Byte stream input
 * @param[in] bitpos Position in byte stream
 * @param[in] numbits number of bits
 * @param[out] result decoded value
 */
inline __device__ void bytestream_readbe(volatile orc_bytestream_s *bs,
                                         int bitpos,
                                         uint32_t numbits,
                                         int64_t &result)
{
  uint64_t u = bytestream_readbits64(bs, bitpos, numbits);
  // Zigzag decode: the LSB carries the sign
  result = (int64_t)((u >> 1u) ^ -(int64_t)(u & 1));
}
/**
 * @brief Return the length of a base-128 varint without decoding it, by scanning
 * for the first byte with a clear continuation bit (bit 7)
 *
 * @tparam T type being decoded (bounds the maximum number of 7-bit groups)
 * @param[in] bs Byte stream input
 * @param[in] pos Position in circular byte stream buffer
 * @return length of varint in bytes
 */
template <class T>
inline __device__ uint32_t varint_length(volatile orc_bytestream_s *bs, int pos)
{
  if (bytestream_readbyte(bs, pos) > 0x7f) {
    // Scan 4 continuation bits at a time: __ffs on the inverted MSBs locates
    // the first terminating byte
    uint32_t next32 = bytestream_readu32(bs, pos + 1);
    uint32_t zbit = __ffs((~next32) & 0x80808080);
    if (sizeof(T) <= 4 || zbit) {
      return 1 + (zbit >> 3);  // up to 5x7 bits
    } else {
      next32 = bytestream_readu32(bs, pos + 5);
      zbit = __ffs((~next32) & 0x80808080);
      if (zbit) {
        return 5 + (zbit >> 3);  // up to 9x7 bits
      } else if ((sizeof(T) <= 8) || (bytestream_readbyte(bs, pos + 9) <= 0x7f)) {
        return 10;  // up to 70 bits
      } else {
        // 128-bit values: scan 8 more continuation bits at once
        uint64_t next64 = bytestream_readu64(bs, pos + 10);
        zbit = __ffsll((~next64) & 0x8080808080808080ull);
        if (zbit) {
          return 10 + (zbit >> 3);  // Up to 18x7 bits (126)
        } else {
          return 19;  // Up to 19x7 bits (133)
        }
      }
    }
  } else {
    return 1;
  }
}
/**
 * @brief Decodes a base-128 varint: 7 payload bits per byte, LSB group first,
 * with bit 7 of each byte acting as a continuation flag
 *
 * @tparam T 32- or 64-bit unsigned result type
 * @param[in] bs Byte stream input
 * @param[in] pos Position in circular byte stream buffer
 * @param[out] result Unpacked value
 * @return new position in byte stream buffer
 */
template <class T>
inline __device__ int decode_base128_varint(volatile orc_bytestream_s *bs, int pos, T &result)
{
  uint32_t v = bytestream_readbyte(bs, pos++);
  if (v > 0x7f) {
    uint32_t b = bytestream_readbyte(bs, pos++);
    v = (v & 0x7f) | (b << 7);
    if (b > 0x7f) {
      b = bytestream_readbyte(bs, pos++);
      v = (v & 0x3fff) | (b << 14);
      if (b > 0x7f) {
        b = bytestream_readbyte(bs, pos++);
        v = (v & 0x1fffff) | (b << 21);
        if (b > 0x7f) {
          b = bytestream_readbyte(bs, pos++);
          v = (v & 0x0fffffff) | (b << 28);
          if (sizeof(T) > 4) {
            // 64-bit path: the 5th byte straddles the 32-bit boundary, so save
            // the low word and keep accumulating the high word in v
            uint32_t lo = v;
            uint64_t hi;
            v = b >> 4;
            if (b > 0x7f) {
              b = bytestream_readbyte(bs, pos++);
              v = (v & 7) | (b << 3);
              if (b > 0x7f) {
                b = bytestream_readbyte(bs, pos++);
                v = (v & 0x3ff) | (b << 10);
                if (b > 0x7f) {
                  b = bytestream_readbyte(bs, pos++);
                  v = (v & 0x1ffff) | (b << 17);
                  if (b > 0x7f) {
                    b = bytestream_readbyte(bs, pos++);
                    v = (v & 0xffffff) | (b << 24);
                    if (b > 0x7f) {
                      pos++;  // last bit is redundant (extra byte implies bit63 is 1)
                    }
                  }
                }
              }
            }
            hi = v;
            hi <<= 32;
            result = hi | lo;
            return pos;
          }
        }
      }
    }
  }
  result = v;
  return pos;
}
/**
 * @brief Decodes a signed int128 encoded as base-128 varint (used for decimals).
 * The value is zigzag-encoded: bit 0 of the first byte is the sign, applied at
 * the end by XOR-ing both halves with an all-ones/all-zeroes mask.
 */
inline __device__ int128_s decode_varint128(volatile orc_bytestream_s *bs, int pos)
{
  uint32_t b = bytestream_readbyte(bs, pos++);
  int64_t sign_mask = -(int32_t)(b & 1);  // all ones if negative, zero otherwise
  uint64_t v = (b >> 1) & 0x3f;           // 6 magnitude bits in the first byte
  uint32_t bitpos = 6;
  uint64_t lo = v;
  uint64_t hi = 0;
  while (b > 0x7f && bitpos < 128) {
    b = bytestream_readbyte(bs, pos++);
    v |= ((uint64_t)(b & 0x7f)) << (bitpos & 0x3f);
    if (bitpos == 62) {  // 6 + 7 * 8 = 62: this byte straddles the 64-bit boundary
      lo = v;
      v = (b & 0x7f) >> 2;  // 64 - 62
    }
    bitpos += 7;
  }
  if (bitpos >= 64) {
    hi = v;
  } else {
    lo = v;
  }
  return {(uint64_t)(lo ^ sign_mask), (int64_t)(hi ^ sign_mask)};
}
/**
 * @brief Decodes an unsigned 32-bit varint
 */
inline __device__ int decode_varint(volatile orc_bytestream_s *bs, int pos, uint32_t &result)
{
  return decode_base128_varint<uint32_t>(bs, pos, result);
}

/**
 * @brief Decodes an unsigned 64-bit varint
 */
inline __device__ int decode_varint(volatile orc_bytestream_s *bs, int pos, uint64_t &result)
{
  return decode_base128_varint<uint64_t>(bs, pos, result);
}

/**
 * @brief Signed version of 32-bit decode_varint (zigzag-decodes the raw value)
 */
inline __device__ int decode_varint(volatile orc_bytestream_s *bs, int pos, int32_t &result)
{
  uint32_t zz;
  pos = decode_base128_varint<uint32_t>(bs, pos, zz);
  // Zigzag decode: the LSB carries the sign
  result = (int32_t)((zz >> 1u) ^ -(int32_t)(zz & 1));
  return pos;
}

/**
 * @brief Signed version of 64-bit decode_varint (zigzag-decodes the raw value)
 */
inline __device__ int decode_varint(volatile orc_bytestream_s *bs, int pos, int64_t &result)
{
  uint64_t zz;
  pos = decode_base128_varint<uint64_t>(bs, pos, zz);
  // Zigzag decode: the LSB carries the sign
  result = (int64_t)((zz >> 1u) ^ -(int64_t)(zz & 1));
  return pos;
}
/**
 * @brief In-place conversion from lengths to positions: block-wide inclusive
 * prefix sum over vals[0..numvals).
 *
 * Must be called by all threads of the block (contains __syncthreads()).
 *
 * @param[in,out] vals input lengths, replaced by their running totals
 * @param[in] numvals number of values
 * @param[in] t thread id
 */
template <class T>
inline __device__ void lengths_to_positions(volatile T *vals, uint32_t numvals, unsigned int t)
{
  // log2(numvals) passes; on each pass, elements whose index has bit n set add
  // the partial total ending just below their n-aligned group
  for (uint32_t n = 1; n < numvals; n <<= 1) {
    __syncthreads();
    if ((t & n) && (t < numvals)) vals[t] += vals[(t & ~n) | (n - 1)];
  }
}
/**
 * @brief ORC Integer RLEv1 decoding
 *
 * Three phases: (1) thread 0 scans the stream, splitting it into runs and
 * literal groups and storing each value's varint byte offset (lower 16 bits)
 * into vals[]; (2) one warp per run expands the run's base offset and
 * per-index delta across its value slots; (3) one thread per value decodes
 * its varint and adds the accumulated delta.
 *
 * @param[in] bs input byte stream
 * @param[in] rle RLE state
 * @param[in] vals buffer for output values (uint32_t, int32_t, uint64_t or int64_t)
 * @param[in] maxvals maximum number of values to decode
 * @param[in] t thread id
 *
 * @return number of values decoded
 */
template <class T>
static __device__ uint32_t Integer_RLEv1(
  orc_bytestream_s *bs, volatile orc_rlev1_state_s *rle, volatile T *vals, uint32_t maxvals, int t)
{
  uint32_t numvals, numruns;
  if (t == 0) {
    uint32_t maxpos = min(bs->len, bs->pos + (bytestream_buffer_size - 8u));
    uint32_t lastpos = bs->pos;
    numvals = numruns = 0;
    // Find the length and start location of each run
    while (numvals < maxvals && numruns < num_warps * 12) {
      uint32_t pos = lastpos;
      uint32_t n = bytestream_readbyte(bs, pos++);
      if (n <= 0x7f) {
        // Run
        int32_t delta;
        n = n + 3;  // stored length is biased by the minimum run length of 3
        if (numvals + n > maxvals) break;
        delta = bytestream_readbyte(bs, pos++);  // per-value delta byte (sign-extended in phase 2)
        vals[numvals] = pos & 0xffff;  // record varint start offset of the base value
        pos += varint_length<T>(bs, pos);
        if (pos > maxpos) break;
        rle->run_data[numruns++] = (delta << 24) | (n << 16) | numvals;  // pack delta/len/loc
        numvals += n;
      } else {
        // Literals
        uint32_t i;
        n = 0x100 - n;  // 1 to 128 literal values
        if (numvals + n > maxvals) break;
        i = 0;
        do {
          vals[numvals + i] = pos & 0xffff;  // per-literal varint start offset
          pos += varint_length<T>(bs, pos);
        } while (++i < n);
        if (pos > maxpos) break;
        numvals += n;
      }
      lastpos = pos;
    }
    rle->num_runs = numruns;
    rle->num_vals = numvals;
    bytestream_flush_bytes(bs, lastpos - bs->pos);
  }
  __syncthreads();
  // Expand the runs
  numruns = rle->num_runs;
  if (numruns > 0) {
    int r = t >> 5;
    int tr = t & 0x1f;
    for (uint32_t run = r; run < numruns; run += num_warps) {
      int32_t run_data = rle->run_data[run];
      int n = (run_data >> 16) & 0xff;
      int delta = run_data >> 24;  // arithmetic shift sign-extends the delta byte
      uint32_t base = run_data & 0x3ff;
      uint32_t pos = vals[base] & 0xffff;
      // Each slot: (delta * index) in the upper 16 bits, varint byte offset in the lower 16
      for (int i = 1 + tr; i < n; i += 32) { vals[base + i] = ((delta * i) << 16) | pos; }
    }
    __syncthreads();
  }
  numvals = rle->num_vals;
  // Decode individual 32-bit varints
  if (t < numvals) {
    int32_t pos = vals[t];
    int32_t delta = pos >> 16;
    T v;
    decode_varint(bs, pos, v);
    vals[t] = v + delta;  // base value plus accumulated run delta (0 for literals)
  }
  __syncthreads();
  return numvals;
}
/**
 * @brief Maps the RLEv2 5-bit width code to the actual bit width (1 to 64 bits)
 */
static const __device__ __constant__ uint8_t kRLEv2_W[32] = {
  1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
  17, 18, 19, 20, 21, 22, 23, 24, 26, 28, 30, 32, 40, 48, 56, 64};
/**
 * @brief Maps the RLEv2 patch size (pw + pgw) to number of bits
 *
 * Patch size (in bits) is only allowed to be from the below set. If `pw + pgw == 34` then the size
 * of the patch in the file is the smallest size in the set that can fit 34 bits i.e.
 * `ClosestFixedBitsMap[34] == 40`
 *
 * @see https://github.com/apache/orc/commit/9faf7f5147a7bc69
 */
static const __device__ __constant__ uint8_t ClosestFixedBitsMap[65] = {
  1,  1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
  22, 23, 24, 26, 26, 28, 28, 30, 30, 32, 32, 40, 40, 40, 40, 40, 40, 40, 40, 48, 48, 48,
  48, 48, 48, 48, 48, 56, 56, 56, 56, 56, 56, 56, 56, 64, 64, 64, 64, 64, 64, 64, 64};
/**
 * @brief ORC Integer RLEv2 decoding
 *
 * Phase 1 (thread 0): partitions the stream into runs, computing each run's
 * encoded byte length from its header without decoding values, and records
 * each run's start position/output location. Phase 2 (one warp per run):
 * decodes the values for all four RLEv2 modes — short repeat (0), direct (1),
 * patched base (2), and delta (3).
 *
 * @param[in] bs input byte stream
 * @param[in] rle RLE state
 * @param[in] vals buffer for output values (uint32_t, int32_t, uint64_t or int64_t)
 * @param[in] maxvals maximum number of values to decode
 * @param[in] t thread id
 *
 * @return number of values decoded
 */
template <class T>
static __device__ uint32_t Integer_RLEv2(
  orc_bytestream_s *bs, volatile orc_rlev2_state_s *rle, volatile T *vals, uint32_t maxvals, int t)
{
  uint32_t numvals, numruns;
  int r, tr;
  if (t == 0) {
    uint32_t maxpos = min(bs->len, bs->pos + (bytestream_buffer_size - 8u));
    uint32_t lastpos = bs->pos;
    numvals = numruns = 0;
    // Find the length and start location of each run
    while (numvals < maxvals) {
      uint32_t pos = lastpos;
      uint32_t byte0 = bytestream_readbyte(bs, pos++);
      uint32_t n, l;
      int mode = byte0 >> 6;  // top two bits select the encoding mode
      rle->runs_loc[numruns] = numvals;
      vals[numvals] = lastpos;  // stash the run's stream position for phase 2
      if (mode == 0) {
        // 00lllnnn: short repeat encoding
        l = 1 + ((byte0 >> 3) & 7); // 1 to 8 bytes
        n = 3 + (byte0 & 7); // 3 to 10 values
      } else {
        l = kRLEv2_W[(byte0 >> 1) & 0x1f];
        n = 1 + ((byte0 & 1) << 8) + bytestream_readbyte(bs, pos++);
        if (mode == 1) {
          // 01wwwwwn.nnnnnnnn: direct encoding
          l = (l * n + 7) >> 3;
        } else if (mode == 2) {
          // 10wwwwwn.nnnnnnnn.xxxxxxxx.yyyyyyyy: patched base encoding
          uint32_t byte2 = bytestream_readbyte(bs, pos++);
          uint32_t byte3 = bytestream_readbyte(bs, pos++);
          uint32_t bw = 1 + (byte2 >> 5);        // base value width, 1 to 8 bytes
          uint32_t pw = kRLEv2_W[byte2 & 0x1f];  // patch width, 1 to 64 bits
          uint32_t pgw = 1 + (byte3 >> 5);       // patch gap width, 1 to 8 bits
          uint32_t pgw_pw_len = ClosestFixedBitsMap[min(pw + pgw, 64u)];  // ceiled patch width
          uint32_t pll = byte3 & 0x1f;           // patch list length
          l = (l * n + 7) >> 3;
          l += bw;
          l += (pll * (pgw_pw_len) + 7) >> 3;
        } else {
          // 11wwwwwn.nnnnnnnn.<base>.<delta>: delta encoding
          uint32_t deltapos = varint_length<T>(bs, pos);
          deltapos += varint_length<T>(bs, pos + deltapos);
          // first two values come from the varints; the rest are w-bit deltas
          l = (l > 1 && n > 2) ? (l * (n - 2) + 7) >> 3 : 0;
          l += deltapos;
        }
      }
      if (numvals + n > maxvals) break;
      pos += l;
      if (pos > maxpos) break;
      lastpos = pos;
      numvals += n;
      numruns++;
    }
    rle->num_vals = numvals;
    rle->num_runs = numruns;
    bytestream_flush_bytes(bs, lastpos - bs->pos);
  }
  __syncthreads();
  // Process the runs, 1 warp per run
  numruns = rle->num_runs;
  r = t >> 5;
  tr = t & 0x1f;
  for (uint32_t run = r; run < numruns; run += num_warps) {
    uint32_t base, pos, w, n;
    int mode;
    // Lane 0 re-parses the run header and stores per-run state in rle->*[r]
    if (tr == 0) {
      uint32_t byte0;
      base = rle->runs_loc[run];
      pos = vals[base];
      byte0 = bytestream_readbyte(bs, pos++);
      mode = byte0 >> 6;
      if (mode == 0) {
        T baseval;
        // 00lllnnn: short repeat encoding
        w = 8 + (byte0 & 0x38); // 8 to 64 bits
        n = 3 + (byte0 & 7); // 3 to 10 values
        bytestream_readbe(bs, pos * 8, w, baseval);
        if (sizeof(T) <= 4) {
          rle->baseval.u32[r] = baseval;
        } else {
          rle->baseval.u64[r] = baseval;
        }
      } else {
        w = kRLEv2_W[(byte0 >> 1) & 0x1f];
        n = 1 + ((byte0 & 1) << 8) + bytestream_readbyte(bs, pos++);
        if (mode > 1) {
          if (mode == 2) {
            // Patched base
            uint32_t byte2 = bytestream_readbyte(bs, pos++);
            uint32_t byte3 = bytestream_readbyte(bs, pos++);
            uint32_t bw = 1 + (byte2 >> 5); // base value width, 1 to 8 bytes
            uint32_t pw = kRLEv2_W[byte2 & 0x1f]; // patch width, 1 to 64 bits
            // Base value is stored sign-magnitude: MSB of the bw-byte field is the sign
            if (sizeof(T) <= 4) {
              uint32_t baseval, mask;
              bytestream_readbe(bs, pos * 8, bw * 8, baseval);
              mask = (1 << (bw * 8 - 1)) - 1;
              rle->baseval.u32[r] = (baseval > mask) ? (-(int32_t)(baseval & mask)) : baseval;
            } else {
              uint64_t baseval, mask;
              bytestream_readbe(bs, pos * 8, bw * 8, baseval);
              mask = 1;
              mask <<= (bw * 8) - 1;
              mask -= 1;
              rle->baseval.u64[r] = (baseval > mask) ? (-(int64_t)(baseval & mask)) : baseval;
            }
            rle->m2_pw_byte3[r] = (pw << 8) | byte3;  // saved for the patch pass below
            pos += bw;
          } else {
            T baseval;
            int64_t delta;
            // Delta
            pos = decode_varint(bs, pos, baseval);
            if (sizeof(T) <= 4) {
              rle->baseval.u32[r] = baseval;
            } else {
              rle->baseval.u64[r] = baseval;
            }
            pos = decode_varint(bs, pos, delta);
            rle->delta[r] = delta;
          }
        }
      }
    }
    // Broadcast lane 0's header fields to the whole warp
    base = shuffle(base);
    mode = shuffle(mode);
    pos = shuffle(pos);
    n = shuffle(n);
    w = shuffle(w);
    for (uint32_t i = tr; i < n; i += 32) {
      if (sizeof(T) <= 4) {
        if (mode == 0) {
          vals[base + i] = rle->baseval.u32[r];
        } else if (mode == 1) {
          T v;
          bytestream_readbe(bs, pos * 8 + i * w, w, v);
          vals[base + i] = v;
        } else if (mode == 2) {
          uint32_t ofs = bytestream_readbits(bs, pos * 8 + i * w, w);
          vals[base + i] = rle->baseval.u32[r] + ofs;
        } else {
          // Delta mode: values 0/1 come from base/delta varints; values >= 2
          // are w-bit magnitudes with the sign taken from the initial delta
          int64_t delta = rle->delta[r];
          if (w > 1 && i > 1) {
            int32_t delta_s = (delta < 0) ? -1 : 0;
            vals[base + i] =
              (bytestream_readbits(bs, pos * 8 + (i - 2) * w, w) ^ delta_s) - delta_s;
          } else {
            vals[base + i] = (i == 0) ? 0 : static_cast<uint32_t>(delta);
          }
        }
      } else {
        if (mode == 0) {
          vals[base + i] = rle->baseval.u64[r];
        } else if (mode == 1) {
          T v;
          bytestream_readbe(bs, pos * 8 + i * w, w, v);
          vals[base + i] = v;
        } else if (mode == 2) {
          uint64_t ofs = bytestream_readbits64(bs, pos * 8 + i * w, w);
          vals[base + i] = rle->baseval.u64[r] + ofs;
        } else {
          int64_t delta = rle->delta[r], ofs;
          if (w > 1 && i > 1) {
            int64_t delta_s = (delta < 0) ? -1 : 0;
            ofs = (bytestream_readbits64(bs, pos * 8 + (i - 2) * w, w) ^ delta_s) - delta_s;
          } else {
            ofs = (i == 0) ? 0 : delta;
          }
          vals[base + i] = ofs;
        }
      }
    }
    __syncwarp();
    // Patch values
    if (mode == 2) {
      uint32_t pw_byte3 = rle->m2_pw_byte3[r];
      uint32_t pw = pw_byte3 >> 8;
      uint32_t pgw = 1 + ((pw_byte3 >> 5) & 7); // patch gap width, 1 to 8 bits
      uint32_t pll = pw_byte3 & 0x1f; // patch list length
      if (pll != 0) {
        uint32_t pgw_pw_len = ClosestFixedBitsMap[min(pw + pgw, 64u)];
        // Each lane reads one (gap, patch) entry from the patch list
        uint64_t patch_pos64 =
          (tr < pll) ? bytestream_readbits64(
                         bs, pos * 8 + ((n * w + 7) & ~7) + tr * (pgw_pw_len), pgw_pw_len)
                     : 0;
        uint32_t patch_pos;
        T patch = 1;
        patch <<= pw;
        patch = (patch - 1) & (T)patch_pos64;  // low pw bits: patch value (applied above bit w)
        patch <<= w;
        patch_pos = (uint32_t)(patch_pos64 >> pw);  // high bits: gap to the previous patch
        // Warp-level prefix sum converts gaps into absolute patch positions
        for (uint32_t k = 1; k < pll; k <<= 1) {
          uint32_t tmp = shuffle(patch_pos, (tr & ~k) | (k - 1));
          patch_pos += (tr & k) ? tmp : 0;
        }
        if (tr < pll && patch_pos < n) { vals[base + patch_pos] += patch; }
      }
    }
    __syncwarp();
    if (mode == 3) {
      T baseval;
      // Warp-cooperative inclusive prefix sum turns deltas into values
      for (uint32_t i = 1; i < n; i <<= 1) {
        __syncwarp();
        for (uint32_t j = tr; j < n; j += 32) {
          if (j & i) vals[base + j] += vals[base + ((j & ~i) | (i - 1))];
        }
      }
      if (sizeof(T) <= 4)
        baseval = rle->baseval.u32[r];
      else
        baseval = rle->baseval.u64[r];
      for (uint32_t j = tr; j < n; j += 32) { vals[base + j] += baseval; }
    }
  }
  __syncthreads();
  return rle->num_vals;
}
/**
 * @brief Reads 32 booleans as a packed 32-bit value
 *
 * Both source words are byte-reversed before the 32 bits starting at
 * @p bitpos are extracted and bit-reversed into output order.
 * NOTE(review): always reads one word past (bitpos >> 5) — caller must
 * guarantee the following word is addressable.
 *
 * @param[in] vals 32-bit array of values (little-endian)
 * @param[in] bitpos bit position
 *
 * @return 32-bit value
 */
inline __device__ uint32_t rle8_read_bool32(volatile uint32_t *vals, uint32_t bitpos)
{
  uint32_t const word_idx = bitpos >> 5;
  uint32_t first = vals[word_idx + 0];
  uint32_t second = vals[word_idx + 1];
  first = __byte_perm(first, 0, 0x0123);   // byte-swap to big-endian order
  second = __byte_perm(second, 0, 0x0123);
  return __brev(__funnelshift_l(second, first, bitpos));
}
/**
 * @brief ORC Byte RLE decoding
 *
 * Thread 0 first splits the stream into runs/literal groups, recording each
 * group's byte position and output location; then one warp per run expands
 * its bytes into the output buffer.
 *
 * @param[in] bs Input byte stream
 * @param[in] rle RLE state
 * @param[in] vals output buffer for decoded 8-bit values
 * @param[in] maxvals Maximum number of values to decode
 * @param[in] t thread id
 *
 * @return number of values decoded
 */
static __device__ uint32_t Byte_RLE(orc_bytestream_s *bs,
                                    volatile orc_byterle_state_s *rle,
                                    volatile uint8_t *vals,
                                    uint32_t maxvals,
                                    int t)
{
  uint32_t numvals, numruns;
  int r, tr;
  if (t == 0) {
    uint32_t maxpos = min(bs->len, bs->pos + (bytestream_buffer_size - 8u));
    uint32_t lastpos = bs->pos;
    numvals = numruns = 0;
    // Find the length and start location of each run
    while (numvals < maxvals && numruns < num_warps) {
      uint32_t pos = lastpos, n;
      rle->runs_pos[numruns] = pos;
      rle->runs_loc[numruns] = numvals;
      n = bytestream_readbyte(bs, pos++);
      if (n <= 0x7f) {
        // Run
        n = n + 3;  // stored length is biased by the minimum run length of 3
        pos++;  // skip the single repeated byte
      } else {
        // Literals
        n = 0x100 - n;  // 1 to 128 literal bytes
        pos += n;
      }
      if (pos > maxpos || numvals + n > maxvals) { break; }
      numruns++;
      numvals += n;
      lastpos = pos;
    }
    rle->num_runs = numruns;
    rle->num_vals = numvals;
    bytestream_flush_bytes(bs, lastpos - bs->pos);
  }
  __syncthreads();
  numruns = rle->num_runs;
  r = t >> 5;
  tr = t & 0x1f;
  // Expand the runs, one warp per run
  for (int run = r; run < numruns; run += num_warps) {
    uint32_t pos = rle->runs_pos[run];
    uint32_t loc = rle->runs_loc[run];
    uint32_t n = bytestream_readbyte(bs, pos++);
    uint32_t literal_mask;
    if (n <= 0x7f) {
      literal_mask = 0;  // run: every output reads the same source byte
      n += 3;
    } else {
      literal_mask = ~0;  // literals: each output reads its own byte
      n = 0x100 - n;
    }
    for (uint32_t i = tr; i < n; i += 32) {
      vals[loc + i] = bytestream_readbyte(bs, pos + (i & literal_mask));
    }
  }
  __syncthreads();
  return rle->num_vals;
}
/**
 * @brief Powers of 10 (as doubles, used when converting decimals to float64)
 */
static const __device__ __constant__ double kPow10[40] = {
  1.0,   1.e1,  1.e2,  1.e3,  1.e4,  1.e5,  1.e6,  1.e7,  1.e8,  1.e9,  1.e10, 1.e11, 1.e12, 1.e13,
  1.e14, 1.e15, 1.e16, 1.e17, 1.e18, 1.e19, 1.e20, 1.e21, 1.e22, 1.e23, 1.e24, 1.e25, 1.e26, 1.e27,
  1.e28, 1.e29, 1.e30, 1.e31, 1.e32, 1.e33, 1.e34, 1.e35, 1.e36, 1.e37, 1.e38, 1.e39,
};
/**
 * @brief Powers of 5 (as int64); multiplying by kPow5i[k] and shifting left
 * by k scales by 10^k, since 5^k * 2^k == 10^k
 */
static const __device__ __constant__ int64_t kPow5i[28] = {1,
                                                           5,
                                                           25,
                                                           125,
                                                           625,
                                                           3125,
                                                           15625,
                                                           78125,
                                                           390625,
                                                           1953125,
                                                           9765625,
                                                           48828125,
                                                           244140625,
                                                           1220703125,
                                                           6103515625ll,
                                                           30517578125ll,
                                                           152587890625ll,
                                                           762939453125ll,
                                                           3814697265625ll,
                                                           19073486328125ll,
                                                           95367431640625ll,
                                                           476837158203125ll,
                                                           2384185791015625ll,
                                                           11920928955078125ll,
                                                           59604644775390625ll,
                                                           298023223876953125ll,
                                                           1490116119384765625ll,
                                                           7450580596923828125ll};
/**
 * @brief ORC Decimal decoding (unbounded base-128 varints)
 *
 * Repeatedly: thread 0 records the varint start position of each value,
 * then one thread per value decodes its int128 varint and scales it —
 * either into a float64 (when orc_decimal2float64_scale is set in
 * col_scale) or into a rescaled int64.
 *
 * @param[in] bs Input byte stream
 * @param[in] scratch scratch space for the number of positions recorded per pass
 * @param[in,out] vals on input: scale from secondary stream, on output: value
 * @param[in] val_scale scale of the current value (from the secondary stream)
 * @param[in] numvals Number of values to decode
 * @param[in] col_scale column scale (may have orc_decimal2float64_scale flag set)
 * @param[in] t thread id
 *
 * @return number of values decoded
 */
static __device__ int Decode_Decimals(orc_bytestream_s *bs,
                                      volatile orc_byterle_state_s *scratch,
                                      volatile orcdec_state_s::values &vals,
                                      int val_scale,
                                      int numvals,
                                      int col_scale,
                                      int t)
{
  uint32_t num_vals_read = 0;
  // Iterates till `numvals` are read or there is nothing to read once the
  // stream has reached its end, and can't read anything more.
  while (num_vals_read != numvals) {
    if (t == 0) {
      uint32_t maxpos = min(bs->len, bs->pos + (bytestream_buffer_size - 8u));
      uint32_t lastpos = bs->pos;
      uint32_t n;
      // Record the varint start offset of each remaining value
      for (n = num_vals_read; n < numvals; n++) {
        uint32_t pos = lastpos;
        pos += varint_length<uint4>(bs, pos);
        if (pos > maxpos) break;
        vals.i64[n] = lastpos;
        lastpos = pos;
      }
      scratch->num_vals = n;
      bytestream_flush_bytes(bs, lastpos - bs->pos);
    }
    __syncthreads();
    uint32_t num_vals_to_read = scratch->num_vals;
    // Threads covering newly-located values decode and scale them
    if (t >= num_vals_read and t < num_vals_to_read) {
      auto const pos = static_cast<int>(vals.i64[t]);
      int128_s v = decode_varint128(bs, pos);
      if (col_scale & orc_decimal2float64_scale) {
        // Convert to float64, dividing/multiplying by the value's own scale
        double f = Int128ToDouble_rn(v.lo, v.hi);
        int32_t scale = (t < numvals) ? val_scale : 0;
        if (scale >= 0)
          vals.f64[t] = f / kPow10[min(scale, 39)];
        else
          vals.f64[t] = f * kPow10[min(-scale, 39)];
      } else {
        // Rescale to the column scale, keeping an int64 result
        int32_t scale = (t < numvals) ? (col_scale & ~orc_decimal2float64_scale) - val_scale : 0;
        if (scale >= 0) {
          scale = min(scale, 27);
          vals.i64[t] = ((int64_t)v.lo * kPow5i[scale]) << scale;  // * 10^scale
        } else // if (scale < 0)
        {
          bool is_negative = (v.hi < 0);
          uint64_t hi = v.hi, lo = v.lo;
          scale = min(-scale, 27);
          if (is_negative) {
            // Negate the 128-bit magnitude so we can divide unsigned
            hi = (~hi) + (lo == 0);
            lo = (~lo) + 1;
          }
          // Divide by 10^scale: first shift out the 2^scale factor...
          lo = (lo >> (uint32_t)scale) | ((uint64_t)hi << (64 - scale));
          hi >>= (int32_t)scale;
          if (hi != 0) {
            // Use intermediate float
            lo = __double2ull_rn(Int128ToDouble_rn(lo, hi) / __ll2double_rn(kPow5i[scale]));
            hi = 0;
          } else {
            lo /= kPow5i[scale];  // ...then divide by the 5^scale factor
          }
          vals.i64[t] = (is_negative) ? -(int64_t)lo : (int64_t)lo;
        }
      }
    }
    // There is nothing to read, so break
    if (num_vals_read == num_vals_to_read) break;
    // Update number of values read (This contains values of previous iteration)
    num_vals_read = num_vals_to_read;
    // Have to wait till all threads have copied data
    __syncthreads();
    if (num_vals_read != numvals) {
      bytestream_fill(bs, t);
      __syncthreads();
      if (t == 0) {
        // Needs to be reset since bytestream has been filled
        bs->fill_count = 0;
      }
    }
    // Adding to get all threads in sync before next read
    __syncthreads();
  }
  return num_vals_read;
}
/**
 * @brief Decoding NULLs and builds string dictionary index tables
 *
 * Each block processes one (stripe, column) chunk. Blocks with
 * blockIdx.y >= num_stripes decode the PRESENT stream into the chunk's
 * validity bitmap (counting nulls and rows skipped below first_row);
 * the remaining blocks decode the dictionary LENGTH stream into
 * (offset, length) dictionary index entries.
 *
 * @param[in] chunks ColumnDesc device array [stripe][column]
 * @param[in] global_dictionary Global dictionary device array
 * @param[in] num_columns Number of columns
 * @param[in] num_stripes Number of stripes
 * @param[in] max_num_rows Maximum number of rows to load
 * @param[in] first_row Crop all rows below first_row
 */
// blockDim {block_size,1,1}
template <int block_size>
__global__ void __launch_bounds__(block_size)
  gpuDecodeNullsAndStringDictionaries(ColumnDesc *chunks,
                                      DictionaryEntry *global_dictionary,
                                      uint32_t num_columns,
                                      uint32_t num_stripes,
                                      size_t max_num_rows,
                                      size_t first_row)
{
  __shared__ __align__(16) orcdec_state_s state_g;
  using warp_reduce = cub::WarpReduce<uint32_t>;
  using block_reduce = cub::BlockReduce<uint32_t, block_size>;
  __shared__ union {
    typename warp_reduce::TempStorage wr_storage[block_size / 32];
    typename block_reduce::TempStorage bk_storage;
  } temp_storage;
  orcdec_state_s *const s = &state_g;
  bool is_nulldec = (blockIdx.y >= num_stripes);
  uint32_t column = blockIdx.x;
  uint32_t stripe = (is_nulldec) ? blockIdx.y - num_stripes : blockIdx.y;
  uint32_t chunk_id = stripe * num_columns + column;
  int t = threadIdx.x;
  if (t == 0) s->chunk = chunks[chunk_id];
  __syncthreads();
  if (is_nulldec) {
    uint32_t null_count = 0;
    // Decode NULLs
    if (t == 0) {
      s->chunk.skip_count = 0;
      s->top.nulls_desc_row = 0;
      bytestream_init(&s->bs, s->chunk.streams[CI_PRESENT], s->chunk.strm_len[CI_PRESENT]);
    }
    __syncthreads();
    if (s->chunk.strm_len[CI_PRESENT] == 0) {
      // No present stream: all rows are valid
      s->vals.u32[t] = ~0;
    }
    // Process the chunk in batches of up to blockDim.x * 32 rows
    while (s->top.nulls_desc_row < s->chunk.num_rows) {
      uint32_t nrows_max = min(s->chunk.num_rows - s->top.nulls_desc_row, blockDim.x * 32);
      uint32_t nrows;
      size_t row_in;
      bytestream_fill(&s->bs, t);
      __syncthreads();
      if (s->chunk.strm_len[CI_PRESENT] > 0) {
        // One PRESENT bit per row, decoded via byte RLE
        uint32_t nbytes = Byte_RLE(&s->bs, &s->u.rle8, s->vals.u8, (nrows_max + 7) >> 3, t);
        nrows = min(nrows_max, nbytes * 8u);
        if (!nrows) {
          // Error: mark all remaining rows as null
          nrows = nrows_max;
          if (t * 32 < nrows) { s->vals.u32[t] = 0; }
        }
      } else {
        nrows = nrows_max;
      }
      __syncthreads();
      row_in = s->chunk.start_row + s->top.nulls_desc_row;
      // Write the decoded bits into the output validity bitmap (if in range)
      if (row_in + nrows > first_row && row_in < first_row + max_num_rows &&
          s->chunk.valid_map_base != NULL) {
        int64_t dst_row = row_in - first_row;
        int64_t dst_pos = max(dst_row, (int64_t)0);
        uint32_t startbit = -static_cast<int32_t>(min(dst_row, (int64_t)0));
        uint32_t nbits = nrows - min(startbit, nrows);
        uint32_t *valid = s->chunk.valid_map_base + (dst_pos >> 5);
        uint32_t bitpos = static_cast<uint32_t>(dst_pos) & 0x1f;
        if ((size_t)(dst_pos + nbits) > max_num_rows) {
          nbits = static_cast<uint32_t>(max_num_rows - min((size_t)dst_pos, max_num_rows));
        }
        // Store bits up to the next 32-bit aligned boundary
        if (bitpos != 0) {
          uint32_t n = min(32u - bitpos, nbits);
          if (t == 0) {
            uint32_t mask = ((1 << n) - 1) << bitpos;
            uint32_t bits = (rle8_read_bool32(s->vals.u32, startbit) << bitpos) & mask;
            // Atomics because other blocks may touch the same boundary word
            atomicAnd(valid, ~mask);
            atomicOr(valid, bits);
            null_count += __popc((~bits) & mask);
          }
          nbits -= n;
          startbit += n;
          valid++;
        }
        // Store bits aligned
        if (t * 32 + 32 <= nbits) {
          uint32_t bits = rle8_read_bool32(s->vals.u32, startbit + t * 32);
          valid[t] = bits;
          null_count += __popc(~bits);
        } else if (t * 32 < nbits) {
          // Partial trailing word: only update the masked bits
          uint32_t n = nbits - t * 32;
          uint32_t mask = (1 << n) - 1;
          uint32_t bits = rle8_read_bool32(s->vals.u32, startbit + t * 32) & mask;
          atomicAnd(valid + t, ~mask);
          atomicOr(valid + t, bits);
          null_count += __popc((~bits) & mask);
        }
        __syncthreads();
      }
      // We may have some valid values that are not decoded below first_row -> count these in
      // skip_count, so that subsequent kernel can infer the correct row position
      if (row_in < first_row && t < 32) {
        uint32_t skippedrows = min(static_cast<uint32_t>(first_row - row_in), nrows);
        uint32_t skip_count = 0;
        for (uint32_t i = t * 32; i < skippedrows; i += 32 * 32) {
          // Need to arrange the bytes to apply mask properly.
          uint32_t bits = (i + 32 <= skippedrows) ? s->vals.u32[i >> 5]
                                                  : (__byte_perm(s->vals.u32[i >> 5], 0, 0x0123) &
                                                     (0xffffffffu << (0x20 - skippedrows + i)));
          skip_count += __popc(bits);
        }
        skip_count = warp_reduce(temp_storage.wr_storage[t / 32]).Sum(skip_count);
        if (t == 0) { s->chunk.skip_count += skip_count; }
      }
      __syncthreads();
      if (t == 0) { s->top.nulls_desc_row += nrows; }
      __syncthreads();
    }
    __syncthreads();
    // Sum up the valid counts and infer null_count
    null_count = block_reduce(temp_storage.bk_storage).Sum(null_count);
    if (t == 0) {
      chunks[chunk_id].null_count = null_count;
      chunks[chunk_id].skip_count = s->chunk.skip_count;
    }
  } else {
    // Decode string dictionary
    int encoding_kind = s->chunk.encoding_kind;
    if ((encoding_kind == DICTIONARY || encoding_kind == DICTIONARY_V2) &&
        (s->chunk.dict_len > 0)) {
      if (t == 0) {
        s->top.dict.dict_len = s->chunk.dict_len;
        s->top.dict.local_dict = global_dictionary + s->chunk.dictionary_start; // Local dictionary
        s->top.dict.dict_pos = 0;
        // CI_DATA2 contains the LENGTH stream coding the length of individual dictionary entries
        bytestream_init(&s->bs, s->chunk.streams[CI_DATA2], s->chunk.strm_len[CI_DATA2]);
      }
      __syncthreads();
      // Decode entry lengths blockDim.x at a time, converting to offsets
      while (s->top.dict.dict_len > 0) {
        uint32_t numvals = min(s->top.dict.dict_len, blockDim.x), len;
        volatile uint32_t *vals = s->vals.u32;
        bytestream_fill(&s->bs, t);
        __syncthreads();
        if (is_rlev1(s->chunk.encoding_kind)) {
          numvals = Integer_RLEv1(&s->bs, &s->u.rlev1, vals, numvals, t);
        } else // RLEv2
        {
          numvals = Integer_RLEv2(&s->bs, &s->u.rlev2, vals, numvals, t);
        }
        __syncthreads();
        len = (t < numvals) ? vals[t] : 0;
        lengths_to_positions(vals, numvals, t);
        __syncthreads();
        if (numvals == 0) {
          // This is an error (ran out of data)
          numvals = min(s->top.dict.dict_len, blockDim.x);
          vals[t] = 0;
        }
        if (t < numvals) {
          // Entry t starts at (inclusive prefix sum) - (own length)
          s->top.dict.local_dict[t] = {s->top.dict.dict_pos + vals[t] - len, len};
        }
        __syncthreads();
        if (t == 0) {
          s->top.dict.dict_pos += vals[numvals - 1];
          s->top.dict.dict_len -= numvals;
          s->top.dict.local_dict += numvals;
        }
        __syncthreads();
      }
    }
  }
}
/**
 * @brief Decode row positions from valid bits
 *
 * Fills s->u.rowdec.row[] with (row index + 1) for each non-null row (0 for
 * rows skipped below first_row), consuming validity bits until a full batch
 * of s->top.data.max_vals values is gathered or the chunk's rows run out.
 * Must be called by all threads of the block.
 *
 * @param[in,out] s Column chunk decoder state
 * @param[in] first_row crop all rows below first rows
 * @param[in] t thread id
 * @param[in] temp_storage shared memory storage to perform block reduce
 */
template <typename Storage>
static __device__ void DecodeRowPositions(orcdec_state_s *s,
                                          size_t first_row,
                                          int t,
                                          Storage &temp_storage)
{
  using block_reduce = cub::BlockReduce<uint32_t, block_size>;
  if (t == 0) {
    if (s->chunk.skip_count != 0) {
      // Account first for values skipped below first_row
      s->u.rowdec.nz_count = min(min(s->chunk.skip_count, s->top.data.max_vals), blockDim.x);
      s->chunk.skip_count -= s->u.rowdec.nz_count;
      s->top.data.nrows = s->u.rowdec.nz_count;
    } else {
      s->u.rowdec.nz_count = 0;
    }
  }
  __syncthreads();
  if (t < s->u.rowdec.nz_count) {
    s->u.rowdec.row[t] = 0; // Skipped values (below first_row)
  }
  while (s->u.rowdec.nz_count < s->top.data.max_vals &&
         s->top.data.cur_row + s->top.data.nrows < s->top.data.end_row) {
    uint32_t nrows = min(s->top.data.end_row - (s->top.data.cur_row + s->top.data.nrows),
                         min((row_decoder_buffer_size - s->u.rowdec.nz_count) * 2, blockDim.x));
    if (s->chunk.strm_len[CI_PRESENT] > 0) {
      // We have a present stream
      uint32_t rmax = s->top.data.end_row - min((uint32_t)first_row, s->top.data.end_row);
      uint32_t r = (uint32_t)(s->top.data.cur_row + s->top.data.nrows + t - first_row);
      uint32_t valid = (t < nrows && r < rmax)
                         ? (((const uint8_t *)s->chunk.valid_map_base)[r >> 3] >> (r & 7)) & 1
                         : 0;
      volatile uint16_t *row_ofs_plus1 =
        (volatile uint16_t *)&s->u.rowdec.row[s->u.rowdec.nz_count];
      uint32_t nz_pos, row_plus1, nz_count = s->u.rowdec.nz_count, last_row;
      // Prefix-sum the valid bits to compact non-null row indices
      if (t < nrows) { row_ofs_plus1[t] = valid; }
      lengths_to_positions<uint16_t>(row_ofs_plus1, nrows, t);
      if (t < nrows) {
        nz_count += row_ofs_plus1[t];
        row_plus1 = s->top.data.nrows + t + 1;
      } else {
        row_plus1 = 0;
      }
      if (t == nrows - 1) { s->u.rowdec.nz_count = min(nz_count, s->top.data.max_vals); }
      __syncthreads();
      // TBD: Brute-forcing this, there might be a more efficient way to find the thread with the
      // last row
      last_row = (nz_count == s->u.rowdec.nz_count) ? row_plus1 : 0;
      last_row = block_reduce(temp_storage).Reduce(last_row, cub::Max());
      nz_pos = (valid) ? nz_count : 0;
      if (t == 0) { s->top.data.nrows = last_row; }
      if (valid && nz_pos - 1 < s->u.rowdec.nz_count) { s->u.rowdec.row[nz_pos - 1] = row_plus1; }
      __syncthreads();
    } else {
      // All values are valid
      nrows = min(nrows, s->top.data.max_vals - s->u.rowdec.nz_count);
      if (t < nrows) { s->u.rowdec.row[s->u.rowdec.nz_count + t] = s->top.data.nrows + t + 1; }
      __syncthreads();
      if (t == 0) {
        s->top.data.nrows += nrows;
        s->u.rowdec.nz_count += nrows;
      }
      __syncthreads();
    }
  }
}
/**
 * @brief Trailing zeroes for decoding timestamp nanoseconds
 *
 * NOTE(review): table index appears to be the 3-bit trailing-zero-count code
 * stored with the nanosecond value; the consuming code is outside this
 * section — verify against the timestamp decode path.
 */
static const __device__ __constant__ uint32_t kTimestampNanoScale[8] = {
  1, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000};
/**
* @brief Decodes column data
*
* @param[in] chunks ColumnDesc device array
* @param[in] global_dictionary Global dictionary device array
* @param[in] tz_table Timezone translation table
* @param[in] row_groups Optional row index data
* @param[in] max_num_rows Maximum number of rows to load
* @param[in] first_row Crop all rows below first_row
* @param[in] num_chunks Number of column chunks (num_columns * num_stripes)
* @param[in] num_rowgroups Number of row groups in row index data
* @param[in] rowidx_stride Row index stride
*/
// blockDim {block_size,1,1}
template <int block_size>
__global__ void __launch_bounds__(block_size)
gpuDecodeOrcColumnData(ColumnDesc *chunks,
DictionaryEntry *global_dictionary,
timezone_table_view tz_table,
const RowGroup *row_groups,
size_t max_num_rows,
size_t first_row,
uint32_t num_columns,
uint32_t num_rowgroups,
uint32_t rowidx_stride)
{
__shared__ __align__(16) orcdec_state_s state_g;
__shared__ typename cub::BlockReduce<uint32_t, block_size>::TempStorage temp_storage;
orcdec_state_s *const s = &state_g;
uint32_t chunk_id;
int t = threadIdx.x;
if (num_rowgroups > 0) {
if (t == 0) s->top.data.index = row_groups[blockIdx.y * num_columns + blockIdx.x];
__syncthreads();
chunk_id = s->top.data.index.chunk_id;
} else {
chunk_id = blockIdx.x;
}
if (t == 0) s->chunk = chunks[chunk_id];
__syncthreads();
if (t == 0) {
// If we have an index, seek to the initial run and update row positions
if (num_rowgroups > 0) {
uint32_t ofs0 = min(s->top.data.index.strm_offset[0], s->chunk.strm_len[CI_DATA]);
uint32_t ofs1 = min(s->top.data.index.strm_offset[1], s->chunk.strm_len[CI_DATA2]);
uint32_t rowgroup_rowofs;
s->chunk.streams[CI_DATA] += ofs0;
s->chunk.strm_len[CI_DATA] -= ofs0;
s->chunk.streams[CI_DATA2] += ofs1;
s->chunk.strm_len[CI_DATA2] -= ofs1;
rowgroup_rowofs = min((blockIdx.y - min(s->chunk.rowgroup_id, blockIdx.y)) * rowidx_stride,
s->chunk.num_rows);
s->chunk.start_row += rowgroup_rowofs;
s->chunk.num_rows -= rowgroup_rowofs;
}
s->is_string = (s->chunk.type_kind == STRING || s->chunk.type_kind == BINARY ||
s->chunk.type_kind == VARCHAR || s->chunk.type_kind == CHAR);
s->top.data.cur_row =
max(s->chunk.start_row, max((int32_t)(first_row - s->chunk.skip_count), 0));
s->top.data.end_row = s->chunk.start_row + s->chunk.num_rows;
s->top.data.buffered_count = 0;
if (s->top.data.end_row > first_row + max_num_rows) {
s->top.data.end_row = static_cast<uint32_t>(first_row + max_num_rows);
}
if (num_rowgroups > 0) {
s->top.data.end_row = min(s->top.data.end_row, s->chunk.start_row + rowidx_stride);
}
if (!is_dictionary(s->chunk.encoding_kind)) { s->chunk.dictionary_start = 0; }
s->top.data.utc_epoch = kORCTimeToUTC - tz_table.gmt_offset;
bytestream_init(&s->bs, s->chunk.streams[CI_DATA], s->chunk.strm_len[CI_DATA]);
bytestream_init(&s->bs2, s->chunk.streams[CI_DATA2], s->chunk.strm_len[CI_DATA2]);
}
__syncthreads();
while (s->top.data.cur_row < s->top.data.end_row) {
bytestream_fill(&s->bs, t);
bytestream_fill(&s->bs2, t);
__syncthreads();
if (t == 0) {
uint32_t max_vals = s->chunk.start_row + s->chunk.num_rows - s->top.data.cur_row;
if (num_rowgroups > 0 && (s->is_string || s->chunk.type_kind == TIMESTAMP)) {
max_vals +=
s->top.data.index.run_pos[is_dictionary(s->chunk.encoding_kind) ? CI_DATA : CI_DATA2];
}
s->bs.fill_count = 0;
s->bs2.fill_count = 0;
s->top.data.nrows = 0;
s->top.data.max_vals =
min(max_vals, (s->chunk.type_kind == BOOLEAN) ? blockDim.x * 2 : blockDim.x);
}
__syncthreads();
// Decode data streams
{
uint32_t numvals = s->top.data.max_vals, secondary_val;
uint32_t vals_skipped = 0;
if (s->is_string || s->chunk.type_kind == TIMESTAMP) {
// For these data types, we have a secondary unsigned 32-bit data stream
orc_bytestream_s *bs = (is_dictionary(s->chunk.encoding_kind)) ? &s->bs : &s->bs2;
uint32_t ofs = 0;
if (s->chunk.type_kind == TIMESTAMP) {
// Restore buffered secondary stream values, if any
ofs = s->top.data.buffered_count;
if (ofs > 0) {
__syncthreads();
if (t == 0) { s->top.data.buffered_count = 0; }
}
}
if (numvals > ofs) {
if (is_rlev1(s->chunk.encoding_kind)) {
numvals = ofs + Integer_RLEv1(bs, &s->u.rlev1, &s->vals.u32[ofs], numvals - ofs, t);
} else {
numvals = ofs + Integer_RLEv2(bs, &s->u.rlev2, &s->vals.u32[ofs], numvals - ofs, t);
}
__syncthreads();
if (numvals <= ofs && t >= ofs && t < s->top.data.max_vals) { s->vals.u32[t] = 0; }
}
// If we're using an index, we may have to drop values from the initial run
if (num_rowgroups > 0) {
int cid = is_dictionary(s->chunk.encoding_kind) ? CI_DATA : CI_DATA2;
uint32_t run_pos = s->top.data.index.run_pos[cid];
if (run_pos) {
vals_skipped = min(numvals, run_pos);
__syncthreads();
if (t == 0) { s->top.data.index.run_pos[cid] = 0; }
numvals -= vals_skipped;
if (t < numvals) { secondary_val = s->vals.u32[vals_skipped + t]; }
__syncthreads();
if (t < numvals) { s->vals.u32[t] = secondary_val; }
}
}
__syncthreads();
// For strings with direct encoding, we need to convert the lengths into an offset
if (!is_dictionary(s->chunk.encoding_kind)) {
secondary_val = (t < numvals) ? s->vals.u32[t] : 0;
if (s->chunk.type_kind != TIMESTAMP) {
lengths_to_positions(s->vals.u32, numvals, t);
__syncthreads();
}
}
// Adjust the maximum number of values
if (numvals == 0 && vals_skipped == 0) {
numvals = s->top.data.max_vals; // Just so that we don't hang if the stream is corrupted
}
if (t == 0 && numvals < s->top.data.max_vals) { s->top.data.max_vals = numvals; }
}
__syncthreads();
// Account for skipped values
if (num_rowgroups > 0 && !s->is_string) {
uint32_t run_pos = (s->chunk.type_kind == DECIMAL) ? s->top.data.index.run_pos[CI_DATA2]
: s->top.data.index.run_pos[CI_DATA];
numvals =
min(numvals + run_pos, (s->chunk.type_kind == BOOLEAN) ? blockDim.x * 2 : blockDim.x);
}
// Decode the primary data stream
if (s->chunk.type_kind == INT || s->chunk.type_kind == DATE || s->chunk.type_kind == SHORT) {
// Signed int32 primary data stream
if (is_rlev1(s->chunk.encoding_kind)) {
numvals = Integer_RLEv1(&s->bs, &s->u.rlev1, s->vals.i32, numvals, t);
} else {
numvals = Integer_RLEv2(&s->bs, &s->u.rlev2, s->vals.i32, numvals, t);
}
__syncthreads();
} else if (s->chunk.type_kind == BYTE) {
numvals = Byte_RLE(&s->bs, &s->u.rle8, s->vals.u8, numvals, t);
__syncthreads();
} else if (s->chunk.type_kind == BOOLEAN) {
int n = ((numvals + 7) >> 3);
if (n > s->top.data.buffered_count) {
numvals = Byte_RLE(&s->bs,
&s->u.rle8,
&s->vals.u8[s->top.data.buffered_count],
n - s->top.data.buffered_count,
t) +
s->top.data.buffered_count;
} else {
numvals = s->top.data.buffered_count;
}
__syncthreads();
if (t == 0) {
s->top.data.buffered_count = 0;
s->top.data.max_vals = min(s->top.data.max_vals, blockDim.x);
}
__syncthreads();
// If the condition is false, then it means that s->top.data.max_vals is last set of values.
// And as numvals is considered to be min(`max_vals+s->top.data.index.run_pos[CI_DATA]`,
// blockDim.x*2) we have to return numvals >= s->top.data.index.run_pos[CI_DATA].
auto const is_last_set = (s->top.data.max_vals >= s->top.data.index.run_pos[CI_DATA]);
auto const max_vals = (is_last_set ? s->top.data.max_vals + 7 : blockDim.x) / 8;
n = numvals - max_vals;
if (t < n) {
secondary_val = s->vals.u8[max_vals + t];
if (t == 0) { s->top.data.buffered_count = n; }
}
numvals = min(numvals * 8, is_last_set ? s->top.data.max_vals : blockDim.x);
} else if (s->chunk.type_kind == LONG || s->chunk.type_kind == TIMESTAMP ||
s->chunk.type_kind == DECIMAL) {
orc_bytestream_s *bs = (s->chunk.type_kind == DECIMAL) ? &s->bs2 : &s->bs;
if (is_rlev1(s->chunk.encoding_kind)) {
numvals = Integer_RLEv1<int64_t>(bs, &s->u.rlev1, s->vals.i64, numvals, t);
} else {
numvals = Integer_RLEv2<int64_t>(bs, &s->u.rlev2, s->vals.i64, numvals, t);
}
if (s->chunk.type_kind == DECIMAL) {
// If we're using an index, we may have to drop values from the initial run
uint32_t skip = 0;
int val_scale;
if (num_rowgroups > 0) {
uint32_t run_pos = s->top.data.index.run_pos[CI_DATA2];
if (run_pos) {
skip = min(numvals, run_pos);
__syncthreads();
if (t == 0) { s->top.data.index.run_pos[CI_DATA2] = 0; }
numvals -= skip;
}
}
val_scale = (t < numvals) ? (int)s->vals.i64[skip + t] : 0;
__syncthreads();
numvals = Decode_Decimals(
&s->bs, &s->u.rle8, s->vals, val_scale, numvals, s->chunk.decimal_scale, t);
}
__syncthreads();
} else if (s->chunk.type_kind == FLOAT) {
numvals = min(numvals, (bytestream_buffer_size - 8u) >> 2);
if (t < numvals) { s->vals.u32[t] = bytestream_readu32(&s->bs, s->bs.pos + t * 4); }
__syncthreads();
if (t == 0) { bytestream_flush_bytes(&s->bs, numvals * 4); }
__syncthreads();
} else if (s->chunk.type_kind == DOUBLE) {
numvals = min(numvals, (bytestream_buffer_size - 8u) >> 3);
if (t < numvals) { s->vals.u64[t] = bytestream_readu64(&s->bs, s->bs.pos + t * 8); }
__syncthreads();
if (t == 0) { bytestream_flush_bytes(&s->bs, numvals * 8); }
__syncthreads();
}
__syncthreads();
if (numvals == 0 && vals_skipped != 0 && num_rowgroups > 0) {
// Special case if the secondary streams produced fewer values than the primary stream's RLE
// run, as a result of initial RLE run offset: keep vals_skipped as non-zero to ensure
// proper buffered_count/max_vals update below.
} else {
vals_skipped = 0;
if (num_rowgroups > 0) {
uint32_t run_pos = s->top.data.index.run_pos[CI_DATA];
if (run_pos) {
vals_skipped = min(numvals, run_pos);
numvals -= vals_skipped;
__syncthreads();
if (t == 0) { s->top.data.index.run_pos[CI_DATA] = 0; }
}
}
}
if (t == 0 && numvals + vals_skipped > 0 && numvals < s->top.data.max_vals) {
if (s->chunk.type_kind == TIMESTAMP) {
s->top.data.buffered_count = s->top.data.max_vals - numvals;
}
s->top.data.max_vals = numvals;
}
__syncthreads();
// Use the valid bits to compute non-null row positions until we get a full batch of values to
// decode
DecodeRowPositions(s, first_row, t, temp_storage);
if (!s->top.data.nrows && !s->u.rowdec.nz_count && !vals_skipped) {
// This is a bug (could happen with bitstream errors with a bad run that would produce more
// values than the number of remaining rows)
return;
}
// Store decoded values to output
if (t < min(min(s->top.data.max_vals, s->u.rowdec.nz_count), s->top.data.nrows) &&
s->u.rowdec.row[t] != 0 &&
s->top.data.cur_row + s->u.rowdec.row[t] - 1 < s->top.data.end_row) {
size_t row = s->top.data.cur_row + s->u.rowdec.row[t] - 1 - first_row;
if (row < max_num_rows) {
void *data_out = s->chunk.column_data_base;
switch (s->chunk.type_kind) {
case FLOAT:
case INT: static_cast<uint32_t *>(data_out)[row] = s->vals.u32[t + vals_skipped]; break;
case DOUBLE:
case LONG:
case DECIMAL:
static_cast<uint64_t *>(data_out)[row] = s->vals.u64[t + vals_skipped];
break;
case SHORT:
static_cast<uint16_t *>(data_out)[row] =
static_cast<uint16_t>(s->vals.u32[t + vals_skipped]);
break;
case BYTE: static_cast<uint8_t *>(data_out)[row] = s->vals.u8[t + vals_skipped]; break;
case BOOLEAN:
static_cast<uint8_t *>(data_out)[row] =
(s->vals.u8[(t + vals_skipped) >> 3] >> ((~(t + vals_skipped)) & 7)) & 1;
break;
case DATE:
if (s->chunk.dtype_len == 8) {
// Convert from days to milliseconds by multiplying by 24*3600*1000
static_cast<int64_t *>(data_out)[row] =
86400000ll * (int64_t)s->vals.i32[t + vals_skipped];
} else {
static_cast<uint32_t *>(data_out)[row] = s->vals.u32[t + vals_skipped];
}
break;
case STRING:
case BINARY:
case VARCHAR:
case CHAR: {
nvstrdesc_s *strdesc = &static_cast<nvstrdesc_s *>(data_out)[row];
void const *ptr = nullptr;
uint32_t count = 0;
if (is_dictionary(s->chunk.encoding_kind)) {
auto const dict_idx = s->vals.u32[t + vals_skipped];
if (dict_idx < s->chunk.dict_len) {
auto const &g_entry = global_dictionary[s->chunk.dictionary_start + dict_idx];
ptr = s->chunk.streams[CI_DICTIONARY] + g_entry.pos;
count = g_entry.len;
}
} else {
auto const dict_idx =
s->chunk.dictionary_start + s->vals.u32[t + vals_skipped] - secondary_val;
if (dict_idx + count <= s->chunk.strm_len[CI_DATA]) {
ptr = s->chunk.streams[CI_DATA] + dict_idx;
count = secondary_val;
}
}
strdesc->ptr = static_cast<char const *>(ptr);
strdesc->count = count;
break;
}
case TIMESTAMP: {
int64_t seconds = s->vals.i64[t + vals_skipped] + s->top.data.utc_epoch;
uint32_t nanos = secondary_val;
nanos = (nanos >> 3) * kTimestampNanoScale[nanos & 7];
if (!tz_table.ttimes.empty()) {
seconds += get_gmt_offset(tz_table.ttimes, tz_table.offsets, seconds);
}
if (seconds < 0 && nanos != 0) { seconds -= 1; }
if (s->chunk.ts_clock_rate)
static_cast<int64_t *>(data_out)[row] =
seconds * s->chunk.ts_clock_rate +
(nanos + (499999999 / s->chunk.ts_clock_rate)) /
(1000000000 / s->chunk.ts_clock_rate); // Output to desired clock rate
else
static_cast<int64_t *>(data_out)[row] = seconds * 1000000000 + nanos;
break;
}
}
}
}
__syncthreads();
// Buffer secondary stream values
if (s->chunk.type_kind == TIMESTAMP) {
int buffer_pos = s->top.data.max_vals;
if (t >= buffer_pos && t < buffer_pos + s->top.data.buffered_count) {
s->vals.u32[t - buffer_pos] = secondary_val;
}
} else if (s->chunk.type_kind == BOOLEAN && t < s->top.data.buffered_count) {
s->vals.u8[t] = secondary_val;
}
}
__syncthreads();
if (t == 0) {
s->top.data.cur_row += s->top.data.nrows;
if (s->is_string && !is_dictionary(s->chunk.encoding_kind) && s->top.data.max_vals > 0) {
s->chunk.dictionary_start += s->vals.u32[s->top.data.max_vals - 1];
}
}
__syncthreads();
}
}
/**
 * @brief Launches kernel for decoding NULLs and building string dictionary index tables
 *
 * @param[in] chunks ColumnDesc device array [stripe][column]
 * @param[in] global_dictionary Global dictionary device array
 * @param[in] num_columns Number of columns
 * @param[in] num_stripes Number of stripes
 * @param[in] max_num_rows Maximum number of rows to load
 * @param[in] first_row Crop all rows below first_row
 * @param[in] stream CUDA stream on which the kernel is launched
 */
void __host__ DecodeNullsAndStringDictionaries(ColumnDesc *chunks,
                                               DictionaryEntry *global_dictionary,
                                               uint32_t num_columns,
                                               uint32_t num_stripes,
                                               size_t max_num_rows,
                                               size_t first_row,
                                               rmm::cuda_stream_view stream)
{
  dim3 dim_block(block_size, 1);
  // Grid is (columns, 2 * stripes): two blocks per column chunk
  // (presumably one per processing phase -- confirm against the kernel).
  dim3 dim_grid(num_columns, num_stripes * 2);  // 1024 threads per chunk
  gpuDecodeNullsAndStringDictionaries<block_size><<<dim_grid, dim_block, 0, stream.value()>>>(
    chunks, global_dictionary, num_columns, num_stripes, max_num_rows, first_row);
}
/**
 * @brief Launches kernel for decoding column data
 *
 * @param[in] chunks ColumnDesc device array [stripe][column]
 * @param[in] global_dictionary Global dictionary device array
 * @param[in] num_columns Number of columns
 * @param[in] num_stripes Number of stripes
 * @param[in] max_num_rows Maximum number of rows to load
 * @param[in] first_row Crop all rows below first_row
 * @param[in] tz_table Timezone translation table
 * @param[in] row_groups Optional row index data
 * @param[in] num_rowgroups Number of row groups in row index data
 * @param[in] rowidx_stride Row index stride
 * @param[in] stream CUDA stream on which the kernel is launched
 */
void __host__ DecodeOrcColumnData(ColumnDesc *chunks,
                                  DictionaryEntry *global_dictionary,
                                  uint32_t num_columns,
                                  uint32_t num_stripes,
                                  size_t max_num_rows,
                                  size_t first_row,
                                  timezone_table_view tz_table,
                                  const RowGroup *row_groups,
                                  uint32_t num_rowgroups,
                                  uint32_t rowidx_stride,
                                  rmm::cuda_stream_view stream)
{
  uint32_t num_chunks = num_columns * num_stripes;
  dim3 dim_block(block_size, 1);  // 1024 threads per chunk
  // With a row index, launch one block per (column, rowgroup);
  // otherwise one block per column chunk.
  dim3 dim_grid((num_rowgroups > 0) ? num_columns : num_chunks,
                (num_rowgroups > 0) ? num_rowgroups : 1);
  gpuDecodeOrcColumnData<block_size><<<dim_grid, dim_block, 0, stream.value()>>>(chunks,
                                                                                 global_dictionary,
                                                                                 tz_table,
                                                                                 row_groups,
                                                                                 max_num_rows,
                                                                                 first_row,
                                                                                 num_columns,
                                                                                 num_rowgroups,
                                                                                 rowidx_stride);
}
} // namespace gpu
} // namespace orc
} // namespace io
} // namespace cudf
|
59b38376c17c9f39a83750b6c654f0446cee8a9a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <vector>
#include "include.cuh"
#include "System.cuh"
#include "Solver.cuh"
#include "APGD.cuh"
#include "PDIP.cuh"
#include "TPAS.cuh"
#include "JKIP.cuh"
#include "PJKIP.cuh"
#include "PGJ.cuh"
#include "PGS.cuh"
// Default-construct a simulation system: gravity along -y, 1 ms time step,
// APGD as the default CCP solver, and Gauss-Legendre quadrature tables
// (3-, 5- and 6-point rules) used for the flexible-body integrals.
System::System()
{
  gravity = make_double3(0, -9.81, 0);
  frictionCoefficient = 0.25;
  tol = 1e-8;
  h = 1e-3;
  timeIndex = 0;
  time = 0;
  elapsedTime = 0;
  totalGPUMemoryUsed = 0;
  offsetConstraintsDOF = 0;
  objectiveCCP = 0;
  collisionDetector = new CollisionDetector(this);
  solver = new APGD(this);

  // 3-point Gauss-Legendre rule.
  const double w3[3] = { 5.0 / 9.0, 8.0 / 9.0, 5.0 / 9.0 };
  const double q3[3] = { -sqrt(3.0 / 5.0), 0.0, sqrt(3.0 / 5.0) };
  for (int i = 0; i < 3; i++) { wt3.push_back(w3[i]); pt3.push_back(q3[i]); }

  // 5-point Gauss-Legendre rule.
  const double s70 = sqrt(70.);
  const double w5[5] = { (322. - 13. * s70) / 900.,
                         (322. + 13. * s70) / 900.,
                         128. / 225.,
                         (322. + 13. * s70) / 900.,
                         (322. - 13. * s70) / 900. };
  const double q5[5] = { -(sqrt(5. + 2. * sqrt(10. / 7.))) / 3.,
                         -(sqrt(5. - 2. * sqrt(10. / 7.))) / 3.,
                         0.,
                         (sqrt(5. - 2. * sqrt(10. / 7.))) / 3.,
                         (sqrt(5. + 2. * sqrt(10. / 7.))) / 3. };
  for (int i = 0; i < 5; i++) { wt5.push_back(w5[i]); pt5.push_back(q5[i]); }

  // 6-point Gauss-Legendre rule (tabulated to 8 digits).
  const double w6[6] = { 0.17132449, 0.36076157, 0.46791393,
                         0.46791393, 0.36076157, 0.17132449 };
  const double q6[6] = { -0.93246951, -0.66120939, -0.23861918,
                         0.23861918, 0.66120939, 0.93246951 };
  for (int i = 0; i < 6; i++) { wt6.push_back(w6[i]); pt6.push_back(q6[i]); }
}
// Construct a simulation system with an explicit solver choice:
//   1 = APGD, 2 = PDIP, 3 = TPAS, 4 = JKIP, 5 = PGJ, 6 = PGS, 7 = PJKIP,
//   anything else falls back to APGD.
// All other defaults match System::System().
System::System(int solverType)
{
  gravity = make_double3(0, -9.81, 0);
  frictionCoefficient = 0.25;
  tol = 1e-8;
  h = 1e-3;
  timeIndex = 0;
  time = 0;
  elapsedTime = 0;
  totalGPUMemoryUsed = 0;
  offsetConstraintsDOF = 0;
  objectiveCCP = 0;
  collisionDetector = new CollisionDetector(this);

  switch (solverType) {
    case 2:  solver = new PDIP(this);  break;
    case 3:  solver = new TPAS(this);  break;
    case 4:  solver = new JKIP(this);  break;
    case 5:  solver = new PGJ(this);   break;
    case 6:  solver = new PGS(this);   break;
    case 7:  solver = new PJKIP(this); break;
    case 1:
    default: solver = new APGD(this);
  }

  // Gauss-Legendre quadrature tables, identical to the default constructor.
  const double w3[3] = { 5.0 / 9.0, 8.0 / 9.0, 5.0 / 9.0 };
  const double q3[3] = { -sqrt(3.0 / 5.0), 0.0, sqrt(3.0 / 5.0) };
  for (int i = 0; i < 3; i++) { wt3.push_back(w3[i]); pt3.push_back(q3[i]); }

  const double s70 = sqrt(70.);
  const double w5[5] = { (322. - 13. * s70) / 900.,
                         (322. + 13. * s70) / 900.,
                         128. / 225.,
                         (322. + 13. * s70) / 900.,
                         (322. - 13. * s70) / 900. };
  const double q5[5] = { -(sqrt(5. + 2. * sqrt(10. / 7.))) / 3.,
                         -(sqrt(5. - 2. * sqrt(10. / 7.))) / 3.,
                         0.,
                         (sqrt(5. - 2. * sqrt(10. / 7.))) / 3.,
                         (sqrt(5. + 2. * sqrt(10. / 7.))) / 3. };
  for (int i = 0; i < 5; i++) { wt5.push_back(w5[i]); pt5.push_back(q5[i]); }

  const double w6[6] = { 0.17132449, 0.36076157, 0.46791393,
                         0.46791393, 0.36076157, 0.17132449 };
  const double q6[6] = { -0.93246951, -0.66120939, -0.23861918,
                         0.23861918, 0.66120939, 0.93246951 };
  for (int i = 0; i < 6; i++) { wt6.push_back(w6[i]); pt6.push_back(q6[i]); }
}
// Set the integration step size used by DoTimeStep().
void System::setTimeStep(double step_size)
{
  this->h = step_size;
}
// Set the (single, global) contact friction coefficient.
void System::setFrictionCoefficient(double mu)
{
  this->frictionCoefficient = mu;
}
// Register a rigid body with the system; returns the new body count.
int System::add(Body* body) {
  bodies.push_back(body);
  return static_cast<int>(bodies.size());
}
// Register a beam with the system (back-linking it to this System);
// returns the new beam count.
int System::add(Beam* beam) {
  beam->sys = this;
  beams.push_back(beam);
  return static_cast<int>(beams.size());
}
// Register a plate with the system (back-linking it to this System);
// returns the new plate count.
int System::add(Plate* plate) {
  plate->sys = this;
  plates.push_back(plate);
  return static_cast<int>(plates.size());
}
// Register a planar (2D) body with the system (back-linking it to this
// System); returns the new 2D-body count.
int System::add(Body2D* body2D) {
  body2D->sys = this;
  body2Ds.push_back(body2D);
  return static_cast<int>(body2Ds.size());
}
// Mirror all host-side state into the device vectors, wrap the raw device
// pointers in cusp array views, and assemble the (inverse) mass matrices.
// Called once from initializeSystem() after all bodies/beams/plates/meshes
// have been added. Returns 0.
int System::initializeDevice() {
  // Host -> device copies of state and work vectors.
  indices_d = indices_h;
  p_d = p_h;
  v_d = v_h;
  a_d = a_h;
  f_d = f_h;
  f_contact_d = f_contact_h;
  tmp_d = tmp_h;
  r_d = r_h;
  b_d = b_h;
  k_d = k_h;
  // NOTE(review): gamma_d and friction_d are copied from a_h, apparently only
  // to size them like the acceleration vector (contents are overwritten by
  // the solver) -- confirm this is intentional.
  gamma_d = a_h;
  friction_d = a_h;
  fApplied_d = fApplied_h;
  fElastic_d = fElastic_h;
  // Mass-matrix triplets and geometry/material tables.
  massI_d = massI_h;
  massJ_d = massJ_h;
  mass_d = mass_h;
  contactGeometry_d = contactGeometry_h;
  collisionGeometry_d = collisionGeometry_h;
  collisionMap_d = collisionMap_h;
  materialsBeam_d = materialsBeam_h;
  materialsPlate_d = materialsPlate_h;
  materialsBody2D_d = materialsBody2D_h;
  fixedBodies_d = fixedBodies_h;
  // Beam/plate strain and curvature state.
  strainDerivative_d = strainDerivative_h;
  strain_d = strain_h;
  strainEnergy_d = strainEnergy_h;
  strainPlate_d = strainPlate_h;
  strainEnergyPlate_d = strainEnergyPlate_h;
  strainDerivativePlate_d = strainDerivativePlate_h;
  curvatureDerivativePlate_d = curvatureDerivativePlate_h;
  Sx_d = Sx_h;
  Sxx_d = Sxx_h;
  Sy_d = Sy_h;
  Syy_d = Syy_h;
  strainPlate0_d = strainPlate0_h;
  curvaturePlate0_d = curvaturePlate0_h;
  strainBeam0_d = strainBeam0_h;
  curvatureBeam0_d = curvatureBeam0_h;
  // Shell Mesh Initialization
  fElasticShellMesh_d = fElasticShellMesh_h;
  strainShellMesh_d = strainShellMesh_h;
  strainEnergyShellMesh_d = strainEnergyShellMesh_h;
  strainDerivativeShellMesh_d = strainDerivativeShellMesh_h;
  curvatureDerivativeShellMesh_d = curvatureDerivativeShellMesh_h;
  Sx_shellMesh_d = Sx_shellMesh_h;
  Sxx_shellMesh_d = Sxx_shellMesh_h;
  Sy_shellMesh_d = Sy_shellMesh_h;
  Syy_shellMesh_d = Syy_shellMesh_h;
  strainShellMesh0_d = strainShellMesh0_h;
  curvatureShellMesh0_d = curvatureShellMesh0_h;
  // End Shell Mesh Initialization
  // Wrap raw device pointers so cusp array views can alias the vectors.
  thrust::device_ptr<double> wrapped_device_p(CASTD1(p_d));
  thrust::device_ptr<double> wrapped_device_v(CASTD1(v_d));
  thrust::device_ptr<double> wrapped_device_a(CASTD1(a_d));
  thrust::device_ptr<double> wrapped_device_f(CASTD1(f_d));
  thrust::device_ptr<double> wrapped_device_f_contact(CASTD1(f_contact_d));
  thrust::device_ptr<double> wrapped_device_fApplied(CASTD1(fApplied_d));
  thrust::device_ptr<double> wrapped_device_fElastic(CASTD1(fElastic_d));
  thrust::device_ptr<double> wrapped_device_tmp(CASTD1(tmp_d));
  thrust::device_ptr<double> wrapped_device_r(CASTD1(r_d));
  thrust::device_ptr<double> wrapped_device_b(CASTD1(b_d));
  thrust::device_ptr<double> wrapped_device_k(CASTD1(k_d));
  thrust::device_ptr<double> wrapped_device_gamma(CASTD1(gamma_d));
  // DOF layout: [bodies | beams | plates | body2Ds | shell mesh];
  // this offset is the start of the shell-mesh DOFs.
  int offset_shellMesh = plates.size()*36+12*beams.size()+3*bodies.size()+3*body2Ds.size();
  p = DeviceValueArrayView(wrapped_device_p, wrapped_device_p + p_d.size());
  v = DeviceValueArrayView(wrapped_device_v, wrapped_device_v + v_d.size());
  // *_shellMesh views alias only the tail (shell-mesh) portion of the vectors.
  v_shellMesh = DeviceValueArrayView(wrapped_device_v + offset_shellMesh, wrapped_device_v + v_d.size());
  a = DeviceValueArrayView(wrapped_device_a, wrapped_device_a + a_d.size());
  f = DeviceValueArrayView(wrapped_device_f, wrapped_device_f + f_d.size());
  f_contact = DeviceValueArrayView(wrapped_device_f_contact, wrapped_device_f_contact + f_contact_d.size());
  fApplied = DeviceValueArrayView(wrapped_device_fApplied, wrapped_device_fApplied + fApplied_d.size());
  fElastic = DeviceValueArrayView(wrapped_device_fElastic, wrapped_device_fElastic + fElastic_d.size());
  tmp = DeviceValueArrayView(wrapped_device_tmp, wrapped_device_tmp + tmp_d.size());
  tmp_shellMesh = DeviceValueArrayView(wrapped_device_tmp + offset_shellMesh, wrapped_device_tmp + tmp_d.size());
  r = DeviceValueArrayView(wrapped_device_r, wrapped_device_r + r_d.size());
  b = DeviceValueArrayView(wrapped_device_b, wrapped_device_b + b_d.size());
  k = DeviceValueArrayView(wrapped_device_k, wrapped_device_k + k_d.size());
  k_shellMesh = DeviceValueArrayView(wrapped_device_k + offset_shellMesh, wrapped_device_k + k_d.size());
  gamma = DeviceValueArrayView(wrapped_device_gamma, wrapped_device_gamma + gamma_d.size());
  // create mass matrix using cusp library (shouldn't change)
  thrust::device_ptr<int> wrapped_device_I(CASTI1(massI_d));
  DeviceIndexArrayView row_indices = DeviceIndexArrayView(wrapped_device_I, wrapped_device_I + massI_d.size());
  thrust::device_ptr<int> wrapped_device_J(CASTI1(massJ_d));
  DeviceIndexArrayView column_indices = DeviceIndexArrayView(wrapped_device_J, wrapped_device_J + massJ_d.size());
  thrust::device_ptr<double> wrapped_device_V(CASTD1(mass_d));
  DeviceValueArrayView values = DeviceValueArrayView(wrapped_device_V, wrapped_device_V + mass_d.size());
  mass = DeviceView(a_d.size(), a_d.size(), mass_d.size(), row_indices, column_indices, values);
  mass.sort_by_row();
  // end create mass matrix
  // create shellMesh mass matrix using cusp
  // NOTE(review): the views below use massShellI_h/massShellJ_h sizes with
  // the *_d pointers, and massShell*_d are not copied from host here --
  // presumably they are filled elsewhere; verify sizes match.
  thrust::device_ptr<int> wrapped_device_I_shell(CASTI1(massShellI_d));
  DeviceIndexArrayView row_indices_shell = DeviceIndexArrayView(wrapped_device_I_shell, wrapped_device_I_shell + massShellI_h.size());
  thrust::device_ptr<int> wrapped_device_J_shell(CASTI1(massShellJ_d));
  DeviceIndexArrayView column_indices_shell = DeviceIndexArrayView(wrapped_device_J_shell, wrapped_device_J_shell + massShellJ_h.size());
  thrust::device_ptr<double> wrapped_device_V_shell(CASTD1(massShell_d));
  DeviceValueArrayView values_shell = DeviceValueArrayView(wrapped_device_V_shell, wrapped_device_V_shell + massShell_d.size());
  mass_shellMesh = DeviceView(3*nodes_h.size(), 3*nodes_h.size(), massShell_d.size(), row_indices_shell, column_indices_shell, values_shell);
  mass_shellMesh.sort_by_row();
  // end create shellMesh mass matrix using cusp
  // calculate initialize strains and curvatures
  calculateInitialStrainAndCurvature();
  processConstraints();
  // Copy the constraint bookkeeping produced by processConstraints() to device.
  offsetBilaterals_d = offsetBilaterals_h;
  constraintsBilateralDOF_d = constraintsBilateralDOF_h;
  infoConstraintBilateralDOF_d = infoConstraintBilateralDOF_h;
  constraintsSpherical_ShellNodeToBody2D_d =constraintsSpherical_ShellNodeToBody2D_h;
  pSpherical_ShellNodeToBody2D_d = pSpherical_ShellNodeToBody2D_h;
  return 0;
}
// Convert the user-facing constraint lists into solver-ready form:
//   - assign each DOF bilateral constraint its starting offset in the
//     Jacobian triplet arrays (1 entry for a ground constraint, 2 otherwise)
//   - rewrite each ShellNodeToBody2D spherical constraint from
//     (plate index, node index, body2D index) into direct offsets into the
//     global position vector, caching the initial relative position.
// Returns 0. Called from initializeDevice() before the constraint arrays
// are copied to the device.
int System::processConstraints() {
  // process the DOF bilaterals
  int offset = 0;
  for(int i=0;i<constraintsBilateralDOF_h.size();i++) {
    offsetBilaterals_h.push_back(offset);
    if(constraintsBilateralDOF_h[i].y<0) {
      // Ground constraint: a single Jacobian entry; also record the DOF's
      // initial position so prescribed motion can be measured from it.
      offset+=1;
      infoConstraintBilateralDOF_h[i].z = p_h[constraintsBilateralDOF_h[i].x]; // need to know initial value
    } else {
      // DOF-to-DOF constraint: two Jacobian entries (+1 and -1).
      offset+=2;
    }
  }
  // end process the DOF bilaterals
  // process the ShellNodeToBody2D spherical constraints
  for(int i=0;i<constraintsSpherical_ShellNodeToBody2D_h.size();i++) {
    // indexA == -1 marks a shell-mesh node; otherwise it is a plate index.
    int indexA = constraintsSpherical_ShellNodeToBody2D_h[i].x;
    int nodeIndexA = constraintsSpherical_ShellNodeToBody2D_h[i].y;
    int indexB = constraintsSpherical_ShellNodeToBody2D_h[i].z;
    int offsetA;
    int offsetB;
    if(indexA==-1) {
      // shell mesh
      offsetA = 3*bodies.size()+12*beams.size()+36*plates.size()+3*body2Ds.size()+9*nodeIndexA;
      offsetB = 3*bodies.size()+12*beams.size()+36*plates.size()+3*indexB;
    } else {
      // plate
      offsetA = 3*bodies.size()+12*beams.size()+36*indexA+9*nodeIndexA;
      offsetB = 3*bodies.size()+12*beams.size()+36*plates.size()+3*indexB;
    }
    constraintsSpherical_ShellNodeToBody2D_h[i].x = offsetA; // NOTE: Reset value to offsets! Easier for later constraint processing
    constraintsSpherical_ShellNodeToBody2D_h[i].y = offsetB; // NOTE: Reset value to offsets! Easier for later constraint processing
    // Initial relative position (pHat). NOTE(review): the z component is
    // p_h[offsetA+2] without subtracting the body2D part -- the body2D has
    // only (x, y, phi), and the Jacobian kernel never reads pHat.z, so this
    // appears intentional; confirm.
    pSpherical_ShellNodeToBody2D_h.push_back(make_double3(p_h[offsetA]-p_h[offsetB],p_h[offsetA+1]-p_h[offsetB+1],p_h[offsetA+2]));
  }
  // end process the ShellNodeToBody2D spherical constraints
  return 0;
}
// Assemble the global host-side state vectors from all registered bodies,
// beams, plates, 2D bodies and the optional shell mesh, then push everything
// to the device (initializeDevice) and set up the solver. Returns 0.
// DOF layout: 3 per body, 12 per beam, 36 per plate, 3 per body2D,
// 9 per shell-mesh node, in that order.
int System::initializeSystem() {
  for(int j=0; j<bodies.size(); j++) {
    Body* body = bodies[j];
    body->setIdentifier(j); // Indicates the number that the Body was added
    body->setIndex(p_h.size()); // Indicates the Body's location in the position array
    // Push Body's location to global library
    indices_h.push_back(p_h.size());
    // update p
    p_h.push_back(body->pos.x);
    p_h.push_back(body->pos.y);
    p_h.push_back(body->pos.z);
    // update v
    v_h.push_back(body->vel.x);
    v_h.push_back(body->vel.y);
    v_h.push_back(body->vel.z);
    // update a
    a_h.push_back(body->acc.x);
    a_h.push_back(body->acc.y);
    a_h.push_back(body->acc.z);
    // update external force vector (gravity); fixed bodies get zero force
    if(body->isFixed()) {
      f_h.push_back(0);
      f_h.push_back(0);
      f_h.push_back(0);
    }
    else {
      f_h.push_back(body->mass * this->gravity.x);
      f_h.push_back(body->mass * this->gravity.y);
      f_h.push_back(body->mass * this->gravity.z);
    }
    // Zero-initialize the per-DOF work vectors.
    f_contact_h.push_back(0);
    f_contact_h.push_back(0);
    f_contact_h.push_back(0);
    fApplied_h.push_back(0);
    fApplied_h.push_back(0);
    fApplied_h.push_back(0);
    fElastic_h.push_back(0);
    fElastic_h.push_back(0);
    fElastic_h.push_back(0);
    tmp_h.push_back(0);
    tmp_h.push_back(0);
    tmp_h.push_back(0);
    // NOTE(review): r_h receives 6 entries per 3-DOF body (unlike the other
    // vectors, which get 3) -- confirm r's intended size.
    r_h.push_back(0);
    r_h.push_back(0);
    r_h.push_back(0);
    r_h.push_back(0);
    r_h.push_back(0);
    r_h.push_back(0);
    k_h.push_back(0);
    k_h.push_back(0);
    k_h.push_back(0);
    // update the mass matrix: diagonal inverse mass (0 for fixed bodies)
    for(int i = 0; i < body->numDOF; i++) {
      massI_h.push_back(i + body->numDOF * j);
      massJ_h.push_back(i + body->numDOF * j);
      if(body->isFixed()) {
        mass_h.push_back(0);
      }
      else {
        mass_h.push_back(1.0/body->mass);
      }
    }
    contactGeometry_h.push_back(body->contactGeometry);
    collisionGeometry_h.push_back(body->contactGeometry);
    collisionMap_h.push_back(make_int4(body->getIdentifier(),0,0,body->getCollisionFamily()));
    if(body->isFixed()) fixedBodies_h.push_back(j);
  }
  // Flexible elements append their own state through their add* helpers.
  for(int j=0; j<beams.size(); j++) {
    beams[j]->addBeam(j); //TODO: Make a function like this for body (makes code cleaner)
  }
  for(int j=0; j<plates.size(); j++) {
    plates[j]->addPlate(j);
  }
  for(int j=0; j<body2Ds.size(); j++) {
    body2Ds[j]->addBody2D(j);
  }
  // add shell mesh to system
  if(nodes_h.size()) {
    indices_h.push_back(p_h.size());
    // update p
    for(int i=0; i<nodes_h.size(); i++) {
      p_h.push_back(nodes_h[i].x);
      p_h.push_back(nodes_h[i].y);
      p_h.push_back(nodes_h[i].z);
    }
    // update fext
    for(int i=0; i<fextMesh_h.size(); i++) {
      f_h.push_back(fextMesh_h[i]);
    }
    // update zero vectors
    for(int i=0;i<3*nodes_h.size();i++) {
      v_h.push_back(0);
      a_h.push_back(0);
      f_contact_h.push_back(0);
      fApplied_h.push_back(0);
      fElastic_h.push_back(0);
      tmp_h.push_back(0);
      k_h.push_back(0);
      r_h.push_back(0);
    }
    // update the mass inverse; shell-mesh triplets are shifted past all
    // body/beam/plate/body2D DOFs
    int offset = plates.size()*36+12*beams.size()+3*bodies.size()+3*body2Ds.size();
    for(int i=0;i<invMassShellI_h.size();i++) {
      massI_h.push_back(invMassShellI_h[i]+offset);
      massJ_h.push_back(invMassShellJ_h[i]+offset);
      mass_h.push_back(invMassShell_h[i]);
    }
    // Per-element shell state: elastic forces, strain/curvature scratch,
    // shape-function buffers, and per-node collision geometry.
    for(int j=0;j<shellConnectivities_h.size();j++) {
      contactGeometry_h.push_back(make_double3(shellGeometries_h[j].x,shellGeometries_h[j].y,shellGeometries_h[j].w));
      for(int i=0;i<36;i++) {
        fElasticShellMesh_h.push_back(0);
        strainDerivativeShellMesh_h.push_back(make_double3(0,0,0));
        curvatureDerivativeShellMesh_h.push_back(make_double3(0,0,0));
      }
      strainEnergyShellMesh_h.push_back(0);
      strainShellMesh_h.push_back(make_double3(0,0,0));
      for(int i=0;i<wt6.size()*pt6.size();i++) strainShellMesh0_h.push_back(make_double3(0,0,0));
      for(int i=0;i<wt5.size()*pt5.size();i++) curvatureShellMesh0_h.push_back(make_double3(0,0,0));
      for(int i=0;i<12;i++) {
        Sx_shellMesh_h.push_back(0);
        Sxx_shellMesh_h.push_back(0);
        Sy_shellMesh_h.push_back(0);
        Syy_shellMesh_h.push_back(0);
      }
      // Interior collision spheres (family -2); geometry .w is presumably the
      // per-edge node count -- TODO confirm.
      for(int i=1;i<shellGeometries_h[j].w-1;i++) {
        for(int k=1;k<shellGeometries_h[j].w-1;k++) {
          collisionGeometry_h.push_back(make_double3(0.5*shellGeometries_h[j].z,0,0));
          collisionMap_h.push_back(make_int4(plates.size()+beams.size()+bodies.size()+body2Ds.size()+j,i,k,-2));
        }
      }
    }
  }
  initializeDevice();
  solver->setup();
  return 0;
}
// Constrain DOFA to DOFB, or to ground when DOFB < 0, with no prescribed
// motion. A ground constraint contributes one Jacobian entry, a DOF-to-DOF
// constraint two; offsetConstraintsDOF tracks the running entry count.
// Returns 0.
int System::addBilateralConstraintDOF(int DOFA, int DOFB) {
  constraintsBilateralDOF_h.push_back(make_int2(DOFA, DOFB));
  infoConstraintBilateralDOF_h.push_back(make_double3(0, 0, 0));
  offsetConstraintsDOF += (DOFB < 0) ? 1 : 2;
  return 0;
}
// Constrain DOFA to DOFB (ground when DOFB < 0) with a prescribed relative
// velocity that begins at startTime. Jacobian entry accounting matches the
// two-argument overload. Returns 0.
int System::addBilateralConstraintDOF(int DOFA, int DOFB, double velocity, double startTime) {
  constraintsBilateralDOF_h.push_back(make_int2(DOFA, DOFB));
  infoConstraintBilateralDOF_h.push_back(make_double3(velocity, startTime, 0));
  offsetConstraintsDOF += (DOFB < 0) ? 1 : 2;
  return 0;
}
// Pin a shell-mesh node to a 2D body with a spherical joint. The leading -1
// marks the constraint as referring to the shell mesh rather than a plate
// (see processConstraints()). Returns 0.
int System::pinShellNodeToBody2D(int shellNodeIndex, int body2Dindex) {
  constraintsSpherical_ShellNodeToBody2D_h.push_back(make_int3(-1, shellNodeIndex, body2Dindex));
  return 0;
}
// Pin a node of a specific plate to a 2D body with a spherical joint
// (resolved to DOF offsets later by processConstraints()). Returns 0.
int System::pinPlateNodeToBody2D(int plateIndex, int plateNodeIndex, int body2Dindex) {
  constraintsSpherical_ShellNodeToBody2D_h.push_back(make_int3(plateIndex, plateNodeIndex, body2Dindex));
  return 0;
}
// Advance the simulation by one step of size h:
//   1) broad- and narrow-phase collision detection,
//   2) if any contacts or bilateral constraints exist, assemble and solve
//      the QOCC, then integrate velocities with the contact impulses,
//   3) otherwise integrate with applied/elastic forces only,
//   4) explicit position update p += h*v.
// Updates time, timeIndex, elapsedTime (kernel wall time in ms) and
// totalGPUMemoryUsed, and prints a progress line. Returns 0.
int System::DoTimeStep() {
  hipEvent_t start, stop;
  hipEventCreate(&start);
  hipEventCreate(&stop);
  hipEventRecord(start, 0);

  objectiveCCP = 0;

  // Perform collision detection
  if(collisionGeometry_d.size()) {
    collisionDetector->generateAxisAlignedBoundingBoxes();
    collisionDetector->detectPossibleCollisions_spatialSubdivision();
    collisionDetector->detectCollisions();
  }

  buildAppliedImpulseVector();
  if(collisionDetector->numCollisions||constraintsBilateralDOF_d.size()||constraintsSpherical_ShellNodeToBody2D_d.size()) {
    // Set up the QOCC
    buildContactJacobian();
    buildSchurVector();

    // Solve the QOCC
    solver->solve();

    // Perform time integration (contacts) TODO: Get rid of constraint forces in f_contact vector!
    cusp::multiply(DT,gamma,f_contact);
    cusp::blas::axpby(k,f_contact,tmp,1.0,1.0);
    cusp::multiply(mass,tmp,v);
    cusp::blas::scal(f_contact,1.0/h);  // convert impulses to forces
  }
  else {
    // Perform time integration (no contacts)
    cusp::multiply(mass,k,v);
    cusp::blas::fill(f_contact,0.0);
  }

  // Explicit position update: p += h * v.
  cusp::blas::axpy(v, p, h);

  time += h;
  timeIndex++;
  //p_h = p_d;

  hipEventRecord(stop, 0);
  hipEventSynchronize(stop);
  float execTime;
  hipEventElapsedTime(&execTime, start, stop);
  elapsedTime = execTime;
  // BUG FIX: the timing events were previously leaked on every step; destroy
  // them once the elapsed time has been read.
  hipEventDestroy(start);
  hipEventDestroy(stop);

  printf("Time: %f (Exec. Time: %f), Collisions: %d (%d possible)\n",time,elapsedTime,collisionDetector->numCollisions, (int)collisionDetector->numPossibleCollisions);

  size_t avail;
  size_t total;
  hipMemGetInfo( &avail, &total );
  size_t used = total - avail;
  totalGPUMemoryUsed = used/1000000.0;
  cout << "  Device memory used: " << totalGPUMemoryUsed << " MB (Avail: " << avail/1000000 << " MB)" << endl;

  return 0;
}
// Accumulate an external force on a rigid body's three translational DOFs
// (host-side vector; pushed to the device elsewhere). Returns 0.
int System::applyForce(Body* body, double3 force) {
  const int i = body->getIndex();
  fApplied_h[i]     += force.x;
  fApplied_h[i + 1] += force.y;
  fApplied_h[i + 2] += force.z;
  return 0;
}
// Zero the applied-force vector on the device, then mirror it back to the
// host copy so both stay in sync. Returns 0.
int System::clearAppliedForces() {
  Thrust_Fill(fApplied_d, 0.0);
  fApplied_h = fApplied_d;
  return 0;
}
// One thread per bilateral DOF constraint: emit its COO Jacobian triplets.
// A ground constraint (y < 0) writes a single +1 entry; a DOF-to-DOF
// constraint writes +1 on the first DOF and -1 on the second. `offsets`
// gives each constraint's starting slot in DI/DJ/D.
__global__ void constructBilateralJacobian(int2* constraintBilateralDOF, int* offsets, int* DI, int* DJ, double* D, uint numConstraintsBilateral) {
  INIT_CHECK_THREAD_BOUNDED(INDEX1D, numConstraintsBilateral);

  const int2 dofs = constraintBilateralDOF[index];
  const int  base = offsets[index];

  DI[base] = index;
  DJ[base] = dofs.x;
  D[base]  = 1.0;

  if (dofs.y >= 0) {
    DI[base + 1] = index;
    DJ[base + 1] = dofs.y;
    D[base + 1]  = -1.0;
  }
}
// One thread per shell-node/body2D spherical constraint: emit 7 COO Jacobian
// triplets (3 rows: x, y, z). `constraints` holds position-vector offsets
// (offsetS for the shell/plate node, offsetB for the 2D body), as rewritten
// by System::processConstraints(); `pHats` is the cached initial relative
// position used to rotate the attachment point by the body's angle phi.
// Rows start at numConstraintsDOF; entries start at offsetConstraintsDOF.
__global__ void constructSpherical_ShellNodeToBody2DJacobian(int3* constraints, double3* pHats, double* p, int* DI, int* DJ, double* D, int numConstraintsDOF, int offsetConstraintsDOF, uint numConstraints) {
  INIT_CHECK_THREAD_BOUNDED(INDEX1D, numConstraints);

  const int3    c    = constraints[index];
  const double3 pHat = pHats[index];
  const int offsetS = c.x;  // shell/plate node DOF offset
  const int offsetB = c.y;  // body2D DOF offset (x, y, phi)

  const int row  = 3 * index + numConstraintsDOF;       // first row of this constraint
  const int base = 7 * index + offsetConstraintsDOF;    // first triplet slot

  const double phi  = p[offsetB + 2];
  const double sphi = sin(phi);
  const double cphi = cos(phi);

  // Row indices (x-row: 3 entries, y-row: 3 entries, z-row: 1 entry).
  DI[base + 0] = row;
  DI[base + 1] = row;
  DI[base + 2] = row;
  DI[base + 3] = row + 1;
  DI[base + 4] = row + 1;
  DI[base + 5] = row + 1;
  DI[base + 6] = row + 2;

  // Column indices.
  DJ[base + 0] = offsetB;
  DJ[base + 1] = offsetB + 2;
  DJ[base + 2] = offsetS;
  DJ[base + 3] = offsetB + 1;
  DJ[base + 4] = offsetB + 2;
  DJ[base + 5] = offsetS + 1;
  DJ[base + 6] = offsetS + 2;

  // Values: body position terms are +1, the node terms -1, and the phi
  // columns carry the rotated attachment-point derivatives.
  D[base + 0] = 1.0;
  D[base + 1] = -pHat.x * sphi - pHat.y * cphi;
  D[base + 2] = -1.0;
  D[base + 3] = 1.0;
  D[base + 4] = pHat.x * cphi - pHat.y * sphi;
  D[base + 5] = -1.0;
  D[base + 6] = -1.0;
}
__global__ void constructContactJacobian(int* nonzerosPerContact_d, int4* collisionMap, int4* connectivities, double3* geometries, double3* collisionGeometry, int* DI, int* DJ, double* D, double* friction, double mu, double4* normalsAndPenetrations, uint* collisionIdentifierA, uint* collisionIdentifierB, int* indices, int numBodies, int numBeams, int numPlates, int numBodys2D, uint offsetConstraintsBilateral, uint numConstraintsBilateral, uint numCollisions) {
INIT_CHECK_THREAD_BOUNDED(INDEX1D, numCollisions);
friction[index] = mu; // TODO: EDIT THIS TO BE MINIMUM OF FRICTION COEFFICIENTS
bool shellMeshA = false;
bool shellMeshB = false;
int4 connectivityA;
int4 connectivityB;
int shellOffset = 3*numBodies+12*numBeams+36*numPlates+3*numBodys2D;
int offsetA = (!index) ? 0 : nonzerosPerContact_d[index - 1];
offsetA+=offsetConstraintsBilateral; // add offset for bilateral constraints
DI = &DI[offsetA];
DJ = &DJ[offsetA];
D = &D[offsetA];
int bodyIdentifierA = collisionMap[collisionIdentifierA[index]].x;
int bodyIdentifierB = collisionMap[collisionIdentifierB[index]].x;
int endA = 3;
if(bodyIdentifierA<numBodies) {
endA = 3; // body
}
else if(bodyIdentifierA<(numBodies+numBeams)) {
endA = 12; // beam
}
else if(bodyIdentifierA<(numBodies+numBeams+numPlates)) {
endA = 36; // plate
}
else if(bodyIdentifierA<(numBodies+numBeams+numPlates+numBodys2D)) {
endA = 3; // body2D
}
else {
endA = 36; // shellMesh
shellMeshA = true;
connectivityA = connectivities[bodyIdentifierA-(numBodies+numBeams+numPlates+numBodys2D)];
}
int endB = 3;
if(bodyIdentifierB<numBodies) {
endB = 3; // body
}
else if(bodyIdentifierB<(numBodies+numBeams)) {
endB = 12; // beam
}
else if(bodyIdentifierB<(numBodies+numBeams+numPlates)) {
endB = 36; // plate
}
else if(bodyIdentifierB<(numBodies+numBeams+numPlates+numBodys2D)) {
endB = 3; // body2D
}
else {
endB = 36; // shellMesh
shellMeshB = true;
connectivityB = connectivities[bodyIdentifierB-(numBodies+numBeams+numPlates+numBodys2D)];
}
int indexA = indices[bodyIdentifierA];
int indexB = indices[bodyIdentifierB];
double xiA = static_cast<double>(collisionMap[collisionIdentifierA[index]].y)/(static_cast<double>(geometries[bodyIdentifierA].z-1));
double etaA = static_cast<double>(collisionMap[collisionIdentifierA[index]].z)/(static_cast<double>(geometries[bodyIdentifierA].z-1));
double aA = geometries[bodyIdentifierA].x;
double bA = geometries[bodyIdentifierA].y;
double lA = bA;
double xiB = static_cast<double>(collisionMap[collisionIdentifierB[index]].y)/(static_cast<double>(geometries[bodyIdentifierB].z-1));
double etaB = static_cast<double>(collisionMap[collisionIdentifierB[index]].z)/(static_cast<double>(geometries[bodyIdentifierB].z-1));
double aB = geometries[bodyIdentifierB].x;
double bB = geometries[bodyIdentifierB].y;
double lB = bB;
double4 nAndP;
double3 n, u, v;
nAndP = normalsAndPenetrations[index];
n = make_double3(nAndP.x,nAndP.y,nAndP.z);
if(n.z != 0) {
u = normalize(make_double3(1,0,-n.x/n.z));
}
else if(n.x != 0) {
u = normalize(make_double3(-n.z/n.x,0,1));
}
else {
u = normalize(make_double3(1,-n.x/n.y,0));
}
v = normalize(cross(n,u));
// Add n, i indices
int i;
int end = endA;
int j = 0;
for(i=0;i<end;i++) {
DI[i] = 3*index+0+numConstraintsBilateral;
if(shellMeshA) {
if(j<9) {
DJ[i] = shellOffset+9*connectivityA.x+j;
}
else if (j<18) {
DJ[i] = shellOffset-9+9*connectivityA.y+j;
}
else if (j<27) {
DJ[i] = shellOffset-18+9*connectivityA.z+j;
}
else {
DJ[i] = shellOffset-27+9*connectivityA.w+j;
}
} else {
DJ[i] = indexA+j;
}
j++;
}
end+=endB;
j = 0;
for(;i<end;i++) {
DI[i] = 3*index+0+numConstraintsBilateral;
if(shellMeshB) {
if(j<9) {
DJ[i] = shellOffset+9*connectivityB.x+j;
}
else if (j<18) {
DJ[i] = shellOffset-9+9*connectivityB.y+j;
}
else if (j<27) {
DJ[i] = shellOffset-18+9*connectivityB.z+j;
}
else {
DJ[i] = shellOffset-27+9*connectivityB.w+j;
}
} else {
DJ[i] = indexB+j;
}
j++;
}
// Add u, i indices
end+=endA;
j = 0;
for(;i<end;i++) {
DI[i] = 3*index+1+numConstraintsBilateral;
if(shellMeshA) {
if(j<9) {
DJ[i] = shellOffset+9*connectivityA.x+j;
}
else if (j<18) {
DJ[i] = shellOffset-9+9*connectivityA.y+j;
}
else if (j<27) {
DJ[i] = shellOffset-18+9*connectivityA.z+j;
}
else {
DJ[i] = shellOffset-27+9*connectivityA.w+j;
}
} else {
DJ[i] = indexA+j;
}
j++;
}
end+=endB;
j = 0;
for(;i<end;i++) {
DI[i] = 3*index+1+numConstraintsBilateral;
if(shellMeshB) {
if(j<9) {
DJ[i] = shellOffset+9*connectivityB.x+j;
}
else if (j<18) {
DJ[i] = shellOffset-9+9*connectivityB.y+j;
}
else if (j<27) {
DJ[i] = shellOffset-18+9*connectivityB.z+j;
}
else {
DJ[i] = shellOffset-27+9*connectivityB.w+j;
}
} else {
DJ[i] = indexB+j;
}
j++;
}
// Add v, i indices
end+=endA;
j = 0;
for(;i<end;i++) {
DI[i] = 3*index+2+numConstraintsBilateral;
if(shellMeshA) {
if(j<9) {
DJ[i] = shellOffset+9*connectivityA.x+j;
}
else if (j<18) {
DJ[i] = shellOffset-9+9*connectivityA.y+j;
}
else if (j<27) {
DJ[i] = shellOffset-18+9*connectivityA.z+j;
}
else {
DJ[i] = shellOffset-27+9*connectivityA.w+j;
}
} else {
DJ[i] = indexA+j;
}
j++;
}
end+=endB;
j = 0;
for(;i<end;i++) {
DI[i] = 3*index+2+numConstraintsBilateral;
if(shellMeshB) {
if(j<9) {
DJ[i] = shellOffset+9*connectivityB.x+j;
}
else if (j<18) {
DJ[i] = shellOffset-9+9*connectivityB.y+j;
}
else if (j<27) {
DJ[i] = shellOffset-18+9*connectivityB.z+j;
}
else {
DJ[i] = shellOffset-27+9*connectivityB.w+j;
}
} else {
DJ[i] = indexB+j;
}
j++;
}
// Add n, values
int startIndex = 0;
if(bodyIdentifierA<numBodies) {
D[startIndex+0] = n.x;
D[startIndex+1] = n.y;
D[startIndex+2] = n.z;
startIndex+=3;
} else if (bodyIdentifierA<numBodies+numBeams) {
D[startIndex+0 ] = n.x*(2.0*xiA*xiA*xiA-3.0*xiA*xiA+1.0);
D[startIndex+1 ] = n.y*(2.0*xiA*xiA*xiA-3.0*xiA*xiA+1.0);
D[startIndex+2 ] = n.z*(2.0*xiA*xiA*xiA-3.0*xiA*xiA+1.0);
D[startIndex+3 ] = lA*n.x*(xiA*xiA*xiA-2.0*xiA*xiA+xiA);
D[startIndex+4 ] = lA*n.y*(xiA*xiA*xiA-2.0*xiA*xiA+xiA);
D[startIndex+5 ] = lA*n.z*(xiA*xiA*xiA-2.0*xiA*xiA+xiA);
D[startIndex+6 ] = n.x*(-2.0*xiA*xiA*xiA+3.0*xiA*xiA);
D[startIndex+7 ] = n.y*(-2.0*xiA*xiA*xiA+3.0*xiA*xiA);
D[startIndex+8 ] = n.z*(-2.0*xiA*xiA*xiA+3.0*xiA*xiA);
D[startIndex+9 ] = -lA*n.x*(-xiA*xiA*xiA+xiA*xiA);
D[startIndex+10] = -lA*n.y*(-xiA*xiA*xiA+xiA*xiA);
D[startIndex+11] = -lA*n.z*(-xiA*xiA*xiA+xiA*xiA);
startIndex+=12;
} else {
D[startIndex+0 ] = n.x*(etaA-1.0)*(xiA-1.0)*(etaA+xiA-(etaA*etaA)*2.0-(xiA*xiA)*2.0+1.0);
D[startIndex+1 ] = n.y*(etaA-1.0)*(xiA-1.0)*(etaA+xiA-(etaA*etaA)*2.0-(xiA*xiA)*2.0+1.0);
D[startIndex+2 ] = n.z*(etaA-1.0)*(xiA-1.0)*(etaA+xiA-(etaA*etaA)*2.0-(xiA*xiA)*2.0+1.0);
D[startIndex+3 ] = -aA*n.x*xiA*(etaA-1.0)*pow(xiA-1.0,2.0);
D[startIndex+4 ] = -aA*n.y*xiA*(etaA-1.0)*pow(xiA-1.0,2.0);
D[startIndex+5 ] = -aA*n.z*xiA*(etaA-1.0)*pow(xiA-1.0,2.0);
D[startIndex+6 ] = -bA*etaA*n.x*pow(etaA-1.0,2.0)*(xiA-1.0);
D[startIndex+7 ] = -bA*etaA*n.y*pow(etaA-1.0,2.0)*(xiA-1.0);
D[startIndex+8 ] = -bA*etaA*n.z*pow(etaA-1.0,2.0)*(xiA-1.0);
D[startIndex+9 ] = -n.x*xiA*(etaA-1.0)*(etaA+xiA*3.0-(etaA*etaA)*2.0-(xiA*xiA)*2.0);
D[startIndex+10] = -n.y*xiA*(etaA-1.0)*(etaA+xiA*3.0-(etaA*etaA)*2.0-(xiA*xiA)*2.0);
D[startIndex+11] = -n.z*xiA*(etaA-1.0)*(etaA+xiA*3.0-(etaA*etaA)*2.0-(xiA*xiA)*2.0);
D[startIndex+12] = -aA*n.x*(xiA*xiA)*(etaA-1.0)*(xiA-1.0);
D[startIndex+13] = -aA*n.y*(xiA*xiA)*(etaA-1.0)*(xiA-1.0);
D[startIndex+14] = -aA*n.z*(xiA*xiA)*(etaA-1.0)*(xiA-1.0);
D[startIndex+15] = bA*etaA*n.x*xiA*pow(etaA-1.0,2.0);
D[startIndex+16] = bA*etaA*n.y*xiA*pow(etaA-1.0,2.0);
D[startIndex+17] = bA*etaA*n.z*xiA*pow(etaA-1.0,2.0);
D[startIndex+18] = -etaA*n.x*xiA*(etaA*-3.0-xiA*3.0+(etaA*etaA)*2.0+(xiA*xiA)*2.0+1.0);
D[startIndex+19] = -etaA*n.y*xiA*(etaA*-3.0-xiA*3.0+(etaA*etaA)*2.0+(xiA*xiA)*2.0+1.0);
D[startIndex+20] = -etaA*n.z*xiA*(etaA*-3.0-xiA*3.0+(etaA*etaA)*2.0+(xiA*xiA)*2.0+1.0);
D[startIndex+21] = aA*etaA*n.x*(xiA*xiA)*(xiA-1.0);
D[startIndex+22] = aA*etaA*n.y*(xiA*xiA)*(xiA-1.0);
D[startIndex+23] = aA*etaA*n.z*(xiA*xiA)*(xiA-1.0);
D[startIndex+24] = bA*(etaA*etaA)*n.x*xiA*(etaA-1.0);
D[startIndex+25] = bA*(etaA*etaA)*n.y*xiA*(etaA-1.0);
D[startIndex+26] = bA*(etaA*etaA)*n.z*xiA*(etaA-1.0);
D[startIndex+27] = -etaA*n.x*(xiA-1.0)*(etaA*3.0+xiA-(etaA*etaA)*2.0-(xiA*xiA)*2.0);
D[startIndex+28] = -etaA*n.y*(xiA-1.0)*(etaA*3.0+xiA-(etaA*etaA)*2.0-(xiA*xiA)*2.0);
D[startIndex+29] = -etaA*n.z*(xiA-1.0)*(etaA*3.0+xiA-(etaA*etaA)*2.0-(xiA*xiA)*2.0);
D[startIndex+30] = aA*etaA*n.x*xiA*pow(xiA-1.0,2.0);
D[startIndex+31] = aA*etaA*n.y*xiA*pow(xiA-1.0,2.0);
D[startIndex+32] = aA*etaA*n.z*xiA*pow(xiA-1.0,2.0);
D[startIndex+33] = -bA*(etaA*etaA)*n.x*(etaA-1.0)*(xiA-1.0);
D[startIndex+34] = -bA*(etaA*etaA)*n.y*(etaA-1.0)*(xiA-1.0);
D[startIndex+35] = -bA*(etaA*etaA)*n.z*(etaA-1.0)*(xiA-1.0);
startIndex+=36;
}
if(bodyIdentifierB<numBodies) {
D[startIndex+0] = -n.x;
D[startIndex+1] = -n.y;
D[startIndex+2] = -n.z;
startIndex+=3;
} else if (bodyIdentifierB<numBodies+numBeams) {
D[startIndex+0 ] = -n.x*(2.0*xiB*xiB*xiB-3.0*xiB*xiB+1.0);
D[startIndex+1 ] = -n.y*(2.0*xiB*xiB*xiB-3.0*xiB*xiB+1.0);
D[startIndex+2 ] = -n.z*(2.0*xiB*xiB*xiB-3.0*xiB*xiB+1.0);
D[startIndex+3 ] = -lB*n.x*(xiB*xiB*xiB-2.0*xiB*xiB+xiB);
D[startIndex+4 ] = -lB*n.y*(xiB*xiB*xiB-2.0*xiB*xiB+xiB);
D[startIndex+5 ] = -lB*n.z*(xiB*xiB*xiB-2.0*xiB*xiB+xiB);
D[startIndex+6 ] = -n.x*(-2.0*xiB*xiB*xiB+3.0*xiB*xiB);
D[startIndex+7 ] = -n.y*(-2.0*xiB*xiB*xiB+3.0*xiB*xiB);
D[startIndex+8 ] = -n.z*(-2.0*xiB*xiB*xiB+3.0*xiB*xiB);
D[startIndex+9 ] = lB*n.x*(-xiB*xiB*xiB+xiB*xiB);
D[startIndex+10] = lB*n.y*(-xiB*xiB*xiB+xiB*xiB);
D[startIndex+11] = lB*n.z*(-xiB*xiB*xiB+xiB*xiB);
startIndex+=12;
} else {
D[startIndex+0 ] = -n.x*(etaB-1.0)*(xiB-1.0)*(etaB+xiB-(etaB*etaB)*2.0-(xiB*xiB)*2.0+1.0);
D[startIndex+1 ] = -n.y*(etaB-1.0)*(xiB-1.0)*(etaB+xiB-(etaB*etaB)*2.0-(xiB*xiB)*2.0+1.0);
D[startIndex+2 ] = -n.z*(etaB-1.0)*(xiB-1.0)*(etaB+xiB-(etaB*etaB)*2.0-(xiB*xiB)*2.0+1.0);
D[startIndex+3 ] = aB*n.x*xiB*(etaB-1.0)*pow(xiB-1.0,2.0);
D[startIndex+4 ] = aB*n.y*xiB*(etaB-1.0)*pow(xiB-1.0,2.0);
D[startIndex+5 ] = aB*n.z*xiB*(etaB-1.0)*pow(xiB-1.0,2.0);
D[startIndex+6 ] = bB*etaB*n.x*pow(etaB-1.0,2.0)*(xiB-1.0);
D[startIndex+7 ] = bB*etaB*n.y*pow(etaB-1.0,2.0)*(xiB-1.0);
D[startIndex+8 ] = bB*etaB*n.z*pow(etaB-1.0,2.0)*(xiB-1.0);
D[startIndex+9 ] = n.x*xiB*(etaB-1.0)*(etaB+xiB*3.0-(etaB*etaB)*2.0-(xiB*xiB)*2.0);
D[startIndex+10] = n.y*xiB*(etaB-1.0)*(etaB+xiB*3.0-(etaB*etaB)*2.0-(xiB*xiB)*2.0);
D[startIndex+11] = n.z*xiB*(etaB-1.0)*(etaB+xiB*3.0-(etaB*etaB)*2.0-(xiB*xiB)*2.0);
D[startIndex+12] = aB*n.x*(xiB*xiB)*(etaB-1.0)*(xiB-1.0);
D[startIndex+13] = aB*n.y*(xiB*xiB)*(etaB-1.0)*(xiB-1.0);
D[startIndex+14] = aB*n.z*(xiB*xiB)*(etaB-1.0)*(xiB-1.0);
D[startIndex+15] = -bB*etaB*n.x*xiB*pow(etaB-1.0,2.0);
D[startIndex+16] = -bB*etaB*n.y*xiB*pow(etaB-1.0,2.0);
D[startIndex+17] = -bB*etaB*n.z*xiB*pow(etaB-1.0,2.0);
D[startIndex+18] = etaB*n.x*xiB*(etaB*-3.0-xiB*3.0+(etaB*etaB)*2.0+(xiB*xiB)*2.0+1.0);
D[startIndex+19] = etaB*n.y*xiB*(etaB*-3.0-xiB*3.0+(etaB*etaB)*2.0+(xiB*xiB)*2.0+1.0);
D[startIndex+20] = etaB*n.z*xiB*(etaB*-3.0-xiB*3.0+(etaB*etaB)*2.0+(xiB*xiB)*2.0+1.0);
D[startIndex+21] = -aB*etaB*n.x*(xiB*xiB)*(xiB-1.0);
D[startIndex+22] = -aB*etaB*n.y*(xiB*xiB)*(xiB-1.0);
D[startIndex+23] = -aB*etaB*n.z*(xiB*xiB)*(xiB-1.0);
D[startIndex+24] = -bB*(etaB*etaB)*n.x*xiB*(etaB-1.0);
D[startIndex+25] = -bB*(etaB*etaB)*n.y*xiB*(etaB-1.0);
D[startIndex+26] = -bB*(etaB*etaB)*n.z*xiB*(etaB-1.0);
D[startIndex+27] = etaB*n.x*(xiB-1.0)*(etaB*3.0+xiB-(etaB*etaB)*2.0-(xiB*xiB)*2.0);
D[startIndex+28] = etaB*n.y*(xiB-1.0)*(etaB*3.0+xiB-(etaB*etaB)*2.0-(xiB*xiB)*2.0);
D[startIndex+29] = etaB*n.z*(xiB-1.0)*(etaB*3.0+xiB-(etaB*etaB)*2.0-(xiB*xiB)*2.0);
D[startIndex+30] = -aB*etaB*n.x*xiB*pow(xiB-1.0,2.0);
D[startIndex+31] = -aB*etaB*n.y*xiB*pow(xiB-1.0,2.0);
D[startIndex+32] = -aB*etaB*n.z*xiB*pow(xiB-1.0,2.0);
D[startIndex+33] = bB*(etaB*etaB)*n.x*(etaB-1.0)*(xiB-1.0);
D[startIndex+34] = bB*(etaB*etaB)*n.y*(etaB-1.0)*(xiB-1.0);
D[startIndex+35] = bB*(etaB*etaB)*n.z*(etaB-1.0)*(xiB-1.0);
startIndex+=36;
}
// Add u, values
if(bodyIdentifierA<numBodies) {
D[startIndex+0] = u.x;
D[startIndex+1] = u.y;
D[startIndex+2] = u.z;
startIndex+=3;
} else if (bodyIdentifierA<numBodies+numBeams) {
D[startIndex+0 ] = u.x*(2.0*xiA*xiA*xiA-3.0*xiA*xiA+1.0);
D[startIndex+1 ] = u.y*(2.0*xiA*xiA*xiA-3.0*xiA*xiA+1.0);
D[startIndex+2 ] = u.z*(2.0*xiA*xiA*xiA-3.0*xiA*xiA+1.0);
D[startIndex+3 ] = lA*u.x*(xiA*xiA*xiA-2.0*xiA*xiA+xiA);
D[startIndex+4 ] = lA*u.y*(xiA*xiA*xiA-2.0*xiA*xiA+xiA);
D[startIndex+5 ] = lA*u.z*(xiA*xiA*xiA-2.0*xiA*xiA+xiA);
D[startIndex+6 ] = u.x*(-2.0*xiA*xiA*xiA+3.0*xiA*xiA);
D[startIndex+7 ] = u.y*(-2.0*xiA*xiA*xiA+3.0*xiA*xiA);
D[startIndex+8 ] = u.z*(-2.0*xiA*xiA*xiA+3.0*xiA*xiA);
D[startIndex+9 ] = -lA*u.x*(-xiA*xiA*xiA+xiA*xiA);
D[startIndex+10] = -lA*u.y*(-xiA*xiA*xiA+xiA*xiA);
D[startIndex+11] = -lA*u.z*(-xiA*xiA*xiA+xiA*xiA);
startIndex+=12;
} else {
D[startIndex+0 ] = u.x*(etaA-1.0)*(xiA-1.0)*(etaA+xiA-(etaA*etaA)*2.0-(xiA*xiA)*2.0+1.0);
D[startIndex+1 ] = u.y*(etaA-1.0)*(xiA-1.0)*(etaA+xiA-(etaA*etaA)*2.0-(xiA*xiA)*2.0+1.0);
D[startIndex+2 ] = u.z*(etaA-1.0)*(xiA-1.0)*(etaA+xiA-(etaA*etaA)*2.0-(xiA*xiA)*2.0+1.0);
D[startIndex+3 ] = -aA*u.x*xiA*(etaA-1.0)*pow(xiA-1.0,2.0);
D[startIndex+4 ] = -aA*u.y*xiA*(etaA-1.0)*pow(xiA-1.0,2.0);
D[startIndex+5 ] = -aA*u.z*xiA*(etaA-1.0)*pow(xiA-1.0,2.0);
D[startIndex+6 ] = -bA*etaA*u.x*pow(etaA-1.0,2.0)*(xiA-1.0);
D[startIndex+7 ] = -bA*etaA*u.y*pow(etaA-1.0,2.0)*(xiA-1.0);
D[startIndex+8 ] = -bA*etaA*u.z*pow(etaA-1.0,2.0)*(xiA-1.0);
D[startIndex+9 ] = -u.x*xiA*(etaA-1.0)*(etaA+xiA*3.0-(etaA*etaA)*2.0-(xiA*xiA)*2.0);
D[startIndex+10] = -u.y*xiA*(etaA-1.0)*(etaA+xiA*3.0-(etaA*etaA)*2.0-(xiA*xiA)*2.0);
D[startIndex+11] = -u.z*xiA*(etaA-1.0)*(etaA+xiA*3.0-(etaA*etaA)*2.0-(xiA*xiA)*2.0);
D[startIndex+12] = -aA*u.x*(xiA*xiA)*(etaA-1.0)*(xiA-1.0);
D[startIndex+13] = -aA*u.y*(xiA*xiA)*(etaA-1.0)*(xiA-1.0);
D[startIndex+14] = -aA*u.z*(xiA*xiA)*(etaA-1.0)*(xiA-1.0);
D[startIndex+15] = bA*etaA*u.x*xiA*pow(etaA-1.0,2.0);
D[startIndex+16] = bA*etaA*u.y*xiA*pow(etaA-1.0,2.0);
D[startIndex+17] = bA*etaA*u.z*xiA*pow(etaA-1.0,2.0);
D[startIndex+18] = -etaA*u.x*xiA*(etaA*-3.0-xiA*3.0+(etaA*etaA)*2.0+(xiA*xiA)*2.0+1.0);
D[startIndex+19] = -etaA*u.y*xiA*(etaA*-3.0-xiA*3.0+(etaA*etaA)*2.0+(xiA*xiA)*2.0+1.0);
D[startIndex+20] = -etaA*u.z*xiA*(etaA*-3.0-xiA*3.0+(etaA*etaA)*2.0+(xiA*xiA)*2.0+1.0);
D[startIndex+21] = aA*etaA*u.x*(xiA*xiA)*(xiA-1.0);
D[startIndex+22] = aA*etaA*u.y*(xiA*xiA)*(xiA-1.0);
D[startIndex+23] = aA*etaA*u.z*(xiA*xiA)*(xiA-1.0);
D[startIndex+24] = bA*(etaA*etaA)*u.x*xiA*(etaA-1.0);
D[startIndex+25] = bA*(etaA*etaA)*u.y*xiA*(etaA-1.0);
D[startIndex+26] = bA*(etaA*etaA)*u.z*xiA*(etaA-1.0);
D[startIndex+27] = -etaA*u.x*(xiA-1.0)*(etaA*3.0+xiA-(etaA*etaA)*2.0-(xiA*xiA)*2.0);
D[startIndex+28] = -etaA*u.y*(xiA-1.0)*(etaA*3.0+xiA-(etaA*etaA)*2.0-(xiA*xiA)*2.0);
D[startIndex+29] = -etaA*u.z*(xiA-1.0)*(etaA*3.0+xiA-(etaA*etaA)*2.0-(xiA*xiA)*2.0);
D[startIndex+30] = aA*etaA*u.x*xiA*pow(xiA-1.0,2.0);
D[startIndex+31] = aA*etaA*u.y*xiA*pow(xiA-1.0,2.0);
D[startIndex+32] = aA*etaA*u.z*xiA*pow(xiA-1.0,2.0);
D[startIndex+33] = -bA*(etaA*etaA)*u.x*(etaA-1.0)*(xiA-1.0);
D[startIndex+34] = -bA*(etaA*etaA)*u.y*(etaA-1.0)*(xiA-1.0);
D[startIndex+35] = -bA*(etaA*etaA)*u.z*(etaA-1.0)*(xiA-1.0);
startIndex+=36;
}
if(bodyIdentifierB<numBodies) {
D[startIndex+0] = -u.x;
D[startIndex+1] = -u.y;
D[startIndex+2] = -u.z;
startIndex+=3;
} else if (bodyIdentifierB<numBodies+numBeams) {
D[startIndex+0 ] = -u.x*(2.0*xiB*xiB*xiB-3.0*xiB*xiB+1.0);
D[startIndex+1 ] = -u.y*(2.0*xiB*xiB*xiB-3.0*xiB*xiB+1.0);
D[startIndex+2 ] = -u.z*(2.0*xiB*xiB*xiB-3.0*xiB*xiB+1.0);
D[startIndex+3 ] = -lB*u.x*(xiB*xiB*xiB-2.0*xiB*xiB+xiB);
D[startIndex+4 ] = -lB*u.y*(xiB*xiB*xiB-2.0*xiB*xiB+xiB);
D[startIndex+5 ] = -lB*u.z*(xiB*xiB*xiB-2.0*xiB*xiB+xiB);
D[startIndex+6 ] = -u.x*(-2.0*xiB*xiB*xiB+3.0*xiB*xiB);
D[startIndex+7 ] = -u.y*(-2.0*xiB*xiB*xiB+3.0*xiB*xiB);
D[startIndex+8 ] = -u.z*(-2.0*xiB*xiB*xiB+3.0*xiB*xiB);
D[startIndex+9 ] = lB*u.x*(-xiB*xiB*xiB+xiB*xiB);
D[startIndex+10] = lB*u.y*(-xiB*xiB*xiB+xiB*xiB);
D[startIndex+11] = lB*u.z*(-xiB*xiB*xiB+xiB*xiB);
startIndex+=12;
} else {
D[startIndex+0 ] = -u.x*(etaB-1.0)*(xiB-1.0)*(etaB+xiB-(etaB*etaB)*2.0-(xiB*xiB)*2.0+1.0);
D[startIndex+1 ] = -u.y*(etaB-1.0)*(xiB-1.0)*(etaB+xiB-(etaB*etaB)*2.0-(xiB*xiB)*2.0+1.0);
D[startIndex+2 ] = -u.z*(etaB-1.0)*(xiB-1.0)*(etaB+xiB-(etaB*etaB)*2.0-(xiB*xiB)*2.0+1.0);
D[startIndex+3 ] = aB*u.x*xiB*(etaB-1.0)*pow(xiB-1.0,2.0);
D[startIndex+4 ] = aB*u.y*xiB*(etaB-1.0)*pow(xiB-1.0,2.0);
D[startIndex+5 ] = aB*u.z*xiB*(etaB-1.0)*pow(xiB-1.0,2.0);
D[startIndex+6 ] = bB*etaB*u.x*pow(etaB-1.0,2.0)*(xiB-1.0);
D[startIndex+7 ] = bB*etaB*u.y*pow(etaB-1.0,2.0)*(xiB-1.0);
D[startIndex+8 ] = bB*etaB*u.z*pow(etaB-1.0,2.0)*(xiB-1.0);
D[startIndex+9 ] = u.x*xiB*(etaB-1.0)*(etaB+xiB*3.0-(etaB*etaB)*2.0-(xiB*xiB)*2.0);
D[startIndex+10] = u.y*xiB*(etaB-1.0)*(etaB+xiB*3.0-(etaB*etaB)*2.0-(xiB*xiB)*2.0);
D[startIndex+11] = u.z*xiB*(etaB-1.0)*(etaB+xiB*3.0-(etaB*etaB)*2.0-(xiB*xiB)*2.0);
D[startIndex+12] = aB*u.x*(xiB*xiB)*(etaB-1.0)*(xiB-1.0);
D[startIndex+13] = aB*u.y*(xiB*xiB)*(etaB-1.0)*(xiB-1.0);
D[startIndex+14] = aB*u.z*(xiB*xiB)*(etaB-1.0)*(xiB-1.0);
D[startIndex+15] = -bB*etaB*u.x*xiB*pow(etaB-1.0,2.0);
D[startIndex+16] = -bB*etaB*u.y*xiB*pow(etaB-1.0,2.0);
D[startIndex+17] = -bB*etaB*u.z*xiB*pow(etaB-1.0,2.0);
D[startIndex+18] = etaB*u.x*xiB*(etaB*-3.0-xiB*3.0+(etaB*etaB)*2.0+(xiB*xiB)*2.0+1.0);
D[startIndex+19] = etaB*u.y*xiB*(etaB*-3.0-xiB*3.0+(etaB*etaB)*2.0+(xiB*xiB)*2.0+1.0);
D[startIndex+20] = etaB*u.z*xiB*(etaB*-3.0-xiB*3.0+(etaB*etaB)*2.0+(xiB*xiB)*2.0+1.0);
D[startIndex+21] = -aB*etaB*u.x*(xiB*xiB)*(xiB-1.0);
D[startIndex+22] = -aB*etaB*u.y*(xiB*xiB)*(xiB-1.0);
D[startIndex+23] = -aB*etaB*u.z*(xiB*xiB)*(xiB-1.0);
D[startIndex+24] = -bB*(etaB*etaB)*u.x*xiB*(etaB-1.0);
D[startIndex+25] = -bB*(etaB*etaB)*u.y*xiB*(etaB-1.0);
D[startIndex+26] = -bB*(etaB*etaB)*u.z*xiB*(etaB-1.0);
D[startIndex+27] = etaB*u.x*(xiB-1.0)*(etaB*3.0+xiB-(etaB*etaB)*2.0-(xiB*xiB)*2.0);
D[startIndex+28] = etaB*u.y*(xiB-1.0)*(etaB*3.0+xiB-(etaB*etaB)*2.0-(xiB*xiB)*2.0);
D[startIndex+29] = etaB*u.z*(xiB-1.0)*(etaB*3.0+xiB-(etaB*etaB)*2.0-(xiB*xiB)*2.0);
D[startIndex+30] = -aB*etaB*u.x*xiB*pow(xiB-1.0,2.0);
D[startIndex+31] = -aB*etaB*u.y*xiB*pow(xiB-1.0,2.0);
D[startIndex+32] = -aB*etaB*u.z*xiB*pow(xiB-1.0,2.0);
D[startIndex+33] = bB*(etaB*etaB)*u.x*(etaB-1.0)*(xiB-1.0);
D[startIndex+34] = bB*(etaB*etaB)*u.y*(etaB-1.0)*(xiB-1.0);
D[startIndex+35] = bB*(etaB*etaB)*u.z*(etaB-1.0)*(xiB-1.0);
startIndex+=36;
}
// Add v, values
if(bodyIdentifierA<numBodies) {
D[startIndex+0] = v.x;
D[startIndex+1] = v.y;
D[startIndex+2] = v.z;
startIndex+=3;
} else if (bodyIdentifierA<numBodies+numBeams) {
D[startIndex+0 ] = v.x*(2.0*xiA*xiA*xiA-3.0*xiA*xiA+1.0);
D[startIndex+1 ] = v.y*(2.0*xiA*xiA*xiA-3.0*xiA*xiA+1.0);
D[startIndex+2 ] = v.z*(2.0*xiA*xiA*xiA-3.0*xiA*xiA+1.0);
D[startIndex+3 ] = lA*v.x*(xiA*xiA*xiA-2.0*xiA*xiA+xiA);
D[startIndex+4 ] = lA*v.y*(xiA*xiA*xiA-2.0*xiA*xiA+xiA);
D[startIndex+5 ] = lA*v.z*(xiA*xiA*xiA-2.0*xiA*xiA+xiA);
D[startIndex+6 ] = v.x*(-2.0*xiA*xiA*xiA+3.0*xiA*xiA);
D[startIndex+7 ] = v.y*(-2.0*xiA*xiA*xiA+3.0*xiA*xiA);
D[startIndex+8 ] = v.z*(-2.0*xiA*xiA*xiA+3.0*xiA*xiA);
D[startIndex+9 ] = -lA*v.x*(-xiA*xiA*xiA+xiA*xiA);
D[startIndex+10] = -lA*v.y*(-xiA*xiA*xiA+xiA*xiA);
D[startIndex+11] = -lA*v.z*(-xiA*xiA*xiA+xiA*xiA);
startIndex+=12;
} else {
D[startIndex+0 ] = v.x*(etaA-1.0)*(xiA-1.0)*(etaA+xiA-(etaA*etaA)*2.0-(xiA*xiA)*2.0+1.0);
D[startIndex+1 ] = v.y*(etaA-1.0)*(xiA-1.0)*(etaA+xiA-(etaA*etaA)*2.0-(xiA*xiA)*2.0+1.0);
D[startIndex+2 ] = v.z*(etaA-1.0)*(xiA-1.0)*(etaA+xiA-(etaA*etaA)*2.0-(xiA*xiA)*2.0+1.0);
D[startIndex+3 ] = -aA*v.x*xiA*(etaA-1.0)*pow(xiA-1.0,2.0);
D[startIndex+4 ] = -aA*v.y*xiA*(etaA-1.0)*pow(xiA-1.0,2.0);
D[startIndex+5 ] = -aA*v.z*xiA*(etaA-1.0)*pow(xiA-1.0,2.0);
D[startIndex+6 ] = -bA*etaA*v.x*pow(etaA-1.0,2.0)*(xiA-1.0);
D[startIndex+7 ] = -bA*etaA*v.y*pow(etaA-1.0,2.0)*(xiA-1.0);
D[startIndex+8 ] = -bA*etaA*v.z*pow(etaA-1.0,2.0)*(xiA-1.0);
D[startIndex+9 ] = -v.x*xiA*(etaA-1.0)*(etaA+xiA*3.0-(etaA*etaA)*2.0-(xiA*xiA)*2.0);
D[startIndex+10] = -v.y*xiA*(etaA-1.0)*(etaA+xiA*3.0-(etaA*etaA)*2.0-(xiA*xiA)*2.0);
D[startIndex+11] = -v.z*xiA*(etaA-1.0)*(etaA+xiA*3.0-(etaA*etaA)*2.0-(xiA*xiA)*2.0);
D[startIndex+12] = -aA*v.x*(xiA*xiA)*(etaA-1.0)*(xiA-1.0);
D[startIndex+13] = -aA*v.y*(xiA*xiA)*(etaA-1.0)*(xiA-1.0);
D[startIndex+14] = -aA*v.z*(xiA*xiA)*(etaA-1.0)*(xiA-1.0);
D[startIndex+15] = bA*etaA*v.x*xiA*pow(etaA-1.0,2.0);
D[startIndex+16] = bA*etaA*v.y*xiA*pow(etaA-1.0,2.0);
D[startIndex+17] = bA*etaA*v.z*xiA*pow(etaA-1.0,2.0);
D[startIndex+18] = -etaA*v.x*xiA*(etaA*-3.0-xiA*3.0+(etaA*etaA)*2.0+(xiA*xiA)*2.0+1.0);
D[startIndex+19] = -etaA*v.y*xiA*(etaA*-3.0-xiA*3.0+(etaA*etaA)*2.0+(xiA*xiA)*2.0+1.0);
D[startIndex+20] = -etaA*v.z*xiA*(etaA*-3.0-xiA*3.0+(etaA*etaA)*2.0+(xiA*xiA)*2.0+1.0);
D[startIndex+21] = aA*etaA*v.x*(xiA*xiA)*(xiA-1.0);
D[startIndex+22] = aA*etaA*v.y*(xiA*xiA)*(xiA-1.0);
D[startIndex+23] = aA*etaA*v.z*(xiA*xiA)*(xiA-1.0);
D[startIndex+24] = bA*(etaA*etaA)*v.x*xiA*(etaA-1.0);
D[startIndex+25] = bA*(etaA*etaA)*v.y*xiA*(etaA-1.0);
D[startIndex+26] = bA*(etaA*etaA)*v.z*xiA*(etaA-1.0);
D[startIndex+27] = -etaA*v.x*(xiA-1.0)*(etaA*3.0+xiA-(etaA*etaA)*2.0-(xiA*xiA)*2.0);
D[startIndex+28] = -etaA*v.y*(xiA-1.0)*(etaA*3.0+xiA-(etaA*etaA)*2.0-(xiA*xiA)*2.0);
D[startIndex+29] = -etaA*v.z*(xiA-1.0)*(etaA*3.0+xiA-(etaA*etaA)*2.0-(xiA*xiA)*2.0);
D[startIndex+30] = aA*etaA*v.x*xiA*pow(xiA-1.0,2.0);
D[startIndex+31] = aA*etaA*v.y*xiA*pow(xiA-1.0,2.0);
D[startIndex+32] = aA*etaA*v.z*xiA*pow(xiA-1.0,2.0);
D[startIndex+33] = -bA*(etaA*etaA)*v.x*(etaA-1.0)*(xiA-1.0);
D[startIndex+34] = -bA*(etaA*etaA)*v.y*(etaA-1.0)*(xiA-1.0);
D[startIndex+35] = -bA*(etaA*etaA)*v.z*(etaA-1.0)*(xiA-1.0);
startIndex+=36;
}
if(bodyIdentifierB<numBodies) {
D[startIndex+0] = -v.x;
D[startIndex+1] = -v.y;
D[startIndex+2] = -v.z;
startIndex+=3;
} else if (bodyIdentifierB<numBodies+numBeams) {
D[startIndex+0 ] = -v.x*(2.0*xiB*xiB*xiB-3.0*xiB*xiB+1.0);
D[startIndex+1 ] = -v.y*(2.0*xiB*xiB*xiB-3.0*xiB*xiB+1.0);
D[startIndex+2 ] = -v.z*(2.0*xiB*xiB*xiB-3.0*xiB*xiB+1.0);
D[startIndex+3 ] = -lB*v.x*(xiB*xiB*xiB-2.0*xiB*xiB+xiB);
D[startIndex+4 ] = -lB*v.y*(xiB*xiB*xiB-2.0*xiB*xiB+xiB);
D[startIndex+5 ] = -lB*v.z*(xiB*xiB*xiB-2.0*xiB*xiB+xiB);
D[startIndex+6 ] = -v.x*(-2.0*xiB*xiB*xiB+3.0*xiB*xiB);
D[startIndex+7 ] = -v.y*(-2.0*xiB*xiB*xiB+3.0*xiB*xiB);
D[startIndex+8 ] = -v.z*(-2.0*xiB*xiB*xiB+3.0*xiB*xiB);
D[startIndex+9 ] = lB*v.x*(-xiB*xiB*xiB+xiB*xiB);
D[startIndex+10] = lB*v.y*(-xiB*xiB*xiB+xiB*xiB);
D[startIndex+11] = lB*v.z*(-xiB*xiB*xiB+xiB*xiB);
startIndex+=12;
} else {
D[startIndex+0 ] = -v.x*(etaB-1.0)*(xiB-1.0)*(etaB+xiB-(etaB*etaB)*2.0-(xiB*xiB)*2.0+1.0);
D[startIndex+1 ] = -v.y*(etaB-1.0)*(xiB-1.0)*(etaB+xiB-(etaB*etaB)*2.0-(xiB*xiB)*2.0+1.0);
D[startIndex+2 ] = -v.z*(etaB-1.0)*(xiB-1.0)*(etaB+xiB-(etaB*etaB)*2.0-(xiB*xiB)*2.0+1.0);
D[startIndex+3 ] = aB*v.x*xiB*(etaB-1.0)*pow(xiB-1.0,2.0);
D[startIndex+4 ] = aB*v.y*xiB*(etaB-1.0)*pow(xiB-1.0,2.0);
D[startIndex+5 ] = aB*v.z*xiB*(etaB-1.0)*pow(xiB-1.0,2.0);
D[startIndex+6 ] = bB*etaB*v.x*pow(etaB-1.0,2.0)*(xiB-1.0);
D[startIndex+7 ] = bB*etaB*v.y*pow(etaB-1.0,2.0)*(xiB-1.0);
D[startIndex+8 ] = bB*etaB*v.z*pow(etaB-1.0,2.0)*(xiB-1.0);
D[startIndex+9 ] = v.x*xiB*(etaB-1.0)*(etaB+xiB*3.0-(etaB*etaB)*2.0-(xiB*xiB)*2.0);
D[startIndex+10] = v.y*xiB*(etaB-1.0)*(etaB+xiB*3.0-(etaB*etaB)*2.0-(xiB*xiB)*2.0);
D[startIndex+11] = v.z*xiB*(etaB-1.0)*(etaB+xiB*3.0-(etaB*etaB)*2.0-(xiB*xiB)*2.0);
D[startIndex+12] = aB*v.x*(xiB*xiB)*(etaB-1.0)*(xiB-1.0);
D[startIndex+13] = aB*v.y*(xiB*xiB)*(etaB-1.0)*(xiB-1.0);
D[startIndex+14] = aB*v.z*(xiB*xiB)*(etaB-1.0)*(xiB-1.0);
D[startIndex+15] = -bB*etaB*v.x*xiB*pow(etaB-1.0,2.0);
D[startIndex+16] = -bB*etaB*v.y*xiB*pow(etaB-1.0,2.0);
D[startIndex+17] = -bB*etaB*v.z*xiB*pow(etaB-1.0,2.0);
D[startIndex+18] = etaB*v.x*xiB*(etaB*-3.0-xiB*3.0+(etaB*etaB)*2.0+(xiB*xiB)*2.0+1.0);
D[startIndex+19] = etaB*v.y*xiB*(etaB*-3.0-xiB*3.0+(etaB*etaB)*2.0+(xiB*xiB)*2.0+1.0);
D[startIndex+20] = etaB*v.z*xiB*(etaB*-3.0-xiB*3.0+(etaB*etaB)*2.0+(xiB*xiB)*2.0+1.0);
D[startIndex+21] = -aB*etaB*v.x*(xiB*xiB)*(xiB-1.0);
D[startIndex+22] = -aB*etaB*v.y*(xiB*xiB)*(xiB-1.0);
D[startIndex+23] = -aB*etaB*v.z*(xiB*xiB)*(xiB-1.0);
D[startIndex+24] = -bB*(etaB*etaB)*v.x*xiB*(etaB-1.0);
D[startIndex+25] = -bB*(etaB*etaB)*v.y*xiB*(etaB-1.0);
D[startIndex+26] = -bB*(etaB*etaB)*v.z*xiB*(etaB-1.0);
D[startIndex+27] = etaB*v.x*(xiB-1.0)*(etaB*3.0+xiB-(etaB*etaB)*2.0-(xiB*xiB)*2.0);
D[startIndex+28] = etaB*v.y*(xiB-1.0)*(etaB*3.0+xiB-(etaB*etaB)*2.0-(xiB*xiB)*2.0);
D[startIndex+29] = etaB*v.z*(xiB-1.0)*(etaB*3.0+xiB-(etaB*etaB)*2.0-(xiB*xiB)*2.0);
D[startIndex+30] = -aB*etaB*v.x*xiB*pow(xiB-1.0,2.0);
D[startIndex+31] = -aB*etaB*v.y*xiB*pow(xiB-1.0,2.0);
D[startIndex+32] = -aB*etaB*v.z*xiB*pow(xiB-1.0,2.0);
D[startIndex+33] = bB*(etaB*etaB)*v.x*(etaB-1.0)*(xiB-1.0);
D[startIndex+34] = bB*(etaB*etaB)*v.y*(etaB-1.0)*(xiB-1.0);
D[startIndex+35] = bB*(etaB*etaB)*v.z*(etaB-1.0)*(xiB-1.0);
startIndex+=36;
}
}
// Convert the solved contact impulses into per-collision force vectors.
// For each collision: rebuild the local contact frame (normal + two tangents)
// from the stored normal, then divide the impulses by the step size h to get
// forces. One thread per collision.
__global__ void updateContactForcePerCollision(double3* normalForcePerCollision, double3* frictionForcePerCollision, double* gammaGlobal, double4* normalsAndPenetrations, double h, uint numCollisions) {
  INIT_CHECK_THREAD_BOUNDED(INDEX1D, numCollisions);

  // The contact normal is packed in the xyz components of normalsAndPenetrations.
  double4 packed = normalsAndPenetrations[index];
  double3 normal = make_double3(packed.x, packed.y, packed.z);

  // Build a vector orthogonal to the normal; the branch picks a construction
  // whose divisor is guaranteed nonzero.
  double3 tangent1;
  if (normal.z != 0) {
    tangent1 = normalize(make_double3(1, 0, -normal.x/normal.z));
  } else if (normal.x != 0) {
    tangent1 = normalize(make_double3(-normal.z/normal.x, 0, 1));
  } else {
    tangent1 = normalize(make_double3(1, -normal.x/normal.y, 0));
  }
  double3 tangent2 = normalize(cross(normal, tangent1));

  // gamma = (normal impulse, friction impulse 1, friction impulse 2) for this
  // collision; impulse / h = force.
  double3 gamma = make_double3(gammaGlobal[3*index], gammaGlobal[3*index+1], gammaGlobal[3*index+2]);
  normalForcePerCollision[index] = gamma.x*normal/h;
  frictionForcePerCollision[index] = (gamma.y*tangent1 + gamma.z*tangent2)/h;
}
// Refresh the per-collision normal/friction force buffers on the device by
// launching updateContactForcePerCollision over all current collisions.
// Always returns 0.
int System::calculateContactForcePerCollision() {
  uint numCollisions = collisionDetector->numCollisions;
  normalForcePerCollision_d.resize(numCollisions);
  frictionForcePerCollision_d.resize(numCollisions);
  if (numCollisions) {
    hipLaunchKernelGGL((updateContactForcePerCollision), dim3(BLOCKS(numCollisions)), dim3(THREADS), 0, 0, CASTD3(normalForcePerCollision_d), CASTD3(frictionForcePerCollision_d), CASTD1(gamma_d), CASTD4(collisionDetector->normalsAndPenetrations_d), h, numCollisions);
  }
  return 0;
}
// Print location, normal force, and friction force for every collision.
// Recomputes the device-side force buffers first, then copies everything
// needed for reporting back to the host. Always returns 0.
int System::outputContactForcePerCollision() {
  calculateContactForcePerCollision();
  // copy device data to host
  thrust::host_vector<double3> locations_h = collisionDetector->collisionLocations_d;
  thrust::host_vector<double3> normalForces_h = normalForcePerCollision_d;
  thrust::host_vector<double3> frictionForces_h = frictionForcePerCollision_d;
  thrust::host_vector<int4> map_h = collisionMap_d;
  thrust::host_vector<uint> identifiersA_h = collisionDetector->collisionIdentifierA_d;
  thrust::host_vector<uint> identifiersB_h = collisionDetector->collisionIdentifierB_d;
  for (int i = 0; i < collisionDetector->numCollisions; i++) {
    // .x of the collision map entry is the owning body identifier.
    int bodyIdentifierA = map_h[identifiersA_h[i]].x;
    int bodyIdentifierB = map_h[identifiersB_h[i]].x;
    printf("Collision #%d: Body %d to %d\n",i,bodyIdentifierA,bodyIdentifierB);
    printf("  Collision location: (%f, %f, %f)\n", locations_h[i].x,locations_h[i].y,locations_h[i].z);
    printf("  Collision normal: (%f, %f, %f)\n", normalForces_h[i].x,normalForces_h[i].y,normalForces_h[i].z);
    printf("  Collision friction: (%f, %f, %f)\n\n", frictionForces_h[i].x,frictionForces_h[i].y,frictionForces_h[i].z);
  }
  return 0;
}
// Count the nonzero Jacobian entries each contact will produce, so a prefix
// scan can assign write offsets. Per contact row there are 3 constraint rows
// (normal + 2 friction), and per body side the column count depends on type:
// rigid body = 3 DOFs (9 entries), beam = 12 DOFs (36 entries),
// plate/shell = 36 DOFs (108 entries). Body type is inferred from the
// identifier range: [0, numBodies) bodies, [numBodies, numBodies+numBeams)
// beams, everything above that plates/shells.
__global__ void updateNonzerosPerContact(int* nonzerosPerContact, int4* collisionMap, uint* collisionIdentifierA, uint* collisionIdentifierB, int numBodies, int numBeams, uint numCollisions) {
  INIT_CHECK_THREAD_BOUNDED(INDEX1D, numCollisions);
  int bodyIdentifierA = collisionMap[collisionIdentifierA[index]].x;
  int bodyIdentifierB = collisionMap[collisionIdentifierB[index]].x;
  int entriesA = (bodyIdentifierA < numBodies) ? 9 : ((bodyIdentifierA < (numBodies+numBeams)) ? 36 : 108);
  int entriesB = (bodyIdentifierB < numBodies) ? 9 : ((bodyIdentifierB < (numBodies+numBeams)) ? 36 : 108);
  nonzerosPerContact[index] = entriesA + entriesB;
}
// Assemble the full constraint Jacobian D in COO format (DI_d = row indices,
// DJ_d = column indices, D_d = values) and wrap it in a cusp view.
// Row layout: bilateral DOF constraints first, then spherical
// shell-node-to-body2D constraints, then 3 rows per contact.
// Finishes by building the transpose (buildContactJacobianTranspose).
// Always returns 0.
int System::buildContactJacobian() {
// update nonzeros per contact
int totalNonzeros = 0;
nonzerosPerContact_d.resize(collisionDetector->numCollisions);
if(collisionDetector->numCollisions) {
hipLaunchKernelGGL(( updateNonzerosPerContact), dim3(BLOCKS(collisionDetector->numCollisions)),dim3(THREADS), 0, 0, CASTI1(nonzerosPerContact_d), CASTI4(collisionMap_d), CASTU1(collisionDetector->collisionIdentifierA_d), CASTU1(collisionDetector->collisionIdentifierB_d), bodies.size(), beams.size(), collisionDetector->numCollisions);
// Inclusive scan turns per-contact counts into write offsets and yields the
// total count of contact nonzeros.
Thrust_Inclusive_Scan_Sum(nonzerosPerContact_d, totalNonzeros);
}
totalNonzeros+=offsetConstraintsDOF+7*constraintsSpherical_ShellNodeToBody2D_d.size(); //Add in space for the bilateralDOF entries
DI_d.resize(totalNonzeros);
DJ_d.resize(totalNonzeros);
D_d.resize(totalNonzeros);
friction_d.resize(collisionDetector->numCollisions);
// Fill the COO triplets in three passes, each writing its own disjoint slice:
// bilateral constraints, spherical shell-node-to-body2D constraints, contacts.
if(constraintsBilateralDOF_d.size())hipLaunchKernelGGL(( constructBilateralJacobian), dim3(BLOCKS(constraintsBilateralDOF_d.size())),dim3(THREADS), 0, 0, CASTI2(constraintsBilateralDOF_d), CASTI1(offsetBilaterals_d), CASTI1(DI_d), CASTI1(DJ_d), CASTD1(D_d), constraintsBilateralDOF_d.size());
if(constraintsSpherical_ShellNodeToBody2D_d.size())hipLaunchKernelGGL(( constructSpherical_ShellNodeToBody2DJacobian), dim3(BLOCKS(constraintsSpherical_ShellNodeToBody2D_d.size())),dim3(THREADS), 0, 0, CASTI3(constraintsSpherical_ShellNodeToBody2D_d), CASTD3(pSpherical_ShellNodeToBody2D_d), CASTD1(p_d), CASTI1(DI_d), CASTI1(DJ_d), CASTD1(D_d), constraintsBilateralDOF_d.size(), offsetConstraintsDOF, constraintsSpherical_ShellNodeToBody2D_d.size());
// Contact pass: entry offset (7th-from-last arg) and row offset (6th-from-last)
// skip past the space consumed by the two bilateral passes above.
if(collisionDetector->numCollisions)hipLaunchKernelGGL(( constructContactJacobian), dim3(BLOCKS(collisionDetector->numCollisions)),dim3(THREADS), 0, 0, CASTI1(nonzerosPerContact_d), CASTI4(collisionMap_d), CASTI4(shellConnectivities_d), CASTD3(contactGeometry_d), CASTD3(collisionGeometry_d), CASTI1(DI_d), CASTI1(DJ_d), CASTD1(D_d), CASTD1(friction_d), frictionCoefficient, CASTD4(collisionDetector->normalsAndPenetrations_d), CASTU1(collisionDetector->collisionIdentifierA_d), CASTU1(collisionDetector->collisionIdentifierB_d), CASTI1(indices_d), bodies.size(), beams.size(), plates.size(), body2Ds.size(), offsetConstraintsDOF+7*constraintsSpherical_ShellNodeToBody2D_d.size(), constraintsBilateralDOF_d.size()+3*constraintsSpherical_ShellNodeToBody2D_d.size(), collisionDetector->numCollisions);
// create contact jacobian using cusp library
thrust::device_ptr<int> wrapped_device_I(CASTI1(DI_d));
DeviceIndexArrayView row_indices = DeviceIndexArrayView(wrapped_device_I, wrapped_device_I + DI_d.size());
thrust::device_ptr<int> wrapped_device_J(CASTI1(DJ_d));
DeviceIndexArrayView column_indices = DeviceIndexArrayView(wrapped_device_J, wrapped_device_J + DJ_d.size());
thrust::device_ptr<double> wrapped_device_V(CASTD1(D_d));
DeviceValueArrayView values = DeviceValueArrayView(wrapped_device_V, wrapped_device_V + D_d.size());
// D is (numConstraintRows) x (numGeneralizedCoordinates):
// rows = 3 per contact + bilateral rows, columns = 3/body + 12/beam +
// 36/plate + 3/body2D + 3/shell node.
D = DeviceView(3*collisionDetector->numCollisions+constraintsBilateralDOF_d.size()+3*constraintsSpherical_ShellNodeToBody2D_d.size(), 3*bodies.size()+12*beams.size()+36*plates.size()+3*body2Ds.size()+3*nodes_h.size(), D_d.size(), row_indices, column_indices, values);
// end create contact jacobian
buildContactJacobianTranspose();
return 0;
}
// Build the transpose of the contact Jacobian by copying the COO arrays with
// the row/column roles swapped, then re-sorting by row so the COO view meets
// cusp's row-sorted ordering requirement. Always returns 0.
// Fix: the views were previously sized from DI_d/DJ_d/D_d while wrapping
// DTI_d/DTJ_d/DT_d — correct only because the copies have identical sizes;
// each view now uses the size of the array it actually wraps.
int System::buildContactJacobianTranspose() {
// Transpose in COO form: swap row/column index arrays, values unchanged.
DTI_d = DJ_d;
DTJ_d = DI_d;
DT_d = D_d;
// create contact jacobian transpose using cusp library
thrust::device_ptr<int> wrapped_device_I(CASTI1(DTI_d));
DeviceIndexArrayView row_indices = DeviceIndexArrayView(wrapped_device_I, wrapped_device_I + DTI_d.size());
thrust::device_ptr<int> wrapped_device_J(CASTI1(DTJ_d));
DeviceIndexArrayView column_indices = DeviceIndexArrayView(wrapped_device_J, wrapped_device_J + DTJ_d.size());
thrust::device_ptr<double> wrapped_device_V(CASTD1(DT_d));
DeviceValueArrayView values = DeviceValueArrayView(wrapped_device_V, wrapped_device_V + DT_d.size());
// Dimensions are D's swapped: rows = generalized coordinates, columns = constraint rows.
DT = DeviceView(3*bodies.size()+12*beams.size()+36*plates.size()+3*body2Ds.size()+3*nodes_h.size(), 3*collisionDetector->numCollisions+constraintsBilateralDOF_d.size()+3*constraintsSpherical_ShellNodeToBody2D_d.size(), DT_d.size(), row_indices, column_indices, values);
// end create contact jacobian transpose
// Swapping indices destroys row ordering, so the sort is required (not optional)
// for cusp operations that assume sorted COO.
DT.sort_by_row();
return 0;
}
// dst = M * src for diagonally lumped masses, one DOF per thread.
// massInv stores inverse masses; a zero entry (fixed / infinite-mass DOF)
// leaves the factor at zero so the corresponding output is zeroed.
__global__ void multiplyByMass(double* massInv, double* src, double* dst, uint numDOF) {
  INIT_CHECK_THREAD_BOUNDED(INDEX1D, numDOF);
  double invMass = massInv[index];
  double factor = (invMass != 0) ? 1.0/invMass : invMass;
  dst[index] = factor*src[index];
}
// dst = M * src for each beam element's 12 DOFs, one beam per thread.
// The coefficients (13/35, 9/70, 11/210, 13/420, l^2/105, l^2/140) match the
// classic consistent mass matrix of a two-node beam element with cubic
// (Hermite-type) shape functions — NOTE(review): presumed; confirm against the
// element formulation used elsewhere in this project.
__global__ void multiplyByBeamMass(double3* geometries, double3* materials, double* src, double* dst, uint numBodies, uint numBeams) {
INIT_CHECK_THREAD_BOUNDED(INDEX1D, numBeams);
// Beam geometries are stored after the numBodies rigid-body entries.
double3 geometry = geometries[numBodies+index];
// Circular cross-section: geometry.x is the radius, geometry.y the length.
double A = PI*geometry.x*geometry.x;
double l = geometry.y;
// materials[index].x is the density.
double rho = materials[index].x;
// Start of this beam's 12 DOFs in the global state vector
// (3 DOFs per rigid body come first, then 12 per beam).
uint offset = 3*numBodies+12*index;
dst[offset+0 ] = (13.0*A*rho*src[0+offset])/35.0 + (9.0*A*rho*src[6+offset])/70.0 + (11.0*A*l*rho*src[3+offset])/210.0 - (13.0*A*l*rho*src[9 +offset])/420.0;
dst[offset+1 ] = (13.0*A*rho*src[1+offset])/35.0 + (9.0*A*rho*src[7+offset])/70.0 + (11.0*A*l*rho*src[4+offset])/210.0 - (13.0*A*l*rho*src[10+offset])/420.0;
dst[offset+2 ] = (13.0*A*rho*src[2+offset])/35.0 + (9.0*A*rho*src[8+offset])/70.0 + (11.0*A*l*rho*src[5+offset])/210.0 - (13.0*A*l*rho*src[11+offset])/420.0;
dst[offset+3 ] = (A*l*l*rho*src[3+offset])/105.0 - (A*l*l*rho*src[9 +offset])/140.0 + (11.0*A*l*rho*src[0+offset])/210.0 + (13.0*A*l*rho*src[6+offset])/420.0;
dst[offset+4 ] = (A*l*l*rho*src[4+offset])/105.0 - (A*l*l*rho*src[10+offset])/140.0 + (11.0*A*l*rho*src[1+offset])/210.0 + (13.0*A*l*rho*src[7+offset])/420.0;
dst[offset+5 ] = (A*l*l*rho*src[5+offset])/105.0 - (A*l*l*rho*src[11+offset])/140.0 + (11.0*A*l*rho*src[2+offset])/210.0 + (13.0*A*l*rho*src[8+offset])/420.0;
dst[offset+6 ] = (9.0*A*rho*src[0+offset])/70.0 + (13.0*A*rho*src[6+offset])/35.0 + (13.0*A*l*rho*src[3+offset])/420.0 - (11.0*A*l*rho*src[9 +offset])/210.0;
dst[offset+7 ] = (9.0*A*rho*src[1+offset])/70.0 + (13.0*A*rho*src[7+offset])/35.0 + (13.0*A*l*rho*src[4+offset])/420.0 - (11.0*A*l*rho*src[10+offset])/210.0;
dst[offset+8 ] = (9.0*A*rho*src[2+offset])/70.0 + (13.0*A*rho*src[8+offset])/35.0 + (13.0*A*l*rho*src[5+offset])/420.0 - (11.0*A*l*rho*src[11+offset])/210.0;
dst[offset+9 ] = (A*l*l*rho*src[9 +offset])/105.0 - (A*l*l*rho*src[3+offset])/140.0 - (13.0*A*l*rho*src[0+offset])/420.0 - (11.0*A*l*rho*src[6+offset])/210.0;
dst[offset+10] = (A*l*l*rho*src[10+offset])/105.0 - (A*l*l*rho*src[4+offset])/140.0 - (13.0*A*l*rho*src[1+offset])/420.0 - (11.0*A*l*rho*src[7+offset])/210.0;
dst[offset+11] = (A*l*l*rho*src[11+offset])/105.0 - (A*l*l*rho*src[5+offset])/140.0 - (13.0*A*l*rho*src[2+offset])/420.0 - (11.0*A*l*rho*src[8+offset])/210.0;
}
// Multiplies the per-plate consistent mass matrix into a state vector: dst = M_plate * src,
// one thread per plate element (36 generalized coordinates each).
//   geometries - contact geometry array; plate entries start at [numBodies+numBeams]:
//                .x = plate dimension a, .y = plate dimension b
//   materials  - per-plate material array: .x = density rho, .w = thickness th
//   src, dst   - full system state vectors; this plate's 36 DOFs start at
//                3*numBodies + 12*numBeams + 36*index
// The decimal constants appear to be pre-evaluated rational coefficients of a
// closed-form 36x36 element mass matrix (presumably machine-generated from a
// symbolic derivation -- do not edit the coefficients by hand).
__global__ void multiplyByPlateMass(double3* geometries, double4* materials, double* src, double* dst, uint numBodies, uint numBeams, uint numPlates) {
INIT_CHECK_THREAD_BOUNDED(INDEX1D, numPlates);
double3 geometry = geometries[numBodies+numBeams+index];
double a = geometry.x;
double b = geometry.y;
double rho = materials[index].x;
double th = materials[index].w;
// First DOF of this plate within the global state vector.
uint offset = 3*numBodies+12*numBeams+36*index;
// dst[offset+k] = row k of the element mass matrix dotted with this plate's 36 src entries.
dst[offset+0] = rho*th*src[0+offset]*1.370634920634921E-1+rho*th*src[9+offset]*4.865079365079365E-2+rho*th*src[18+offset]*1.563492063492063E-2+rho*th*src[27+offset]*4.865079365079365E-2+a*rho*th*src[3+offset]*1.829365079365079E-2-a*rho*th*src[12+offset]*1.087301587301587E-2-a*rho*th*src[21+offset]*4.603174603174603E-3+a*rho*th*src[30+offset]*7.896825396825397E-3+b*rho*th*src[6+offset]*1.829365079365079E-2+b*rho*th*src[15+offset]*7.896825396825397E-3-b*rho*th*src[24+offset]*4.603174603174603E-3-b*rho*th*src[33+offset]*1.087301587301587E-2;
dst[offset+1] = rho*th*src[1+offset]*1.370634920634921E-1+rho*th*src[10+offset]*4.865079365079365E-2+rho*th*src[19+offset]*1.563492063492063E-2+rho*th*src[28+offset]*4.865079365079365E-2+a*rho*th*src[4+offset]*1.829365079365079E-2-a*rho*th*src[13+offset]*1.087301587301587E-2-a*rho*th*src[22+offset]*4.603174603174603E-3+a*rho*th*src[31+offset]*7.896825396825397E-3+b*rho*th*src[7+offset]*1.829365079365079E-2+b*rho*th*src[16+offset]*7.896825396825397E-3-b*rho*th*src[25+offset]*4.603174603174603E-3-b*rho*th*src[34+offset]*1.087301587301587E-2;
dst[offset+2] = rho*th*src[2+offset]*1.370634920634921E-1+rho*th*src[11+offset]*4.865079365079365E-2+rho*th*src[20+offset]*1.563492063492063E-2+rho*th*src[29+offset]*4.865079365079365E-2+a*rho*th*src[5+offset]*1.829365079365079E-2-a*rho*th*src[14+offset]*1.087301587301587E-2-a*rho*th*src[23+offset]*4.603174603174603E-3+a*rho*th*src[32+offset]*7.896825396825397E-3+b*rho*th*src[8+offset]*1.829365079365079E-2+b*rho*th*src[17+offset]*7.896825396825397E-3-b*rho*th*src[26+offset]*4.603174603174603E-3-b*rho*th*src[35+offset]*1.087301587301587E-2;
dst[offset+3] = a*rho*th*src[0+offset]*1.829365079365079E-2+a*rho*th*src[9+offset]*1.087301587301587E-2+a*rho*th*src[18+offset]*4.603174603174603E-3+a*rho*th*src[27+offset]*7.896825396825397E-3+(a*a)*rho*th*src[3+offset]*(1.0/3.15E2)-(a*a)*rho*th*src[12+offset]*(1.0/4.2E2)-(a*a)*rho*th*src[21+offset]*(1.0/8.4E2)+(a*a)*rho*th*src[30+offset]*(1.0/6.3E2)+a*b*rho*th*src[6+offset]*(1.0/4.0E2)+a*b*rho*th*src[15+offset]*(1.0/6.0E2)-a*b*rho*th*src[24+offset]*(1.0/9.0E2)-a*b*rho*th*src[33+offset]*(1.0/6.0E2);
dst[offset+4] = a*rho*th*src[1+offset]*1.829365079365079E-2+a*rho*th*src[10+offset]*1.087301587301587E-2+a*rho*th*src[19+offset]*4.603174603174603E-3+a*rho*th*src[28+offset]*7.896825396825397E-3+(a*a)*rho*th*src[4+offset]*(1.0/3.15E2)-(a*a)*rho*th*src[13+offset]*(1.0/4.2E2)-(a*a)*rho*th*src[22+offset]*(1.0/8.4E2)+(a*a)*rho*th*src[31+offset]*(1.0/6.3E2)+a*b*rho*th*src[7+offset]*(1.0/4.0E2)+a*b*rho*th*src[16+offset]*(1.0/6.0E2)-a*b*rho*th*src[25+offset]*(1.0/9.0E2)-a*b*rho*th*src[34+offset]*(1.0/6.0E2);
dst[offset+5] = a*rho*th*src[2+offset]*1.829365079365079E-2+a*rho*th*src[11+offset]*1.087301587301587E-2+a*rho*th*src[20+offset]*4.603174603174603E-3+a*rho*th*src[29+offset]*7.896825396825397E-3+(a*a)*rho*th*src[5+offset]*(1.0/3.15E2)-(a*a)*rho*th*src[14+offset]*(1.0/4.2E2)-(a*a)*rho*th*src[23+offset]*(1.0/8.4E2)+(a*a)*rho*th*src[32+offset]*(1.0/6.3E2)+a*b*rho*th*src[8+offset]*(1.0/4.0E2)+a*b*rho*th*src[17+offset]*(1.0/6.0E2)-a*b*rho*th*src[26+offset]*(1.0/9.0E2)-a*b*rho*th*src[35+offset]*(1.0/6.0E2);
dst[offset+6] = b*rho*th*src[0+offset]*1.829365079365079E-2+b*rho*th*src[9+offset]*7.896825396825397E-3+b*rho*th*src[18+offset]*4.603174603174603E-3+b*rho*th*src[27+offset]*1.087301587301587E-2+(b*b)*rho*th*src[6+offset]*(1.0/3.15E2)+(b*b)*rho*th*src[15+offset]*(1.0/6.3E2)-(b*b)*rho*th*src[24+offset]*(1.0/8.4E2)-(b*b)*rho*th*src[33+offset]*(1.0/4.2E2)+a*b*rho*th*src[3+offset]*(1.0/4.0E2)-a*b*rho*th*src[12+offset]*(1.0/6.0E2)-a*b*rho*th*src[21+offset]*(1.0/9.0E2)+a*b*rho*th*src[30+offset]*(1.0/6.0E2);
dst[offset+7] = b*rho*th*src[1+offset]*1.829365079365079E-2+b*rho*th*src[10+offset]*7.896825396825397E-3+b*rho*th*src[19+offset]*4.603174603174603E-3+b*rho*th*src[28+offset]*1.087301587301587E-2+(b*b)*rho*th*src[7+offset]*(1.0/3.15E2)+(b*b)*rho*th*src[16+offset]*(1.0/6.3E2)-(b*b)*rho*th*src[25+offset]*(1.0/8.4E2)-(b*b)*rho*th*src[34+offset]*(1.0/4.2E2)+a*b*rho*th*src[4+offset]*(1.0/4.0E2)-a*b*rho*th*src[13+offset]*(1.0/6.0E2)-a*b*rho*th*src[22+offset]*(1.0/9.0E2)+a*b*rho*th*src[31+offset]*(1.0/6.0E2);
dst[offset+8] = b*rho*th*src[2+offset]*1.829365079365079E-2+b*rho*th*src[11+offset]*7.896825396825397E-3+b*rho*th*src[20+offset]*4.603174603174603E-3+b*rho*th*src[29+offset]*1.087301587301587E-2+(b*b)*rho*th*src[8+offset]*(1.0/3.15E2)+(b*b)*rho*th*src[17+offset]*(1.0/6.3E2)-(b*b)*rho*th*src[26+offset]*(1.0/8.4E2)-(b*b)*rho*th*src[35+offset]*(1.0/4.2E2)+a*b*rho*th*src[5+offset]*(1.0/4.0E2)-a*b*rho*th*src[14+offset]*(1.0/6.0E2)-a*b*rho*th*src[23+offset]*(1.0/9.0E2)+a*b*rho*th*src[32+offset]*(1.0/6.0E2);
dst[offset+9] = rho*th*src[0+offset]*4.865079365079365E-2+rho*th*src[9+offset]*1.370634920634921E-1+rho*th*src[18+offset]*4.865079365079365E-2+rho*th*src[27+offset]*1.563492063492063E-2+a*rho*th*src[3+offset]*1.087301587301587E-2-a*rho*th*src[12+offset]*1.829365079365079E-2-a*rho*th*src[21+offset]*7.896825396825397E-3+a*rho*th*src[30+offset]*4.603174603174603E-3+b*rho*th*src[6+offset]*7.896825396825397E-3+b*rho*th*src[15+offset]*1.829365079365079E-2-b*rho*th*src[24+offset]*1.087301587301587E-2-b*rho*th*src[33+offset]*4.603174603174603E-3;
dst[offset+10] = rho*th*src[1+offset]*4.865079365079365E-2+rho*th*src[10+offset]*1.370634920634921E-1+rho*th*src[19+offset]*4.865079365079365E-2+rho*th*src[28+offset]*1.563492063492063E-2+a*rho*th*src[4+offset]*1.087301587301587E-2-a*rho*th*src[13+offset]*1.829365079365079E-2-a*rho*th*src[22+offset]*7.896825396825397E-3+a*rho*th*src[31+offset]*4.603174603174603E-3+b*rho*th*src[7+offset]*7.896825396825397E-3+b*rho*th*src[16+offset]*1.829365079365079E-2-b*rho*th*src[25+offset]*1.087301587301587E-2-b*rho*th*src[34+offset]*4.603174603174603E-3;
dst[offset+11] = rho*th*src[2+offset]*4.865079365079365E-2+rho*th*src[11+offset]*1.370634920634921E-1+rho*th*src[20+offset]*4.865079365079365E-2+rho*th*src[29+offset]*1.563492063492063E-2+a*rho*th*src[5+offset]*1.087301587301587E-2-a*rho*th*src[14+offset]*1.829365079365079E-2-a*rho*th*src[23+offset]*7.896825396825397E-3+a*rho*th*src[32+offset]*4.603174603174603E-3+b*rho*th*src[8+offset]*7.896825396825397E-3+b*rho*th*src[17+offset]*1.829365079365079E-2-b*rho*th*src[26+offset]*1.087301587301587E-2-b*rho*th*src[35+offset]*4.603174603174603E-3;
dst[offset+12] = a*rho*th*src[0+offset]*(-1.087301587301587E-2)-a*rho*th*src[9+offset]*1.829365079365079E-2-a*rho*th*src[18+offset]*7.896825396825397E-3-a*rho*th*src[27+offset]*4.603174603174603E-3-(a*a)*rho*th*src[3+offset]*(1.0/4.2E2)+(a*a)*rho*th*src[12+offset]*(1.0/3.15E2)+(a*a)*rho*th*src[21+offset]*(1.0/6.3E2)-(a*a)*rho*th*src[30+offset]*(1.0/8.4E2)-a*b*rho*th*src[6+offset]*(1.0/6.0E2)-a*b*rho*th*src[15+offset]*(1.0/4.0E2)+a*b*rho*th*src[24+offset]*(1.0/6.0E2)+a*b*rho*th*src[33+offset]*(1.0/9.0E2);
dst[offset+13] = a*rho*th*src[1+offset]*(-1.087301587301587E-2)-a*rho*th*src[10+offset]*1.829365079365079E-2-a*rho*th*src[19+offset]*7.896825396825397E-3-a*rho*th*src[28+offset]*4.603174603174603E-3-(a*a)*rho*th*src[4+offset]*(1.0/4.2E2)+(a*a)*rho*th*src[13+offset]*(1.0/3.15E2)+(a*a)*rho*th*src[22+offset]*(1.0/6.3E2)-(a*a)*rho*th*src[31+offset]*(1.0/8.4E2)-a*b*rho*th*src[7+offset]*(1.0/6.0E2)-a*b*rho*th*src[16+offset]*(1.0/4.0E2)+a*b*rho*th*src[25+offset]*(1.0/6.0E2)+a*b*rho*th*src[34+offset]*(1.0/9.0E2);
dst[offset+14] = a*rho*th*src[2+offset]*(-1.087301587301587E-2)-a*rho*th*src[11+offset]*1.829365079365079E-2-a*rho*th*src[20+offset]*7.896825396825397E-3-a*rho*th*src[29+offset]*4.603174603174603E-3-(a*a)*rho*th*src[5+offset]*(1.0/4.2E2)+(a*a)*rho*th*src[14+offset]*(1.0/3.15E2)+(a*a)*rho*th*src[23+offset]*(1.0/6.3E2)-(a*a)*rho*th*src[32+offset]*(1.0/8.4E2)-a*b*rho*th*src[8+offset]*(1.0/6.0E2)-a*b*rho*th*src[17+offset]*(1.0/4.0E2)+a*b*rho*th*src[26+offset]*(1.0/6.0E2)+a*b*rho*th*src[35+offset]*(1.0/9.0E2);
dst[offset+15] = b*rho*th*src[0+offset]*7.896825396825397E-3+b*rho*th*src[9+offset]*1.829365079365079E-2+b*rho*th*src[18+offset]*1.087301587301587E-2+b*rho*th*src[27+offset]*4.603174603174603E-3+(b*b)*rho*th*src[6+offset]*(1.0/6.3E2)+(b*b)*rho*th*src[15+offset]*(1.0/3.15E2)-(b*b)*rho*th*src[24+offset]*(1.0/4.2E2)-(b*b)*rho*th*src[33+offset]*(1.0/8.4E2)+a*b*rho*th*src[3+offset]*(1.0/6.0E2)-a*b*rho*th*src[12+offset]*(1.0/4.0E2)-a*b*rho*th*src[21+offset]*(1.0/6.0E2)+a*b*rho*th*src[30+offset]*(1.0/9.0E2);
dst[offset+16] = b*rho*th*src[1+offset]*7.896825396825397E-3+b*rho*th*src[10+offset]*1.829365079365079E-2+b*rho*th*src[19+offset]*1.087301587301587E-2+b*rho*th*src[28+offset]*4.603174603174603E-3+(b*b)*rho*th*src[7+offset]*(1.0/6.3E2)+(b*b)*rho*th*src[16+offset]*(1.0/3.15E2)-(b*b)*rho*th*src[25+offset]*(1.0/4.2E2)-(b*b)*rho*th*src[34+offset]*(1.0/8.4E2)+a*b*rho*th*src[4+offset]*(1.0/6.0E2)-a*b*rho*th*src[13+offset]*(1.0/4.0E2)-a*b*rho*th*src[22+offset]*(1.0/6.0E2)+a*b*rho*th*src[31+offset]*(1.0/9.0E2);
dst[offset+17] = b*rho*th*src[2+offset]*7.896825396825397E-3+b*rho*th*src[11+offset]*1.829365079365079E-2+b*rho*th*src[20+offset]*1.087301587301587E-2+b*rho*th*src[29+offset]*4.603174603174603E-3+(b*b)*rho*th*src[8+offset]*(1.0/6.3E2)+(b*b)*rho*th*src[17+offset]*(1.0/3.15E2)-(b*b)*rho*th*src[26+offset]*(1.0/4.2E2)-(b*b)*rho*th*src[35+offset]*(1.0/8.4E2)+a*b*rho*th*src[5+offset]*(1.0/6.0E2)-a*b*rho*th*src[14+offset]*(1.0/4.0E2)-a*b*rho*th*src[23+offset]*(1.0/6.0E2)+a*b*rho*th*src[32+offset]*(1.0/9.0E2);
dst[offset+18] = rho*th*src[0+offset]*1.563492063492063E-2+rho*th*src[9+offset]*4.865079365079365E-2+rho*th*src[18+offset]*1.370634920634921E-1+rho*th*src[27+offset]*4.865079365079365E-2+a*rho*th*src[3+offset]*4.603174603174603E-3-a*rho*th*src[12+offset]*7.896825396825397E-3-a*rho*th*src[21+offset]*1.829365079365079E-2+a*rho*th*src[30+offset]*1.087301587301587E-2+b*rho*th*src[6+offset]*4.603174603174603E-3+b*rho*th*src[15+offset]*1.087301587301587E-2-b*rho*th*src[24+offset]*1.829365079365079E-2-b*rho*th*src[33+offset]*7.896825396825397E-3;
dst[offset+19] = rho*th*src[1+offset]*1.563492063492063E-2+rho*th*src[10+offset]*4.865079365079365E-2+rho*th*src[19+offset]*1.370634920634921E-1+rho*th*src[28+offset]*4.865079365079365E-2+a*rho*th*src[4+offset]*4.603174603174603E-3-a*rho*th*src[13+offset]*7.896825396825397E-3-a*rho*th*src[22+offset]*1.829365079365079E-2+a*rho*th*src[31+offset]*1.087301587301587E-2+b*rho*th*src[7+offset]*4.603174603174603E-3+b*rho*th*src[16+offset]*1.087301587301587E-2-b*rho*th*src[25+offset]*1.829365079365079E-2-b*rho*th*src[34+offset]*7.896825396825397E-3;
dst[offset+20] = rho*th*src[2+offset]*1.563492063492063E-2+rho*th*src[11+offset]*4.865079365079365E-2+rho*th*src[20+offset]*1.370634920634921E-1+rho*th*src[29+offset]*4.865079365079365E-2+a*rho*th*src[5+offset]*4.603174603174603E-3-a*rho*th*src[14+offset]*7.896825396825397E-3-a*rho*th*src[23+offset]*1.829365079365079E-2+a*rho*th*src[32+offset]*1.087301587301587E-2+b*rho*th*src[8+offset]*4.603174603174603E-3+b*rho*th*src[17+offset]*1.087301587301587E-2-b*rho*th*src[26+offset]*1.829365079365079E-2-b*rho*th*src[35+offset]*7.896825396825397E-3;
dst[offset+21] = a*rho*th*src[0+offset]*(-4.603174603174603E-3)-a*rho*th*src[9+offset]*7.896825396825397E-3-a*rho*th*src[18+offset]*1.829365079365079E-2-a*rho*th*src[27+offset]*1.087301587301587E-2-(a*a)*rho*th*src[3+offset]*(1.0/8.4E2)+(a*a)*rho*th*src[12+offset]*(1.0/6.3E2)+(a*a)*rho*th*src[21+offset]*(1.0/3.15E2)-(a*a)*rho*th*src[30+offset]*(1.0/4.2E2)-a*b*rho*th*src[6+offset]*(1.0/9.0E2)-a*b*rho*th*src[15+offset]*(1.0/6.0E2)+a*b*rho*th*src[24+offset]*(1.0/4.0E2)+a*b*rho*th*src[33+offset]*(1.0/6.0E2);
dst[offset+22] = a*rho*th*src[1+offset]*(-4.603174603174603E-3)-a*rho*th*src[10+offset]*7.896825396825397E-3-a*rho*th*src[19+offset]*1.829365079365079E-2-a*rho*th*src[28+offset]*1.087301587301587E-2-(a*a)*rho*th*src[4+offset]*(1.0/8.4E2)+(a*a)*rho*th*src[13+offset]*(1.0/6.3E2)+(a*a)*rho*th*src[22+offset]*(1.0/3.15E2)-(a*a)*rho*th*src[31+offset]*(1.0/4.2E2)-a*b*rho*th*src[7+offset]*(1.0/9.0E2)-a*b*rho*th*src[16+offset]*(1.0/6.0E2)+a*b*rho*th*src[25+offset]*(1.0/4.0E2)+a*b*rho*th*src[34+offset]*(1.0/6.0E2);
dst[offset+23] = a*rho*th*src[2+offset]*(-4.603174603174603E-3)-a*rho*th*src[11+offset]*7.896825396825397E-3-a*rho*th*src[20+offset]*1.829365079365079E-2-a*rho*th*src[29+offset]*1.087301587301587E-2-(a*a)*rho*th*src[5+offset]*(1.0/8.4E2)+(a*a)*rho*th*src[14+offset]*(1.0/6.3E2)+(a*a)*rho*th*src[23+offset]*(1.0/3.15E2)-(a*a)*rho*th*src[32+offset]*(1.0/4.2E2)-a*b*rho*th*src[8+offset]*(1.0/9.0E2)-a*b*rho*th*src[17+offset]*(1.0/6.0E2)+a*b*rho*th*src[26+offset]*(1.0/4.0E2)+a*b*rho*th*src[35+offset]*(1.0/6.0E2);
dst[offset+24] = b*rho*th*src[0+offset]*(-4.603174603174603E-3)-b*rho*th*src[9+offset]*1.087301587301587E-2-b*rho*th*src[18+offset]*1.829365079365079E-2-b*rho*th*src[27+offset]*7.896825396825397E-3-(b*b)*rho*th*src[6+offset]*(1.0/8.4E2)-(b*b)*rho*th*src[15+offset]*(1.0/4.2E2)+(b*b)*rho*th*src[24+offset]*(1.0/3.15E2)+(b*b)*rho*th*src[33+offset]*(1.0/6.3E2)-a*b*rho*th*src[3+offset]*(1.0/9.0E2)+a*b*rho*th*src[12+offset]*(1.0/6.0E2)+a*b*rho*th*src[21+offset]*(1.0/4.0E2)-a*b*rho*th*src[30+offset]*(1.0/6.0E2);
dst[offset+25] = b*rho*th*src[1+offset]*(-4.603174603174603E-3)-b*rho*th*src[10+offset]*1.087301587301587E-2-b*rho*th*src[19+offset]*1.829365079365079E-2-b*rho*th*src[28+offset]*7.896825396825397E-3-(b*b)*rho*th*src[7+offset]*(1.0/8.4E2)-(b*b)*rho*th*src[16+offset]*(1.0/4.2E2)+(b*b)*rho*th*src[25+offset]*(1.0/3.15E2)+(b*b)*rho*th*src[34+offset]*(1.0/6.3E2)-a*b*rho*th*src[4+offset]*(1.0/9.0E2)+a*b*rho*th*src[13+offset]*(1.0/6.0E2)+a*b*rho*th*src[22+offset]*(1.0/4.0E2)-a*b*rho*th*src[31+offset]*(1.0/6.0E2);
dst[offset+26] = b*rho*th*src[2+offset]*(-4.603174603174603E-3)-b*rho*th*src[11+offset]*1.087301587301587E-2-b*rho*th*src[20+offset]*1.829365079365079E-2-b*rho*th*src[29+offset]*7.896825396825397E-3-(b*b)*rho*th*src[8+offset]*(1.0/8.4E2)-(b*b)*rho*th*src[17+offset]*(1.0/4.2E2)+(b*b)*rho*th*src[26+offset]*(1.0/3.15E2)+(b*b)*rho*th*src[35+offset]*(1.0/6.3E2)-a*b*rho*th*src[5+offset]*(1.0/9.0E2)+a*b*rho*th*src[14+offset]*(1.0/6.0E2)+a*b*rho*th*src[23+offset]*(1.0/4.0E2)-a*b*rho*th*src[32+offset]*(1.0/6.0E2);
dst[offset+27] = rho*th*src[0+offset]*4.865079365079365E-2+rho*th*src[9+offset]*1.563492063492063E-2+rho*th*src[18+offset]*4.865079365079365E-2+rho*th*src[27+offset]*1.370634920634921E-1+a*rho*th*src[3+offset]*7.896825396825397E-3-a*rho*th*src[12+offset]*4.603174603174603E-3-a*rho*th*src[21+offset]*1.087301587301587E-2+a*rho*th*src[30+offset]*1.829365079365079E-2+b*rho*th*src[6+offset]*1.087301587301587E-2+b*rho*th*src[15+offset]*4.603174603174603E-3-b*rho*th*src[24+offset]*7.896825396825397E-3-b*rho*th*src[33+offset]*1.829365079365079E-2;
dst[offset+28] = rho*th*src[1+offset]*4.865079365079365E-2+rho*th*src[10+offset]*1.563492063492063E-2+rho*th*src[19+offset]*4.865079365079365E-2+rho*th*src[28+offset]*1.370634920634921E-1+a*rho*th*src[4+offset]*7.896825396825397E-3-a*rho*th*src[13+offset]*4.603174603174603E-3-a*rho*th*src[22+offset]*1.087301587301587E-2+a*rho*th*src[31+offset]*1.829365079365079E-2+b*rho*th*src[7+offset]*1.087301587301587E-2+b*rho*th*src[16+offset]*4.603174603174603E-3-b*rho*th*src[25+offset]*7.896825396825397E-3-b*rho*th*src[34+offset]*1.829365079365079E-2;
dst[offset+29] = rho*th*src[2+offset]*4.865079365079365E-2+rho*th*src[11+offset]*1.563492063492063E-2+rho*th*src[20+offset]*4.865079365079365E-2+rho*th*src[29+offset]*1.370634920634921E-1+a*rho*th*src[5+offset]*7.896825396825397E-3-a*rho*th*src[14+offset]*4.603174603174603E-3-a*rho*th*src[23+offset]*1.087301587301587E-2+a*rho*th*src[32+offset]*1.829365079365079E-2+b*rho*th*src[8+offset]*1.087301587301587E-2+b*rho*th*src[17+offset]*4.603174603174603E-3-b*rho*th*src[26+offset]*7.896825396825397E-3-b*rho*th*src[35+offset]*1.829365079365079E-2;
dst[offset+30] = a*rho*th*src[0+offset]*7.896825396825397E-3+a*rho*th*src[9+offset]*4.603174603174603E-3+a*rho*th*src[18+offset]*1.087301587301587E-2+a*rho*th*src[27+offset]*1.829365079365079E-2+(a*a)*rho*th*src[3+offset]*(1.0/6.3E2)-(a*a)*rho*th*src[12+offset]*(1.0/8.4E2)-(a*a)*rho*th*src[21+offset]*(1.0/4.2E2)+(a*a)*rho*th*src[30+offset]*(1.0/3.15E2)+a*b*rho*th*src[6+offset]*(1.0/6.0E2)+a*b*rho*th*src[15+offset]*(1.0/9.0E2)-a*b*rho*th*src[24+offset]*(1.0/6.0E2)-a*b*rho*th*src[33+offset]*(1.0/4.0E2);
dst[offset+31] = a*rho*th*src[1+offset]*7.896825396825397E-3+a*rho*th*src[10+offset]*4.603174603174603E-3+a*rho*th*src[19+offset]*1.087301587301587E-2+a*rho*th*src[28+offset]*1.829365079365079E-2+(a*a)*rho*th*src[4+offset]*(1.0/6.3E2)-(a*a)*rho*th*src[13+offset]*(1.0/8.4E2)-(a*a)*rho*th*src[22+offset]*(1.0/4.2E2)+(a*a)*rho*th*src[31+offset]*(1.0/3.15E2)+a*b*rho*th*src[7+offset]*(1.0/6.0E2)+a*b*rho*th*src[16+offset]*(1.0/9.0E2)-a*b*rho*th*src[25+offset]*(1.0/6.0E2)-a*b*rho*th*src[34+offset]*(1.0/4.0E2);
dst[offset+32] = a*rho*th*src[2+offset]*7.896825396825397E-3+a*rho*th*src[11+offset]*4.603174603174603E-3+a*rho*th*src[20+offset]*1.087301587301587E-2+a*rho*th*src[29+offset]*1.829365079365079E-2+(a*a)*rho*th*src[5+offset]*(1.0/6.3E2)-(a*a)*rho*th*src[14+offset]*(1.0/8.4E2)-(a*a)*rho*th*src[23+offset]*(1.0/4.2E2)+(a*a)*rho*th*src[32+offset]*(1.0/3.15E2)+a*b*rho*th*src[8+offset]*(1.0/6.0E2)+a*b*rho*th*src[17+offset]*(1.0/9.0E2)-a*b*rho*th*src[26+offset]*(1.0/6.0E2)-a*b*rho*th*src[35+offset]*(1.0/4.0E2);
dst[offset+33] = b*rho*th*src[0+offset]*(-1.087301587301587E-2)-b*rho*th*src[9+offset]*4.603174603174603E-3-b*rho*th*src[18+offset]*7.896825396825397E-3-b*rho*th*src[27+offset]*1.829365079365079E-2-(b*b)*rho*th*src[6+offset]*(1.0/4.2E2)-(b*b)*rho*th*src[15+offset]*(1.0/8.4E2)+(b*b)*rho*th*src[24+offset]*(1.0/6.3E2)+(b*b)*rho*th*src[33+offset]*(1.0/3.15E2)-a*b*rho*th*src[3+offset]*(1.0/6.0E2)+a*b*rho*th*src[12+offset]*(1.0/9.0E2)+a*b*rho*th*src[21+offset]*(1.0/6.0E2)-a*b*rho*th*src[30+offset]*(1.0/4.0E2);
dst[offset+34] = b*rho*th*src[1+offset]*(-1.087301587301587E-2)-b*rho*th*src[10+offset]*4.603174603174603E-3-b*rho*th*src[19+offset]*7.896825396825397E-3-b*rho*th*src[28+offset]*1.829365079365079E-2-(b*b)*rho*th*src[7+offset]*(1.0/4.2E2)-(b*b)*rho*th*src[16+offset]*(1.0/8.4E2)+(b*b)*rho*th*src[25+offset]*(1.0/6.3E2)+(b*b)*rho*th*src[34+offset]*(1.0/3.15E2)-a*b*rho*th*src[4+offset]*(1.0/6.0E2)+a*b*rho*th*src[13+offset]*(1.0/9.0E2)+a*b*rho*th*src[22+offset]*(1.0/6.0E2)-a*b*rho*th*src[31+offset]*(1.0/4.0E2);
dst[offset+35] = b*rho*th*src[2+offset]*(-1.087301587301587E-2)-b*rho*th*src[11+offset]*4.603174603174603E-3-b*rho*th*src[20+offset]*7.896825396825397E-3-b*rho*th*src[29+offset]*1.829365079365079E-2-(b*b)*rho*th*src[8+offset]*(1.0/4.2E2)-(b*b)*rho*th*src[17+offset]*(1.0/8.4E2)+(b*b)*rho*th*src[26+offset]*(1.0/6.3E2)+(b*b)*rho*th*src[35+offset]*(1.0/3.15E2)-a*b*rho*th*src[5+offset]*(1.0/6.0E2)+a*b*rho*th*src[14+offset]*(1.0/9.0E2)+a*b*rho*th*src[23+offset]*(1.0/6.0E2)-a*b*rho*th*src[32+offset]*(1.0/4.0E2);
}
// dst = M * src for planar (2D) rigid bodies: both translational DOFs are
// scaled by the body mass (materials[index].x) and the rotational DOF by the
// moment of inertia (materials[index].y). One thread per 2D body; its 3 DOFs
// follow the body, beam, and plate entries in the global state vector.
__global__ void multiplyByBody2DMass(double2* materials, double* src, double* dst, int numBodies, int numBeams, int numPlates, int numBody2Ds) {
  INIT_CHECK_THREAD_BOUNDED(INDEX1D, numBody2Ds);
  double2 material = materials[index];
  // First DOF of this 2D body within the global state vector.
  uint base = 3*numBodies + 12*numBeams + 36*numPlates + 3*index;
  dst[base]   = material.x * src[base];    // x translation (mass)
  dst[base+1] = material.x * src[base+1];  // y translation (mass)
  dst[base+2] = material.y * src[base+2];  // rotation (inertia)
}
// Assembles the applied-impulse vector k = M*v + h*(f - fElastic).
// Each subsystem (rigid bodies, beams, plates, 2D bodies, shell mesh) writes
// its own mass-matrix product M*v into its slice of k_d/k_shellMesh, then a
// single axpbypcz folds in the time-step-scaled forces. Always returns 0.
int System::buildAppliedImpulseVector() {
// build k
// Refresh fElastic before it is used below.
updateElasticForces();
// Per-subsystem M*v products; each kernel only runs when that subsystem exists.
if(bodies.size())hipLaunchKernelGGL(( multiplyByMass), dim3(BLOCKS(3*bodies.size())),dim3(THREADS), 0, 0, CASTD1(mass_d), CASTD1(v_d), CASTD1(k_d), 3*bodies.size());
if(beams.size())hipLaunchKernelGGL(( multiplyByBeamMass), dim3(BLOCKS(beams.size())),dim3(THREADS), 0, 0, CASTD3(contactGeometry_d), CASTD3(materialsBeam_d), CASTD1(v_d), CASTD1(k_d), bodies.size(), beams.size());
if(plates.size())hipLaunchKernelGGL(( multiplyByPlateMass), dim3(BLOCKS(plates.size())),dim3(THREADS), 0, 0, CASTD3(contactGeometry_d), CASTD4(materialsPlate_d), CASTD1(v_d), CASTD1(k_d), bodies.size(), beams.size(), plates.size());
if(body2Ds.size())hipLaunchKernelGGL(( multiplyByBody2DMass), dim3(BLOCKS(body2Ds.size())),dim3(THREADS), 0, 0, CASTD2(materialsBody2D_d), CASTD1(v_d), CASTD1(k_d), bodies.size(), beams.size(), plates.size(), body2Ds.size());
if(nodes_h.size()) cusp::multiply(mass_shellMesh,v_shellMesh,k_shellMesh);
//cusp::blas::axpy(fElastic,fApplied,-1.0); //TODO: Come up with a fix for applied forces
// k = h*f - h*fElastic + 1.0*k  (k currently holds M*v).
cusp::blas::axpbypcz(f,fElastic,k,k,h,-h,1.0);
return 0;
}
// Fills the stabilization entries for each contact: the normal row gets
// penetration/timeStep, the two friction rows get zero. Contact rows start
// after the bilateral-constraint rows (offsetBilateralConstraints).
// One thread per collision.
__global__ void buildStabilization(double* b, double4* normalsAndPenetrations, double timeStep, uint offsetBilateralConstraints, uint numCollisions) {
  INIT_CHECK_THREAD_BOUNDED(INDEX1D, numCollisions);
  // First of this contact's three rows in b.
  uint row = 3*index + offsetBilateralConstraints;
  // The .w component of the packed normal carries the penetration value.
  b[row]   = normalsAndPenetrations[index].w / timeStep;
  b[row+1] = 0;
  b[row+2] = 0;
}
// Stabilization term for bilateral DOF constraints: b[index] = violation/timeStep.
// infoConstraintBilateralDOF packs (velocity, start time tStart, reference value p0);
// presumably this implements a prescribed-motion constraint -- the prescribed
// velocity is ignored before tStart. One thread per bilateral constraint.
__global__ void buildStabilizationBilateral(double* b, double3* infoConstraintBilateralDOF, int2* constraintBilateralDOF, double* p, double timeStep, double time, uint numBilateralConstraints) {
  INIT_CHECK_THREAD_BOUNDED(INDEX1D, numBilateralConstraints);
  int2 dof = constraintBilateralDOF[index];
  double3 info = infoConstraintBilateralDOF[index];
  double tStart = info.y;
  // Before the start time the prescribed velocity has not kicked in yet.
  double vel = (time < tStart) ? 0 : info.x;
  // Prescribed displacement accumulated since tStart.
  double drift = vel*(time - tStart);
  double violation;
  if (dof.y < 0) {
    // Absolute constraint on a single DOF: compare against reference p0 = info.z.
    violation = p[dof.x] - info.z - drift;
  } else {
    // Relative constraint tying two DOFs together.
    violation = p[dof.x] - p[dof.y] - drift;
  }
  b[index] = violation/timeStep;
}
// Stabilization rows for spherical joints between a shell node and a 2D body:
// violation = (body position + attachment offset rotated by the body angle phi)
// minus the shell-node position, scaled by 1/timeStep. These 3 rows per
// constraint follow the numDOFConstraints bilateral rows in b.
// One thread per constraint.
__global__ void buildStabilizationSpherical_ShellNodeToBody2D(double* b, int3* constraints, double3* pHats, double* p, double timeStep, uint numDOFConstraints, int numConstraints) {
  INIT_CHECK_THREAD_BOUNDED(INDEX1D, numConstraints);
  int3 c = constraints[index];
  double3 pHat = pHats[index];  // attachment point, presumably in the body frame -- TODO confirm
  int nodeIdx = c.x;            // first DOF of the shell node
  int bodyIdx = c.y;            // first DOF of the 2D body (x, y, phi)
  double phi = p[bodyIdx+2];
  // Hoist the trig evaluations used by both in-plane rows.
  double cosPhi = cos(phi);
  double sinPhi = sin(phi);
  int row = 3*index + numDOFConstraints;
  b[row]   = (p[bodyIdx]   + pHat.x*cosPhi - pHat.y*sinPhi - p[nodeIdx]  )/timeStep;
  b[row+1] = (p[bodyIdx+1] + pHat.x*sinPhi + pHat.y*cosPhi - p[nodeIdx+1])/timeStep;
  b[row+2] = (pHat.z - p[nodeIdx+2])/timeStep;
}
// Builds the Schur-complement right-hand side r = D*(mass*k) + b, where b
// collects the stabilization terms: bilateral-DOF rows first, then 3 rows per
// spherical shell-node-to-body2D constraint, then 3 rows per contact.
// Resizes r_d/b_d to the current constraint count and rebuilds the array views
// (the raw device storage may have moved). Always returns 0.
int System::buildSchurVector() {
// build r
r_d.resize(3*collisionDetector->numCollisions+constraintsBilateralDOF_d.size()+3*constraintsSpherical_ShellNodeToBody2D_d.size());
b_d.resize(3*collisionDetector->numCollisions+constraintsBilateralDOF_d.size()+3*constraintsSpherical_ShellNodeToBody2D_d.size());
// TODO: There's got to be a better way to do this...
//r.resize(3*collisionDetector->numCollisions);
// Re-wrap the (possibly reallocated) device buffers into cusp array views.
thrust::device_ptr<double> wrapped_device_r(CASTD1(r_d));
r = DeviceValueArrayView(wrapped_device_r, wrapped_device_r + r_d.size());
thrust::device_ptr<double> wrapped_device_b(CASTD1(b_d));
b = DeviceValueArrayView(wrapped_device_b, wrapped_device_b + b_d.size());
// r = D * (mass * k).
cusp::multiply(mass,k,tmp);
cusp::multiply(D,tmp,r);
// Fill the stabilization vector b segment by segment; each kernel only runs
// when its constraint type is present.
if(constraintsBilateralDOF_d.size())hipLaunchKernelGGL(( buildStabilizationBilateral), dim3(BLOCKS(constraintsBilateralDOF_d.size())),dim3(THREADS), 0, 0, CASTD1(b_d), CASTD3(infoConstraintBilateralDOF_d), CASTI2(constraintsBilateralDOF_d), CASTD1(p_d), h, time, constraintsBilateralDOF_d.size());
if(constraintsSpherical_ShellNodeToBody2D_d.size())hipLaunchKernelGGL(( buildStabilizationSpherical_ShellNodeToBody2D), dim3(BLOCKS(constraintsSpherical_ShellNodeToBody2D_d.size())),dim3(THREADS), 0, 0, CASTD1(b_d), CASTI3(constraintsSpherical_ShellNodeToBody2D_d), CASTD3(pSpherical_ShellNodeToBody2D_d), CASTD1(p_d), h, constraintsBilateralDOF_d.size(), constraintsSpherical_ShellNodeToBody2D_d.size());
if(collisionDetector->numCollisions)hipLaunchKernelGGL(( buildStabilization), dim3(BLOCKS(collisionDetector->numCollisions)),dim3(THREADS), 0, 0, CASTD1(b_d), CASTD4(collisionDetector->normalsAndPenetrations_d), h, constraintsBilateralDOF_d.size()+3*constraintsSpherical_ShellNodeToBody2D_d.size(), collisionDetector->numCollisions);
// r += b.
cusp::blas::axpy(b,r,1.0);
return 0;
}
// Builds the Schur-complement matrix N = D * (mass * DT). Given the
// destination name MinvDT, 'mass' presumably stores the inverse mass matrix
// here, making N = D*M^-1*D^T -- TODO confirm against the matrix assembly.
// Always returns 0.
int System::buildSchurMatrix() {
// build N
cusp::multiply(mass,DT,MinvDT);
cusp::multiply(D,MinvDT,N);
return 0;
}
// Extracts the normal component of each contact triplet: src holds 3 entries
// per collision (normal first, then two tangential entries); dst receives one
// value per collision. One thread per collision.
__global__ void getNormalComponent(double* src, double* dst, uint numCollisions) {
  INIT_CHECK_THREAD_BOUNDED(INDEX1D, numCollisions);
  // Leading entry of this collision's triplet.
  double normalComponent = src[3*index];
  dst[index] = normalComponent;
}
// Computes the friction-cone violation per contact:
//   gamma_t   = magnitude of the tangential impulse (components 1 and 2),
//   violation = min(mu*gamma_n - gamma_t, 0),
// so dst[index] is 0 when the impulse lies inside the cone and negative by the
// amount of violation otherwise. One thread per collision.
__global__ void calculateConeViolation(double* gamma, double* friction, double* dst, uint numCollisions) {
INIT_CHECK_THREAD_BOUNDED(INDEX1D, numCollisions);
double gamma_u = gamma[3*index+1];
double gamma_v = gamma[3*index+2];
// Square via multiplication instead of pow(x, 2.0): double-precision pow is
// a slow transcendental on the device and gains nothing for integer exponents.
double gamma_t = sqrt(gamma_u*gamma_u + gamma_v*gamma_v);
double coneViolation = friction[index]*gamma[3*index] - gamma_t; // TODO: Keep the friction indexing in mind for bilaterals
if(coneViolation>0) coneViolation = 0;
dst[index] = coneViolation;
}
// Measures how badly the current solution violates the cone complementarity
// problem (CCP) conditions. Returns (0,0,0,0) when there are no collisions;
// otherwise:
//   .x = most negative normal impulse (clamped to <= 0),
//   .y = most negative normal contact velocity D*v + b (clamped to <= 0),
//   .z = complementarity residual dot(gamma_n, v_n),
//   .w = 2-norm of the friction-cone violations.
double4 System::getCCPViolation() {
double4 violationCCP = make_double4(0,0,0,0);
if(collisionDetector->numCollisions) {
// Build normal impulse vector, gamma_n
thrust::device_vector<double> gamma_n_d;
gamma_n_d.resize(collisionDetector->numCollisions);
thrust::device_ptr<double> wrapped_device_gamma_n(CASTD1(gamma_n_d));
DeviceValueArrayView gamma_n = DeviceValueArrayView(wrapped_device_gamma_n, wrapped_device_gamma_n + gamma_n_d.size());
hipLaunchKernelGGL(( getNormalComponent), dim3(BLOCKS(collisionDetector->numCollisions)),dim3(THREADS), 0, 0, CASTD1(gamma_d), CASTD1(gamma_n_d), collisionDetector->numCollisions);
// Feasibility of the normal impulses: only negative values count as violation.
violationCCP.x = Thrust_Min(gamma_n_d);
if(violationCCP.x > 0) violationCCP.x = 0;
// Build normal velocity vector, v_n
thrust::device_vector<double> tmp_gamma_d;
tmp_gamma_d.resize(3*collisionDetector->numCollisions);
thrust::device_ptr<double> wrapped_device_tmp_gamma(CASTD1(tmp_gamma_d));
DeviceValueArrayView tmp_gamma = DeviceValueArrayView(wrapped_device_tmp_gamma, wrapped_device_tmp_gamma + tmp_gamma_d.size());
thrust::device_vector<double> v_n_d;
v_n_d.resize(collisionDetector->numCollisions);
thrust::device_ptr<double> wrapped_device_v_n(CASTD1(v_n_d));
DeviceValueArrayView v_n = DeviceValueArrayView(wrapped_device_v_n, wrapped_device_v_n + v_n_d.size());
// tmp_gamma = D*v + b (per-contact relative velocities plus stabilization).
cusp::multiply(D,v,tmp_gamma);
cusp::blas::axpy(b,tmp_gamma,1.0);
hipLaunchKernelGGL(( getNormalComponent), dim3(BLOCKS(collisionDetector->numCollisions)),dim3(THREADS), 0, 0, CASTD1(tmp_gamma_d), CASTD1(v_n_d), collisionDetector->numCollisions);
// Feasibility of the normal velocities: only negative values count.
violationCCP.y = Thrust_Min(v_n_d);
if(violationCCP.y > 0) violationCCP.y = 0;
// Check complementarity condition
violationCCP.z = cusp::blas::dot(gamma_n,v_n);
// Check friction cone condition
// NOTE: v_n_d is reused here as scratch storage for the cone violations,
// overwriting the normal velocities computed above.
hipLaunchKernelGGL(( calculateConeViolation), dim3(BLOCKS(collisionDetector->numCollisions)),dim3(THREADS), 0, 0, CASTD1(gamma_d), CASTD1(friction_d), CASTD1(v_n_d), collisionDetector->numCollisions);
violationCCP.w = cusp::blas::nrm2(v_n);
}
return violationCCP;
}
// Potential energy as the negative dot product of the generalized force
// vector f with the generalized positions p (valid assuming f is a constant
// applied-force field such as gravity -- TODO confirm).
double System::getPotentialEnergy() {
return -cusp::blas::dot(f,p);
}
// Kinetic energy 0.5 * v^T * M * v: each subsystem's mass-matrix product M*v
// is written into its slice of tmp_d/tmp_shellMesh, then a single dot product
// with v finishes the quadratic form.
double System::getKineticEnergy() {
if(bodies.size())hipLaunchKernelGGL(( multiplyByMass), dim3(BLOCKS(3*bodies.size())),dim3(THREADS), 0, 0, CASTD1(mass_d), CASTD1(v_d), CASTD1(tmp_d), 3*bodies.size());
if(beams.size())hipLaunchKernelGGL(( multiplyByBeamMass), dim3(BLOCKS(beams.size())),dim3(THREADS), 0, 0, CASTD3(contactGeometry_d), CASTD3(materialsBeam_d), CASTD1(v_d), CASTD1(tmp_d), bodies.size(), beams.size());
if(plates.size())hipLaunchKernelGGL(( multiplyByPlateMass), dim3(BLOCKS(plates.size())),dim3(THREADS), 0, 0, CASTD3(contactGeometry_d), CASTD4(materialsPlate_d), CASTD1(v_d), CASTD1(tmp_d), bodies.size(), beams.size(), plates.size());
if(body2Ds.size())hipLaunchKernelGGL(( multiplyByBody2DMass), dim3(BLOCKS(body2Ds.size())),dim3(THREADS), 0, 0, CASTD2(materialsBody2D_d), CASTD1(v_d), CASTD1(tmp_d), bodies.size(), beams.size(), plates.size(), body2Ds.size());
if(nodes_h.size()) cusp::multiply(mass_shellMesh,v_shellMesh,tmp_shellMesh);
return 0.5*cusp::blas::dot(v,tmp);
}
double System::getStrainEnergy() {
double strainEnergy = 0;
if(beams.size()) strainEnergy+=thrust::reduce(strainEnergy_d.begin(),strainEnergy_d.end());
if(plates.size()) strainEnergy+=thrust::reduce(strainEnergyPlate_d.begin(),strainEnergyPlate_d.end());
if(nodes_h.size()) strainEnergy+=thrust::reduce(strainEnergyShellMesh_d.begin(),strainEnergyShellMesh_d.end());
return strainEnergy;
}
// Total mechanical energy of the system: potential + kinetic + strain.
double System::getTotalEnergy() {
  double potential = getPotentialEnergy();
  double kinetic = getKineticEnergy();
  double strain = getStrainEnergy();
  return potential + kinetic + strain;
}
// Writes a CSV file of the per-collision contact forces. The header line
// carries the collision count and subsystem sizes; each subsequent row holds
// the collision index, the two body identifiers, the contact location, and the
// normal and friction force vectors. Always returns 0 (I/O errors are not
// checked -- NOTE(review): consider verifying filestream.is_open()).
int System::outputContactForcePerCollision(string filename) {
ofstream filestream;
filestream.open(filename.c_str());
// Populate normalForcePerCollision_d / frictionForcePerCollision_d.
calculateContactForcePerCollision();
// copy device data to host
thrust::host_vector<double3> collisionLocations_h = collisionDetector->collisionLocations_d;
thrust::host_vector<double3> normalForcePerCollision_h = normalForcePerCollision_d;
thrust::host_vector<double3> frictionForcePerCollision_h = frictionForcePerCollision_d;
thrust::host_vector<int4> collisionMap_h = collisionMap_d;
thrust::host_vector<uint> collisionIdentifierA_h = collisionDetector->collisionIdentifierA_d;
thrust::host_vector<uint> collisionIdentifierB_h = collisionDetector->collisionIdentifierB_d;
filestream << collisionDetector->numCollisions << ", " << bodies.size() << ", " << beams.size() << ", " << plates.size()+shellConnectivities_h.size() << ", " << body2Ds.size() << ", " << endl;
for(int i=0; i<collisionDetector->numCollisions;i++) {
// Map each collision participant back to its owning body via collisionMap (.x holds the body identifier).
int bodyIdentifierA = collisionMap_h[collisionIdentifierA_h[i]].x;
int bodyIdentifierB = collisionMap_h[collisionIdentifierB_h[i]].x;
filestream << i << ", " << bodyIdentifierA << ", " << bodyIdentifierB << ", " << collisionLocations_h[i].x << ", " << collisionLocations_h[i].y << ", " << collisionLocations_h[i].z << ", " << normalForcePerCollision_h[i].x << ", " << normalForcePerCollision_h[i].y << ", " << normalForcePerCollision_h[i].z << ", " << frictionForcePerCollision_h[i].x << ", " << frictionForcePerCollision_h[i].y << ", " << frictionForcePerCollision_h[i].z << ", \n";
}
filestream.close();
return 0;
}
int System::exportSystem(string filename) {
ofstream filestream;
filestream.open(filename.c_str());
p_h = p_d;
v_h = v_d;
f_contact_h = f_contact_d;
filestream << bodies.size() << ", " << beams.size() << ", " << plates.size()+shellConnectivities_h.size() << ", " << body2Ds.size() << ", " << endl;
for (int i = 0; i < bodies.size(); i++) {
filestream
<< i << ", "
<< bodies[i]->isFixed() << ", "
<< p_h[3*i] << ", "
<< p_h[3*i+1] << ", "
<< p_h[3*i+2] << ", "
<< "1, "
<< "0, "
<< "0, "
<< "0, "
<< v_h[3*i] << ", "
<< v_h[3*i+1] << ", "
<< v_h[3*i+2] << ", ";
if(contactGeometry_h[i].y == 0) {
filestream
<< "0, "
<< contactGeometry_h[i].x << ", ";
}
else {
filestream
<< "2, "
<< contactGeometry_h[i].x << ", "
<< contactGeometry_h[i].y << ", "
<< contactGeometry_h[i].z << ", ";
}
filestream
<< "\n";
}
for (int i = 0; i < beams.size(); i++) {
// TODO: Need to know collision family information, density, elastic modulus, number of contacts (especially important when importing)
filestream
<< bodies.size()+i << ", "
<< contactGeometry_h[bodies.size()+i].x << ", "
<< contactGeometry_h[bodies.size()+i].y << ", "
<< p_h[3*bodies.size()+12*i] << ", "
<< p_h[3*bodies.size()+12*i+1] << ", "
<< p_h[3*bodies.size()+12*i+2] << ", "
<< p_h[3*bodies.size()+12*i+3] << ", "
<< p_h[3*bodies.size()+12*i+4] << ", "
<< p_h[3*bodies.size()+12*i+5] << ", "
<< p_h[3*bodies.size()+12*i+6] << ", "
<< p_h[3*bodies.size()+12*i+7] << ", "
<< p_h[3*bodies.size()+12*i+8] << ", "
<< p_h[3*bodies.size()+12*i+9] << ", "
<< p_h[3*bodies.size()+12*i+10] << ", "
<< p_h[3*bodies.size()+12*i+11] << ", "
<< v_h[3*bodies.size()+12*i] << ", "
<< v_h[3*bodies.size()+12*i+1] << ", "
<< v_h[3*bodies.size()+12*i+2] << ", "
<< v_h[3*bodies.size()+12*i+3] << ", "
<< v_h[3*bodies.size()+12*i+4] << ", "
<< v_h[3*bodies.size()+12*i+5] << ", "
<< v_h[3*bodies.size()+12*i+6] << ", "
<< v_h[3*bodies.size()+12*i+7] << ", "
<< v_h[3*bodies.size()+12*i+8] << ", "
<< v_h[3*bodies.size()+12*i+9] << ", "
<< v_h[3*bodies.size()+12*i+10] << ", "
<< v_h[3*bodies.size()+12*i+11] << ", ";
filestream
<< "\n";
}
for (int i = 0; i < plates.size(); i++) {
// TODO: Need to know collision family information, density, elastic modulus, number of contacts (especially important when importing)
filestream
<< bodies.size()+beams.size()+i << ", "
<< contactGeometry_h[bodies.size()+beams.size()+i].x << ", "
<< contactGeometry_h[bodies.size()+beams.size()+i].y << ", "
<< plates[i]->getThickness() << ", ";
for(int j=0;j<36;j++) {
filestream << p_h[3*bodies.size()+12*beams.size()+36*i+j] << ", ";
}
for(int j=0;j<36;j++) {
filestream << v_h[3*bodies.size()+12*beams.size()+36*i+j] << ", ";
}
for(int j=0;j<36;j++) {
filestream << f_contact_h[3*bodies.size()+12*beams.size()+36*i+j] << ", ";
}
filestream << "\n";
}
for (int i = 0; i < shellConnectivities_h.size(); i++) {
filestream
<< bodies.size()+beams.size()+plates.size()+body2Ds.size()+i << ", "
<< shellGeometries_h[i].x << ", "
<< shellGeometries_h[i].y << ", "
<< shellGeometries_h[i].z << ", ";
int offset = plates.size()*36+12*beams.size()+3*bodies.size()+3*body2Ds.size();
double* p0 = &p_h[offset+9*shellConnectivities_h[i].x];
double* p1 = &p_h[offset+9*shellConnectivities_h[i].y];
double* p2 = &p_h[offset+9*shellConnectivities_h[i].z];
double* p3 = &p_h[offset+9*shellConnectivities_h[i].w];
double* v0 = &v_h[offset+9*shellConnectivities_h[i].x];
double* v1 = &v_h[offset+9*shellConnectivities_h[i].y];
double* v2 = &v_h[offset+9*shellConnectivities_h[i].z];
double* v3 = &v_h[offset+9*shellConnectivities_h[i].w];
double* f0 = &f_contact_h[offset+9*shellConnectivities_h[i].x];
double* f1 = &f_contact_h[offset+9*shellConnectivities_h[i].y];
double* f2 = &f_contact_h[offset+9*shellConnectivities_h[i].z];
double* f3 = &f_contact_h[offset+9*shellConnectivities_h[i].w];
for(int j=0;j<9;j++) filestream << p0[j] << ", ";
for(int j=0;j<9;j++) filestream << p1[j] << ", ";
for(int j=0;j<9;j++) filestream << p2[j] << ", ";
for(int j=0;j<9;j++) filestream << p3[j] << ", ";
for(int j=0;j<9;j++) filestream << v0[j] << ", ";
for(int j=0;j<9;j++) filestream << v1[j] << ", ";
for(int j=0;j<9;j++) filestream << v2[j] << ", ";
for(int j=0;j<9;j++) filestream << v3[j] << ", ";
for(int j=0;j<9;j++) filestream << f0[j] << ", ";
for(int j=0;j<9;j++) filestream << f1[j] << ", ";
for(int j=0;j<9;j++) filestream << f2[j] << ", ";
for(int j=0;j<9;j++) filestream << f3[j] << ", ";
filestream << "\n";
}
for (int i = 0; i < body2Ds.size(); i++) {
// TODO: Need to know collision family information, density, elastic modulus, number of contacts (especially important when importing)
filestream << bodies.size()+beams.size()+plates.size()+i << ", ";
for(int j=0;j<3;j++) {
filestream << p_h[3*bodies.size()+12*beams.size()+36*plates.size()+3*i+j] << ", ";
}
for(int j=0;j<3;j++) {
filestream << v_h[3*bodies.size()+12*beams.size()+36*plates.size()+3*i+j] << ", ";
}
filestream
<< "\n";
}
filestream.close();
return 0;
}
int System::importSystem(string filename) {
  // Rebuild a system from a CSV snapshot previously written by exportSystem().
  // Only rigid bodies are restored for now; beam/plate import is still TODO.
  ifstream ifile(filename.c_str());
  string line;

  // Header: numBodies, numBeams, numPlates, numBodies2D (comma separated).
  getline(ifile, line);
  std::replace(line.begin(), line.end(), ',', ' ');
  int numBodies = 0;
  int numBeams = 0;
  int numPlates = 0;
  int numBodies2D = 0;
  stringstream header(line);
  header >> numBodies >> numBeams >> numPlates >> numBodies2D;

  for (int bodyNum = 0; bodyNum < numBodies; bodyNum++) {
    getline(ifile, line);
    std::replace(line.begin(), line.end(), ',', ' ');
    stringstream ss(line);

    int index;
    int isFixed;
    int shape;
    double skip; // the four quaternion placeholders ("1, 0, 0, 0") are ignored
    double3 pos;
    double3 vel;
    double3 geometry = make_double3(0, 0, 0);
    ss >> index >> isFixed >> pos.x >> pos.y >> pos.z
       >> skip >> skip >> skip >> skip
       >> vel.x >> vel.y >> vel.z >> shape;

    if (shape == 0) {
      // shape 0 = sphere: only a radius is stored
      ss >> geometry.x;
    } else {
      ss >> geometry.x >> geometry.y >> geometry.z;
    }

    Body* body = new Body(pos);
    body->setBodyFixed(isFixed);
    body->setGeometry(geometry);
    body->setVelocity(vel);
    // Spheres get m = rho*(4/3)*pi*r^3 with rho = 2600 (pi approximated as
    // 3.14159, matching the original exporter); everything else gets unit mass.
    if (shape == 0) {
      body->setMass(2600 * 4.0 * 3.14159 * pow(geometry.x, 3.0) / 3.0);
    } else {
      body->setMass(1.0);
    }
    add(body);
  }
  // TODO: IMPORT BEAMS
  return 0;
}
int System::exportMatrices(string directory) {
  // Dump the current system matrices/vectors in MatrixMarket format into
  // 'directory' so they can be inspected or replayed offline.
  cusp::io::write_matrix_market_file(D,    directory + "/D.mtx");    // constraint Jacobian
  cusp::io::write_matrix_market_file(mass, directory + "/Minv.mtx"); // inverse mass matrix
  cusp::io::write_matrix_market_file(r,    directory + "/r.mtx");
  cusp::io::write_matrix_market_file(b,    directory + "/b.mtx");
  cusp::io::write_matrix_market_file(k,    directory + "/k.mtx");
  return 0;
}
// Load a flexible shell mesh from a custom comma-separated file and append it
// to the system's host-side arrays (most are mirrored to the device here;
// fextMesh and the inverse-mass triplets are consumed later in
// initializeSystem/initializeDevice).
// File layout, in order:
//   header:  numNodes, numShells, nnz(M), nnz(invM)
//   3*numNodes coordinate-triple lines (each node carries 9 DOFs, i.e. three
//     triples per node - presumably position plus two gradient vectors;
//     TODO confirm against the mesh generator)
//   numShells connectivity lines (4 node indices per shell)
//   numShells material lines     (.y is overridden by 'stiffness')
//   numShells geometry lines     (.w is overridden by numContactPointsPerElement)
//   36*numShells map entries, 9*numNodes external-force entries,
//   nnz(M) and nnz(invM) COO triplets (1-based indices, converted to 0-based).
// 'pressure' is stored per shell element in pressureShell_h/_d.
void System::importMesh(string filename, double stiffness, int numContactPointsPerElement, double pressure) {
string temp_data;
int numShells;
int numNodes;
int numNonzeros_M;
int numNonzeros_invM;
double3 node;
int4 connectivity;
double4 material;
double4 geometry;
int map;
double force;
int iVal;
int jVal;
double val;
ifstream ifile(filename.c_str());
// header line: commas are turned into spaces so stringstream can tokenize
getline(ifile,temp_data);
for(int i=0; i<temp_data.size(); ++i){
if(temp_data[i]==','){temp_data[i]=' ';}
}
stringstream ss1(temp_data);
ss1>>numNodes>>numShells>>numNonzeros_M>>numNonzeros_invM;
// read nodes
for(int i=0; i<3*numNodes; i++) {
getline(ifile,temp_data);
for(int j=0; j<temp_data.size(); ++j){
if(temp_data[j]==','){temp_data[j]=' ';}
}
stringstream ss(temp_data);
ss>>node.x>>node.y>>node.z;
nodes_h.push_back(node);
}
// read shell connectivity
for(int i=0; i<numShells; i++) {
getline(ifile,temp_data);
for(int j=0; j<temp_data.size(); ++j){
if(temp_data[j]==','){temp_data[j]=' ';}
}
stringstream ss(temp_data);
ss>>connectivity.x>>connectivity.y>>connectivity.z>>connectivity.w;
shellConnectivities_h.push_back(connectivity);
}
shellConnectivities_d = shellConnectivities_h;
// read shell materials
for(int i=0; i<numShells; i++) {
getline(ifile,temp_data);
for(int j=0; j<temp_data.size(); ++j){
if(temp_data[j]==','){temp_data[j]=' ';}
}
stringstream ss(temp_data);
ss>>material.x>>material.y>>material.z>>material.w;
// the stiffness from the file is discarded and replaced by the caller's value
material.y = stiffness;
shellMaterials_h.push_back(material);
pressureShell_h.push_back(pressure);
}
shellMaterials_d = shellMaterials_h;
pressureShell_d = pressureShell_h;
// read shell geometries
for(int i=0; i<numShells; i++) {
getline(ifile,temp_data);
for(int j=0; j<temp_data.size(); ++j){
if(temp_data[j]==','){temp_data[j]=' ';}
}
stringstream ss(temp_data);
ss>>geometry.x>>geometry.y>>geometry.z>>geometry.w;
// .w is repurposed as the contact-grid resolution per element
geometry.w = numContactPointsPerElement;
shellGeometries_h.push_back(geometry);
}
shellGeometries_d = shellGeometries_h;
//cout << endl;
// read shell map
for(int i=0; i<numShells*36; i++) {
getline(ifile,temp_data);
for(int j=0; j<temp_data.size(); ++j){
if(temp_data[j]==','){temp_data[j]=' ';}
}
stringstream ss(temp_data);
ss>>map;
shellMap_h.push_back(map);
}
shellMap_d = shellMap_h;
shellMap0_d = shellMap_h;
// read shell external force
for(int i=0; i<numNodes*9; i++) {
getline(ifile,temp_data);
for(int j=0; j<temp_data.size(); ++j){
if(temp_data[j]==','){temp_data[j]=' ';}
}
stringstream ss(temp_data);
ss>>force;
fextMesh_h.push_back(force);
}
// read shell mass matrix
for(int i=0; i<numNonzeros_M; i++) {
getline(ifile,temp_data);
for(int j=0; j<temp_data.size(); ++j){
if(temp_data[j]==','){temp_data[j]=' ';}
}
stringstream ss(temp_data);
ss>>iVal>>jVal>>val;
massShellI_h.push_back(iVal-1); // convert from 1-based indexing
massShellJ_h.push_back(jVal-1); // convert from 1-based indexing
massShell_h.push_back(val);
}
massShellI_d = massShellI_h;
massShellJ_d = massShellJ_h;
massShell_d = massShell_h;
// read shell inverse mass matrix (kept host-side; uploaded in initializeSystem)
for(int i=0; i<numNonzeros_invM; i++) {
getline(ifile,temp_data);
for(int j=0; j<temp_data.size(); ++j){
if(temp_data[j]==','){temp_data[j]=' ';}
}
stringstream ss(temp_data);
ss>>iVal>>jVal>>val;
invMassShellI_h.push_back(iVal-1); // convert from 1-based indexing
invMassShellJ_h.push_back(jVal-1); // convert from 1-based indexing
invMassShell_h.push_back(val);
}
}
// Map element-local coordinates (xi, eta) of shell element 'shellIndex' to a
// world-space position by evaluating the element's interpolation with the
// current nodal coordinates in p_h.
// Each of the four corner nodes carries 9 DOFs laid out as p[0..2] position,
// p[3..5] a gradient scaled by element dimension a (= geometry.x) and
// p[6..8] a gradient scaled by b (= geometry.y).
// NOTE(review): the polynomial below looks machine-generated (bicubic
// Hermite-style shape functions on [0,1]x[0,1]) - confirm against the element
// formulation before editing; do not hand-modify the expressions.
double3 System::transformNodalToCartesian_shellMesh(int shellIndex, double xi, double eta)
{
double a = shellGeometries_h[shellIndex].x;
double b = shellGeometries_h[shellIndex].y;
// offset of the shell-mesh DOFs within the global state vector
int offset = plates.size()*36+12*beams.size()+3*bodies.size()+3*body2Ds.size();
double* p0 = &p_h[offset+9*shellConnectivities_h[shellIndex].x];
double* p1 = &p_h[offset+9*shellConnectivities_h[shellIndex].y];
double* p2 = &p_h[offset+9*shellConnectivities_h[shellIndex].z];
double* p3 = &p_h[offset+9*shellConnectivities_h[shellIndex].w];
double3 pos;
pos.x = -eta*p2[0]*xi*(eta*-3.0-xi*3.0+(eta*eta)*2.0+(xi*xi)*2.0+1.0)-p1[0]*xi*(eta-1.0)*(eta+xi*3.0-(eta*eta)*2.0-(xi*xi)*2.0)-eta*p3[0]*(xi-1.0)*(eta*3.0+xi-(eta*eta)*2.0-(xi*xi)*2.0)+p0[0]*(eta-1.0)*(xi-1.0)*(eta+xi-(eta*eta)*2.0-(xi*xi)*2.0+1.0)+b*eta*p1[6]*xi*pow(eta-1.0,2.0)+b*(eta*eta)*p2[6]*xi*(eta-1.0)+a*eta*p2[3]*(xi*xi)*(xi-1.0)+a*eta*p3[3]*xi*pow(xi-1.0,2.0)-b*eta*p0[6]*pow(eta-1.0,2.0)*(xi-1.0)-b*(eta*eta)*p3[6]*(eta-1.0)*(xi-1.0)-a*p0[3]*xi*(eta-1.0)*pow(xi-1.0,2.0)-a*p1[3]*(xi*xi)*(eta-1.0)*(xi-1.0);
pos.y = -eta*p2[1]*xi*(eta*-3.0-xi*3.0+(eta*eta)*2.0+(xi*xi)*2.0+1.0)-p1[1]*xi*(eta-1.0)*(eta+xi*3.0-(eta*eta)*2.0-(xi*xi)*2.0)-eta*p3[1]*(xi-1.0)*(eta*3.0+xi-(eta*eta)*2.0-(xi*xi)*2.0)+p0[1]*(eta-1.0)*(xi-1.0)*(eta+xi-(eta*eta)*2.0-(xi*xi)*2.0+1.0)+b*eta*p1[7]*xi*pow(eta-1.0,2.0)+b*(eta*eta)*p2[7]*xi*(eta-1.0)+a*eta*p2[4]*(xi*xi)*(xi-1.0)+a*eta*p3[4]*xi*pow(xi-1.0,2.0)-b*eta*p0[7]*pow(eta-1.0,2.0)*(xi-1.0)-b*(eta*eta)*p3[7]*(eta-1.0)*(xi-1.0)-a*p0[4]*xi*(eta-1.0)*pow(xi-1.0,2.0)-a*p1[4]*(xi*xi)*(eta-1.0)*(xi-1.0);
pos.z = -eta*p2[2]*xi*(eta*-3.0-xi*3.0+(eta*eta)*2.0+(xi*xi)*2.0+1.0)-p1[2]*xi*(eta-1.0)*(eta+xi*3.0-(eta*eta)*2.0-(xi*xi)*2.0)-eta*p3[2]*(xi-1.0)*(eta*3.0+xi-(eta*eta)*2.0-(xi*xi)*2.0)+p0[2]*(eta-1.0)*(xi-1.0)*(eta+xi-(eta*eta)*2.0-(xi*xi)*2.0+1.0)+b*eta*p1[8]*xi*pow(eta-1.0,2.0)+b*(eta*eta)*p2[8]*xi*(eta-1.0)+a*eta*p2[5]*(xi*xi)*(xi-1.0)+a*eta*p3[5]*xi*pow(xi-1.0,2.0)-b*eta*p0[8]*pow(eta-1.0,2.0)*(xi-1.0)-b*(eta*eta)*p3[8]*(eta-1.0)*(xi-1.0)-a*p0[5]*xi*(eta-1.0)*pow(xi-1.0,2.0)-a*p1[5]*(xi*xi)*(eta-1.0)*(xi-1.0);
return pos;
}
| 59b38376c17c9f39a83750b6c654f0446cee8a9a.cu | #include <algorithm>
#include <vector>
#include "include.cuh"
#include "System.cuh"
#include "Solver.cuh"
#include "APGD.cuh"
#include "PDIP.cuh"
#include "TPAS.cuh"
#include "JKIP.cuh"
#include "PJKIP.cuh"
#include "PGJ.cuh"
#include "PGS.cuh"
// Default-construct a simulation system: gravity along -y, friction 0.25,
// solver tolerance 1e-8, step size 1 ms, and an APGD solver.
// Also tabulates Gauss-Legendre quadrature data on [-1,1] (3-, 5- and 6-point
// weights wtN and abscissae ptN) used for flexible-element integrals.
System::System()
{
// default simulation parameters and counters
gravity = make_double3(0,-9.81,0);
frictionCoefficient = 0.25;
tol = 1e-8;
h = 1e-3;
timeIndex = 0;
time = 0;
elapsedTime = 0;
totalGPUMemoryUsed = 0;
offsetConstraintsDOF = 0;
objectiveCCP = 0;
// helper objects owned by the system
collisionDetector = new CollisionDetector(this);
solver = new APGD(this);
// 3-point Gauss-Legendre rule
wt3.push_back(5.0 / 9.0);
wt3.push_back(8.0 / 9.0);
wt3.push_back(5.0 / 9.0);
pt3.push_back(-sqrt(3.0 / 5.0));
pt3.push_back(0.0);
pt3.push_back(sqrt(3.0 / 5.0));
// 5-point Gauss-Legendre rule
wt5.push_back((322. - 13. * sqrt(70.)) / 900.);
wt5.push_back((322. + 13. * sqrt(70.)) / 900.);
wt5.push_back(128. / 225.);
wt5.push_back((322. + 13. * sqrt(70.)) / 900.);
wt5.push_back((322. - 13. * sqrt(70.)) / 900.);
pt5.push_back(-(sqrt(5. + 2. * sqrt(10. / 7.))) / 3.);
pt5.push_back(-(sqrt(5. - 2. * sqrt(10. / 7.))) / 3.);
pt5.push_back(0.);
pt5.push_back((sqrt(5. - 2. * sqrt(10. / 7.))) / 3.);
pt5.push_back((sqrt(5. + 2. * sqrt(10. / 7.))) / 3.);
// 6-point Gauss-Legendre rule (tabulated to 8 decimals)
wt6.push_back(0.17132449);
wt6.push_back(0.36076157);
wt6.push_back(0.46791393);
wt6.push_back(0.46791393);
wt6.push_back(0.36076157);
wt6.push_back(0.17132449);
pt6.push_back(-0.93246951);
pt6.push_back(-0.66120939);
pt6.push_back(-0.23861918);
pt6.push_back(0.23861918);
pt6.push_back(0.66120939);
pt6.push_back(0.93246951);
}
// Construct a system with an explicit solver choice:
//   1=APGD, 2=PDIP, 3=TPAS, 4=JKIP, 5=PGJ, 6=PGS, 7=PJKIP; anything else
// falls back to APGD. Everything else matches the default constructor.
System::System(int solverType)
{
// default simulation parameters and counters
gravity = make_double3(0,-9.81,0);
frictionCoefficient = 0.25;
tol = 1e-8;
h = 1e-3;
timeIndex = 0;
time = 0;
elapsedTime = 0;
totalGPUMemoryUsed = 0;
offsetConstraintsDOF = 0;
objectiveCCP = 0;
collisionDetector = new CollisionDetector(this);
// pick the complementarity-problem solver implementation
switch(solverType) {
case 1:
solver = new APGD(this);
break;
case 2:
solver = new PDIP(this);
break;
case 3:
solver = new TPAS(this);
break;
case 4:
solver = new JKIP(this);
break;
case 5:
solver = new PGJ(this);
break;
case 6:
solver = new PGS(this);
break;
case 7:
solver = new PJKIP(this);
break;
default:
solver = new APGD(this);
}
// Gauss-Legendre quadrature tables (same as default constructor)
wt3.push_back(5.0 / 9.0);
wt3.push_back(8.0 / 9.0);
wt3.push_back(5.0 / 9.0);
pt3.push_back(-sqrt(3.0 / 5.0));
pt3.push_back(0.0);
pt3.push_back(sqrt(3.0 / 5.0));
wt5.push_back((322. - 13. * sqrt(70.)) / 900.);
wt5.push_back((322. + 13. * sqrt(70.)) / 900.);
wt5.push_back(128. / 225.);
wt5.push_back((322. + 13. * sqrt(70.)) / 900.);
wt5.push_back((322. - 13. * sqrt(70.)) / 900.);
pt5.push_back(-(sqrt(5. + 2. * sqrt(10. / 7.))) / 3.);
pt5.push_back(-(sqrt(5. - 2. * sqrt(10. / 7.))) / 3.);
pt5.push_back(0.);
pt5.push_back((sqrt(5. - 2. * sqrt(10. / 7.))) / 3.);
pt5.push_back((sqrt(5. + 2. * sqrt(10. / 7.))) / 3.);
wt6.push_back(0.17132449);
wt6.push_back(0.36076157);
wt6.push_back(0.46791393);
wt6.push_back(0.46791393);
wt6.push_back(0.36076157);
wt6.push_back(0.17132449);
pt6.push_back(-0.93246951);
pt6.push_back(-0.66120939);
pt6.push_back(-0.23861918);
pt6.push_back(0.23861918);
pt6.push_back(0.66120939);
pt6.push_back(0.93246951);
}
void System::setTimeStep(double step_size)
{
  // Integration step size h used by DoTimeStep().
  this->h = step_size;
}
void System::setFrictionCoefficient(double mu)
{
  // Global friction coefficient applied to contacts (see constructContactJacobian).
  this->frictionCoefficient = mu;
}
int System::add(Body* body) {
  // Register a rigid body with the system.
  bodies.push_back(body);
  return (int)bodies.size(); // number of bodies now registered
}
int System::add(Beam* beam) {
  // Register a flexible beam; the beam keeps a back-pointer to its system.
  beam->sys = this;
  beams.push_back(beam);
  return (int)beams.size(); // number of beams now registered
}
int System::add(Plate* plate) {
  // Register a shell plate; the plate keeps a back-pointer to its system.
  plate->sys = this;
  plates.push_back(plate);
  return (int)plates.size(); // number of plates now registered
}
int System::add(Body2D* body2D) {
  // Register a planar (2D) body; it keeps a back-pointer to its system.
  body2D->sys = this;
  body2Ds.push_back(body2D);
  return (int)body2Ds.size(); // number of 2D bodies now registered
}
// Mirror all host-side state to the device and build the cusp/thrust views
// over the raw device buffers (p, v, f, mass matrix, etc.). Must run after
// all bodies/beams/plates/meshes have been added (called by initializeSystem).
// Returns 0 on success.
int System::initializeDevice() {
// host -> device copies of state, force and bookkeeping vectors
indices_d = indices_h;
p_d = p_h;
v_d = v_h;
a_d = a_h;
f_d = f_h;
f_contact_d = f_contact_h;
tmp_d = tmp_h;
r_d = r_h;
b_d = b_h;
k_d = k_h;
// gamma/friction are sized from a_h here; NOTE(review): presumably only the
// sizes matter and the solver overwrites the contents - confirm.
gamma_d = a_h;
friction_d = a_h;
fApplied_d = fApplied_h;
fElastic_d = fElastic_h;
massI_d = massI_h;
massJ_d = massJ_h;
mass_d = mass_h;
contactGeometry_d = contactGeometry_h;
collisionGeometry_d = collisionGeometry_h;
collisionMap_d = collisionMap_h;
materialsBeam_d = materialsBeam_h;
materialsPlate_d = materialsPlate_h;
materialsBody2D_d = materialsBody2D_h;
fixedBodies_d = fixedBodies_h;
strainDerivative_d = strainDerivative_h;
strain_d = strain_h;
strainEnergy_d = strainEnergy_h;
strainPlate_d = strainPlate_h;
strainEnergyPlate_d = strainEnergyPlate_h;
strainDerivativePlate_d = strainDerivativePlate_h;
curvatureDerivativePlate_d = curvatureDerivativePlate_h;
Sx_d = Sx_h;
Sxx_d = Sxx_h;
Sy_d = Sy_h;
Syy_d = Syy_h;
strainPlate0_d = strainPlate0_h;
curvaturePlate0_d = curvaturePlate0_h;
strainBeam0_d = strainBeam0_h;
curvatureBeam0_d = curvatureBeam0_h;
// Shell Mesh Initialization
fElasticShellMesh_d = fElasticShellMesh_h;
strainShellMesh_d = strainShellMesh_h;
strainEnergyShellMesh_d = strainEnergyShellMesh_h;
strainDerivativeShellMesh_d = strainDerivativeShellMesh_h;
curvatureDerivativeShellMesh_d = curvatureDerivativeShellMesh_h;
Sx_shellMesh_d = Sx_shellMesh_h;
Sxx_shellMesh_d = Sxx_shellMesh_h;
Sy_shellMesh_d = Sy_shellMesh_h;
Syy_shellMesh_d = Syy_shellMesh_h;
strainShellMesh0_d = strainShellMesh0_h;
curvatureShellMesh0_d = curvatureShellMesh0_h;
// End Shell Mesh Initialization
// Wrap the raw device pointers so cusp can treat them as dense vectors.
thrust::device_ptr<double> wrapped_device_p(CASTD1(p_d));
thrust::device_ptr<double> wrapped_device_v(CASTD1(v_d));
thrust::device_ptr<double> wrapped_device_a(CASTD1(a_d));
thrust::device_ptr<double> wrapped_device_f(CASTD1(f_d));
thrust::device_ptr<double> wrapped_device_f_contact(CASTD1(f_contact_d));
thrust::device_ptr<double> wrapped_device_fApplied(CASTD1(fApplied_d));
thrust::device_ptr<double> wrapped_device_fElastic(CASTD1(fElastic_d));
thrust::device_ptr<double> wrapped_device_tmp(CASTD1(tmp_d));
thrust::device_ptr<double> wrapped_device_r(CASTD1(r_d));
thrust::device_ptr<double> wrapped_device_b(CASTD1(b_d));
thrust::device_ptr<double> wrapped_device_k(CASTD1(k_d));
thrust::device_ptr<double> wrapped_device_gamma(CASTD1(gamma_d));
// Shell-mesh DOFs start after all bodies/beams/plates/2D-bodies in the
// global state layout; *_shellMesh views alias just that tail portion.
int offset_shellMesh = plates.size()*36+12*beams.size()+3*bodies.size()+3*body2Ds.size();
p = DeviceValueArrayView(wrapped_device_p, wrapped_device_p + p_d.size());
v = DeviceValueArrayView(wrapped_device_v, wrapped_device_v + v_d.size());
v_shellMesh = DeviceValueArrayView(wrapped_device_v + offset_shellMesh, wrapped_device_v + v_d.size());
a = DeviceValueArrayView(wrapped_device_a, wrapped_device_a + a_d.size());
f = DeviceValueArrayView(wrapped_device_f, wrapped_device_f + f_d.size());
f_contact = DeviceValueArrayView(wrapped_device_f_contact, wrapped_device_f_contact + f_contact_d.size());
fApplied = DeviceValueArrayView(wrapped_device_fApplied, wrapped_device_fApplied + fApplied_d.size());
fElastic = DeviceValueArrayView(wrapped_device_fElastic, wrapped_device_fElastic + fElastic_d.size());
tmp = DeviceValueArrayView(wrapped_device_tmp, wrapped_device_tmp + tmp_d.size());
tmp_shellMesh = DeviceValueArrayView(wrapped_device_tmp + offset_shellMesh, wrapped_device_tmp + tmp_d.size());
r = DeviceValueArrayView(wrapped_device_r, wrapped_device_r + r_d.size());
b = DeviceValueArrayView(wrapped_device_b, wrapped_device_b + b_d.size());
k = DeviceValueArrayView(wrapped_device_k, wrapped_device_k + k_d.size());
k_shellMesh = DeviceValueArrayView(wrapped_device_k + offset_shellMesh, wrapped_device_k + k_d.size());
gamma = DeviceValueArrayView(wrapped_device_gamma, wrapped_device_gamma + gamma_d.size());
// create mass matrix using cusp library (shouldn't change)
// COO view of the (inverse) mass matrix entries pushed in initializeSystem
thrust::device_ptr<int> wrapped_device_I(CASTI1(massI_d));
DeviceIndexArrayView row_indices = DeviceIndexArrayView(wrapped_device_I, wrapped_device_I + massI_d.size());
thrust::device_ptr<int> wrapped_device_J(CASTI1(massJ_d));
DeviceIndexArrayView column_indices = DeviceIndexArrayView(wrapped_device_J, wrapped_device_J + massJ_d.size());
thrust::device_ptr<double> wrapped_device_V(CASTD1(mass_d));
DeviceValueArrayView values = DeviceValueArrayView(wrapped_device_V, wrapped_device_V + mass_d.size());
mass = DeviceView(a_d.size(), a_d.size(), mass_d.size(), row_indices, column_indices, values);
mass.sort_by_row();
// end create mass matrix
// create shellMesh mass matrix using cusp
thrust::device_ptr<int> wrapped_device_I_shell(CASTI1(massShellI_d));
DeviceIndexArrayView row_indices_shell = DeviceIndexArrayView(wrapped_device_I_shell, wrapped_device_I_shell + massShellI_h.size());
thrust::device_ptr<int> wrapped_device_J_shell(CASTI1(massShellJ_d));
DeviceIndexArrayView column_indices_shell = DeviceIndexArrayView(wrapped_device_J_shell, wrapped_device_J_shell + massShellJ_h.size());
thrust::device_ptr<double> wrapped_device_V_shell(CASTD1(massShell_d));
DeviceValueArrayView values_shell = DeviceValueArrayView(wrapped_device_V_shell, wrapped_device_V_shell + massShell_d.size());
// nodes_h holds 3 coordinate-triples per node, so 3*nodes_h.size() equals
// the shell mesh's total DOF count (9 per node).
mass_shellMesh = DeviceView(3*nodes_h.size(), 3*nodes_h.size(), massShell_d.size(), row_indices_shell, column_indices_shell, values_shell);
mass_shellMesh.sort_by_row();
// end create shellMesh mass matrix using cusp
// calculate initialize strains and curvatures
calculateInitialStrainAndCurvature();
// resolve constraint offsets/targets, then upload the constraint tables
processConstraints();
offsetBilaterals_d = offsetBilaterals_h;
constraintsBilateralDOF_d = constraintsBilateralDOF_h;
infoConstraintBilateralDOF_d = infoConstraintBilateralDOF_h;
constraintsSpherical_ShellNodeToBody2D_d =constraintsSpherical_ShellNodeToBody2D_h;
pSpherical_ShellNodeToBody2D_d = pSpherical_ShellNodeToBody2D_h;
return 0;
}
// Post-process user-supplied constraints before device upload:
//  - assigns each DOF-bilateral constraint a starting slot in the Jacobian
//    nonzero arrays (1 slot when DOFB < 0, i.e. a pinned/driven DOF whose
//    initial coordinate is recorded in info.z; 2 slots otherwise), and
//  - rewrites each ShellNodeToBody2D spherical constraint so that .x/.y hold
//    the flat state-vector offsets of the node and the 2D body, recording the
//    initial relative position used by the Jacobian kernel.
int System::processConstraints() {
// process the DOF bilaterals
int offset = 0;
for(int i=0;i<constraintsBilateralDOF_h.size();i++) {
offsetBilaterals_h.push_back(offset);
if(constraintsBilateralDOF_h[i].y<0) {
offset+=1;
infoConstraintBilateralDOF_h[i].z = p_h[constraintsBilateralDOF_h[i].x]; // need to know initial value
} else {
offset+=2;
}
}
// end process the DOF bilaterals
// process the ShellNodeToBody2D spherical constraints
for(int i=0;i<constraintsSpherical_ShellNodeToBody2D_h.size();i++) {
int indexA = constraintsSpherical_ShellNodeToBody2D_h[i].x;
int nodeIndexA = constraintsSpherical_ShellNodeToBody2D_h[i].y;
int indexB = constraintsSpherical_ShellNodeToBody2D_h[i].z;
int offsetA;
int offsetB;
if(indexA==-1) {
// shell mesh: node lives in the global shell-mesh DOF block (9 DOFs/node)
offsetA = 3*bodies.size()+12*beams.size()+36*plates.size()+3*body2Ds.size()+9*nodeIndexA;
offsetB = 3*bodies.size()+12*beams.size()+36*plates.size()+3*indexB;
} else {
// plate: node lives inside plate indexA's 36-DOF block
offsetA = 3*bodies.size()+12*beams.size()+36*indexA+9*nodeIndexA;
offsetB = 3*bodies.size()+12*beams.size()+36*plates.size()+3*indexB;
}
constraintsSpherical_ShellNodeToBody2D_h[i].x = offsetA; // NOTE: Reset value to offsets! Easier for later constraint processing
constraintsSpherical_ShellNodeToBody2D_h[i].y = offsetB; // NOTE: Reset value to offsets! Easier for later constraint processing
// NOTE(review): x/y store the relative position but z stores the node's
// absolute z coordinate - the Jacobian kernel only reads pHat.x/pHat.y,
// so this may be intentional; confirm before relying on pHat.z.
pSpherical_ShellNodeToBody2D_h.push_back(make_double3(p_h[offsetA]-p_h[offsetB],p_h[offsetA+1]-p_h[offsetB+1],p_h[offsetA+2]));
}
// end process the ShellNodeToBody2D spherical constraints
return 0;
}
// Build the global state layout from everything that has been add()'ed,
// fill the host-side vectors (positions, velocities, forces, mass triplets,
// contact geometry), then upload to the device and set up the solver.
// Global DOF layout: [3 per body | 12 per beam | 36 per plate | 3 per 2D body
// | 9 per shell-mesh node]. Must be called once, after all add() calls and
// importMesh(), before DoTimeStep().
int System::initializeSystem() {
for(int j=0; j<bodies.size(); j++) {
Body* body = bodies[j];
body->setIdentifier(j); // Indicates the number that the Body was added
body->setIndex(p_h.size()); // Indicates the Body's location in the position array
// Push Body's location to global library
indices_h.push_back(p_h.size());
// update p
p_h.push_back(body->pos.x);
p_h.push_back(body->pos.y);
p_h.push_back(body->pos.z);
// update v
v_h.push_back(body->vel.x);
v_h.push_back(body->vel.y);
v_h.push_back(body->vel.z);
// update a
a_h.push_back(body->acc.x);
a_h.push_back(body->acc.y);
a_h.push_back(body->acc.z);
// update external force vector (gravity); fixed bodies get zero force
if(body->isFixed()) {
f_h.push_back(0);
f_h.push_back(0);
f_h.push_back(0);
}
else {
f_h.push_back(body->mass * this->gravity.x);
f_h.push_back(body->mass * this->gravity.y);
f_h.push_back(body->mass * this->gravity.z);
}
// zero-initialized work vectors for this body's 3 DOFs
f_contact_h.push_back(0);
f_contact_h.push_back(0);
f_contact_h.push_back(0);
fApplied_h.push_back(0);
fApplied_h.push_back(0);
fApplied_h.push_back(0);
fElastic_h.push_back(0);
fElastic_h.push_back(0);
fElastic_h.push_back(0);
tmp_h.push_back(0);
tmp_h.push_back(0);
tmp_h.push_back(0);
// NOTE(review): r gets 6 entries per body while the other vectors get 3 -
// confirm this sizing is intentional (r may live in a different space).
r_h.push_back(0);
r_h.push_back(0);
r_h.push_back(0);
r_h.push_back(0);
r_h.push_back(0);
r_h.push_back(0);
k_h.push_back(0);
k_h.push_back(0);
k_h.push_back(0);
// update the mass matrix (diagonal inverse mass; 0 rows for fixed bodies)
for(int i = 0; i < body->numDOF; i++) {
massI_h.push_back(i + body->numDOF * j);
massJ_h.push_back(i + body->numDOF * j);
if(body->isFixed()) {
mass_h.push_back(0);
}
else {
mass_h.push_back(1.0/body->mass);
}
}
contactGeometry_h.push_back(body->contactGeometry);
collisionGeometry_h.push_back(body->contactGeometry);
collisionMap_h.push_back(make_int4(body->getIdentifier(),0,0,body->getCollisionFamily()));
if(body->isFixed()) fixedBodies_h.push_back(j);
}
// flexible elements append their own state via their add* helpers
for(int j=0; j<beams.size(); j++) {
beams[j]->addBeam(j); //TODO: Make a function like this for body (makes code cleaner)
}
for(int j=0; j<plates.size(); j++) {
plates[j]->addPlate(j);
}
for(int j=0; j<body2Ds.size(); j++) {
body2Ds[j]->addBody2D(j);
}
// add shell mesh to system (data previously loaded by importMesh)
if(nodes_h.size()) {
indices_h.push_back(p_h.size());
// update p: each nodes_h entry is one coordinate triple (3 per node)
for(int i=0; i<nodes_h.size(); i++) {
p_h.push_back(nodes_h[i].x);
p_h.push_back(nodes_h[i].y);
p_h.push_back(nodes_h[i].z);
}
// update fext with the external forces read from the mesh file
for(int i=0; i<fextMesh_h.size(); i++) {
f_h.push_back(fextMesh_h[i]);
}
// update zero vectors for the 9 DOFs of each mesh node
for(int i=0;i<3*nodes_h.size();i++) {
v_h.push_back(0);
a_h.push_back(0);
f_contact_h.push_back(0);
fApplied_h.push_back(0);
fElastic_h.push_back(0);
tmp_h.push_back(0);
k_h.push_back(0);
r_h.push_back(0);
}
// update the mass inverse: shift the mesh's COO triplets past all other DOFs
int offset = plates.size()*36+12*beams.size()+3*bodies.size()+3*body2Ds.size();
for(int i=0;i<invMassShellI_h.size();i++) {
massI_h.push_back(invMassShellI_h[i]+offset);
massJ_h.push_back(invMassShellJ_h[i]+offset);
mass_h.push_back(invMassShell_h[i]);
}
// per-shell-element scratch state and contact sampling points
for(int j=0;j<shellConnectivities_h.size();j++) {
contactGeometry_h.push_back(make_double3(shellGeometries_h[j].x,shellGeometries_h[j].y,shellGeometries_h[j].w));
for(int i=0;i<36;i++) {
fElasticShellMesh_h.push_back(0);
strainDerivativeShellMesh_h.push_back(make_double3(0,0,0));
curvatureDerivativeShellMesh_h.push_back(make_double3(0,0,0));
}
strainEnergyShellMesh_h.push_back(0);
strainShellMesh_h.push_back(make_double3(0,0,0));
// one reference strain/curvature slot per quadrature point
for(int i=0;i<wt6.size()*pt6.size();i++) strainShellMesh0_h.push_back(make_double3(0,0,0));
for(int i=0;i<wt5.size()*pt5.size();i++) curvatureShellMesh0_h.push_back(make_double3(0,0,0));
for(int i=0;i<12;i++) {
Sx_shellMesh_h.push_back(0);
Sxx_shellMesh_h.push_back(0);
Sy_shellMesh_h.push_back(0);
Syy_shellMesh_h.push_back(0);
}
// interior grid of collision spheres (radius = half thickness); family -2
// tags them as shell-mesh contact points in collisionMap
for(int i=1;i<shellGeometries_h[j].w-1;i++) {
for(int k=1;k<shellGeometries_h[j].w-1;k++) {
collisionGeometry_h.push_back(make_double3(0.5*shellGeometries_h[j].z,0,0));
collisionMap_h.push_back(make_int4(plates.size()+beams.size()+bodies.size()+body2Ds.size()+j,i,k,-2));
}
}
}
}
// mirror everything to the device and let the solver size its buffers
initializeDevice();
solver->setup();
return 0;
}
// Add a bilateral constraint between two generalized coordinates.
// DOFB < 0 pins DOFA to its initial value (1 Jacobian nonzero); otherwise the
// two DOFs are tied together (2 nonzeros). Equivalent to the driven overload
// with zero velocity and start time - delegate so the bookkeeping
// (constraint table, info record, nonzero-count offset) lives in one place.
int System::addBilateralConstraintDOF(int DOFA, int DOFB) {
return addBilateralConstraintDOF(DOFA, DOFB, 0.0, 0.0);
}
int System::addBilateralConstraintDOF(int DOFA, int DOFB, double velocity, double startTime) {
  // Add a driven bilateral constraint between two generalized coordinates.
  // DOFB < 0 prescribes DOFA's motion (velocity active from startTime,
  // 1 Jacobian nonzero); otherwise DOFA and DOFB are tied together
  // (2 nonzeros). offsetConstraintsDOF tracks total nonzeros so far.
  constraintsBilateralDOF_h.push_back(make_int2(DOFA, DOFB));
  infoConstraintBilateralDOF_h.push_back(make_double3(velocity, startTime, 0));
  offsetConstraintsDOF += (DOFB < 0) ? 1 : 2;
  return 0;
}
int System::pinShellNodeToBody2D(int shellNodeIndex, int body2Dindex) {
  // Spherical joint between a shell-mesh node and a 2D body. The leading -1
  // marks the node as belonging to the global shell mesh rather than a plate
  // (resolved to flat offsets later by processConstraints).
  constraintsSpherical_ShellNodeToBody2D_h.push_back(make_int3(-1, shellNodeIndex, body2Dindex));
  return 0;
}
int System::pinPlateNodeToBody2D(int plateIndex, int plateNodeIndex, int body2Dindex) {
  // Spherical joint between node 'plateNodeIndex' of plate 'plateIndex' and a
  // 2D body (resolved to flat state offsets later by processConstraints).
  constraintsSpherical_ShellNodeToBody2D_h.push_back(make_int3(plateIndex, plateNodeIndex, body2Dindex));
  return 0;
}
// Advance the simulation by one step of size h:
//   1. run collision detection, 2. assemble applied impulses,
//   3. if any contacts/constraints exist, build and solve the QOCC for the
//      contact impulses gamma, 4. integrate velocities and positions.
// Wall-clock time for the whole step is measured with CUDA events and stored
// in elapsedTime; GPU memory usage is reported afterwards. Returns 0.
int System::DoTimeStep() {
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
objectiveCCP = 0;
// Perform collision detection
if(collisionGeometry_d.size()) {
collisionDetector->generateAxisAlignedBoundingBoxes();
collisionDetector->detectPossibleCollisions_spatialSubdivision();
collisionDetector->detectCollisions();
}
buildAppliedImpulseVector();
if(collisionDetector->numCollisions||constraintsBilateralDOF_d.size()||constraintsSpherical_ShellNodeToBody2D_d.size()) {
// Set up the QOCC
buildContactJacobian();
buildSchurVector();
// Solve the QOCC
solver->solve();
// Perform time integration (contacts) TODO: Get rid of constraint forces in f_contact vector!
cusp::multiply(DT,gamma,f_contact);         // map impulses to generalized coordinates
cusp::blas::axpby(k,f_contact,tmp,1.0,1.0); // tmp = k + contact impulses
cusp::multiply(mass,tmp,v);                 // v = Minv * tmp
cusp::blas::scal(f_contact,1.0/h);          // report impulses as forces
}
else {
// Perform time integration (no contacts)
cusp::multiply(mass,k,v);
cusp::blas::fill(f_contact,0.0);
}
cusp::blas::axpy(v, p, h); // p += h*v (positions updated with the new velocities)
time += h;
timeIndex++;
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float execTime;
cudaEventElapsedTime(&execTime, start, stop);
elapsedTime = execTime;
// BUGFIX: the events were previously created every step and never released,
// leaking CUDA resources; destroy them once the elapsed time has been read.
cudaEventDestroy(start);
cudaEventDestroy(stop);
printf("Time: %f (Exec. Time: %f), Collisions: %d (%d possible)\n",time,elapsedTime,collisionDetector->numCollisions, (int)collisionDetector->numPossibleCollisions);
size_t avail;
size_t total;
cudaMemGetInfo( &avail, &total );
size_t used = total - avail;
totalGPUMemoryUsed = used/1000000.0;
cout << " Device memory used: " << totalGPUMemoryUsed << " MB (Avail: " << avail/1000000 << " MB)" << endl;
return 0;
}
int System::applyForce(Body* body, double3 force) {
  // Accumulate an external force on a rigid body (cleared by
  // clearAppliedForces()). getIndex() is the body's offset into the flat
  // generalized-coordinate arrays, so the components land on its x/y/z DOFs.
  int offset = body->getIndex();
  fApplied_h[offset + 0] += force.x;
  fApplied_h[offset + 1] += force.y;
  fApplied_h[offset + 2] += force.z;
  return 0;
}
int System::clearAppliedForces() {
  // Zero the applied-force vector on the device, then mirror it to the host
  // so both copies agree.
  Thrust_Fill(fApplied_d, 0.0);
  fApplied_h = fApplied_d;
  return 0;
}
// Fill the bilateral-constraint rows of the constraint Jacobian D (COO form).
// One thread per DOF-bilateral constraint. Each constraint writes +1 on DOF A
// and, when DOF B is a real coordinate (y >= 0), -1 on DOF B; y < 0 means
// DOF A is pinned/driven and only one nonzero is emitted.
// offsets[] gives each constraint's starting slot in DI/DJ/D (1 or 2 nonzeros
// per constraint, as computed by System::processConstraints).
__global__ void constructBilateralJacobian(int2* constraintBilateralDOF, int* offsets, int* DI, int* DJ, double* D, uint numConstraintsBilateral) {
INIT_CHECK_THREAD_BOUNDED(INDEX1D, numConstraintsBilateral);
int2 bilateralDOFs = constraintBilateralDOF[index];
int offset = offsets[index];
// row = constraint index, column = constrained DOF
DI[offset] = index;
DJ[offset] = bilateralDOFs.x;
D[offset] = 1.0;
if(bilateralDOFs.y>=0)
{
DI[offset+1] = index;
DJ[offset+1] = bilateralDOFs.y;
D[offset+1] = -1.0;
}
}
// Fill the Jacobian rows for spherical (pin) constraints between a shell or
// plate node and a 2D body. One thread per constraint; each constraint
// contributes 3 rows (x, y, z) and 7 nonzeros, placed after the
// bilateral-DOF block (offsetConstraintsDOF slots, numConstraintsDOF rows).
// constraint.x/.y were rewritten by System::processConstraints to hold the
// flat state offsets of the node (offsetS) and the 2D body (offsetB);
// pHat.x/pHat.y is the attachment point's initial offset in the body frame,
// rotated here by the body's angle phi = p[offsetB+2].
__global__ void constructSpherical_ShellNodeToBody2DJacobian(int3* constraints, double3* pHats, double* p, int* DI, int* DJ, double* D, int numConstraintsDOF, int offsetConstraintsDOF, uint numConstraints) {
INIT_CHECK_THREAD_BOUNDED(INDEX1D, numConstraints);
int offset = offsetConstraintsDOF;
int3 constraint = constraints[index];
double3 pHat = pHats[index];
int offsetS = constraint.x;
int offsetB = constraint.y;
// row indices: 3 constraint equations stacked after the DOF bilaterals
DI[7*index+0+offset] = 3*index+numConstraintsDOF;
DI[7*index+1+offset] = 3*index+numConstraintsDOF;
DI[7*index+2+offset] = 3*index+numConstraintsDOF;
DI[7*index+3+offset] = 3*index+1+numConstraintsDOF;
DI[7*index+4+offset] = 3*index+1+numConstraintsDOF;
DI[7*index+5+offset] = 3*index+1+numConstraintsDOF;
DI[7*index+6+offset] = 3*index+2+numConstraintsDOF;
// column indices: body x/y/angle DOFs and the node's x/y/z DOFs
DJ[7*index+0+offset] = offsetB;
DJ[7*index+1+offset] = offsetB+2;
DJ[7*index+2+offset] = offsetS;
DJ[7*index+3+offset] = offsetB+1;
DJ[7*index+4+offset] = offsetB+2;
DJ[7*index+5+offset] = offsetS+1;
DJ[7*index+6+offset] = offsetS+2;
// entries: body translation couples directly; the angle column carries the
// derivative of the rotated attachment point w.r.t. phi
double phi = p[offsetB+2];
D[7*index+0+offset] = 1.0;
D[7*index+1+offset] = -pHat.x*sin(phi)-pHat.y*cos(phi);
D[7*index+2+offset] = -1.0;
D[7*index+3+offset] = 1.0;
D[7*index+4+offset] = pHat.x*cos(phi)-pHat.y*sin(phi);
D[7*index+5+offset] = -1.0;
D[7*index+6+offset] = -1.0;
}
// Fills this contact's slice of the global constraint Jacobian D (COO format:
// DI = row, DJ = column, D = value). One thread per collision (1D grid).
// Each collision contributes 3 constraint rows — normal n, tangent u, tangent v —
// and each row holds the contact shape-function entries of body A (positive sign)
// followed by body B (negative sign). Rows are offset by numConstraintsBilateral
// so they sit below the bilateral-constraint rows; nonzero slots are offset by
// offsetConstraintsBilateral for the same reason.
// nonzerosPerContact_d is the INCLUSIVE scan of per-contact nonzero counts, so
// the previous entry is this contact's starting slot.
// NOTE(review): the index loops give a 2D body endA/endB = 3, but the value
// sections below only distinguish body (<numBodies), beam (<numBodies+numBeams),
// and "else" (36 entries) — a body2D contact would write 3 index entries but 36
// values, misaligning D against DI/DJ. Confirm body2Ds never reach this kernel,
// or add a dedicated body2D branch to the value sections.
__global__ void constructContactJacobian(int* nonzerosPerContact_d, int4* collisionMap, int4* connectivities, double3* geometries, double3* collisionGeometry, int* DI, int* DJ, double* D, double* friction, double mu, double4* normalsAndPenetrations, uint* collisionIdentifierA, uint* collisionIdentifierB, int* indices, int numBodies, int numBeams, int numPlates, int numBodys2D, uint offsetConstraintsBilateral, uint numConstraintsBilateral, uint numCollisions) {
  INIT_CHECK_THREAD_BOUNDED(INDEX1D, numCollisions);
  friction[index] = mu; // TODO: EDIT THIS TO BE MINIMUM OF FRICTION COEFFICIENTS
  bool shellMeshA = false;
  bool shellMeshB = false;
  int4 connectivityA;
  int4 connectivityB;
  // First generalized-coordinate index of the shell-mesh node block
  // (bodies, beams, plates, and 2D bodies come first in the state vector).
  int shellOffset = 3*numBodies+12*numBeams+36*numPlates+3*numBodys2D;
  // Start of this contact's nonzero slots: previous entry of the inclusive scan.
  int offsetA = (!index) ? 0 : nonzerosPerContact_d[index - 1];
  offsetA+=offsetConstraintsBilateral; // add offset for bilateral constraints
  // Rebase the output arrays so all writes below are contact-local.
  DI = &DI[offsetA];
  DJ = &DJ[offsetA];
  D = &D[offsetA];
  int bodyIdentifierA = collisionMap[collisionIdentifierA[index]].x;
  int bodyIdentifierB = collisionMap[collisionIdentifierB[index]].x;
  // Classify body A by its position in the global body ordering; endA is the
  // number of generalized coordinates (Jacobian columns) it contributes per row.
  int endA = 3;
  if(bodyIdentifierA<numBodies) {
    endA = 3; // body
  }
  else if(bodyIdentifierA<(numBodies+numBeams)) {
    endA = 12; // beam
  }
  else if(bodyIdentifierA<(numBodies+numBeams+numPlates)) {
    endA = 36; // plate
  }
  else if(bodyIdentifierA<(numBodies+numBeams+numPlates+numBodys2D)) {
    endA = 3; // body2D
  }
  else {
    endA = 36; // shellMesh
    // Shell elements gather DOFs from 4 connected nodes (9 DOFs each).
    connectivityA = connectivities[bodyIdentifierA-(numBodies+numBeams+numPlates+numBodys2D)];
  }
  // Same classification for body B.
  int endB = 3;
  if(bodyIdentifierB<numBodies) {
    endB = 3; // body
  }
  else if(bodyIdentifierB<(numBodies+numBeams)) {
    endB = 12; // beam
  }
  else if(bodyIdentifierB<(numBodies+numBeams+numPlates)) {
    endB = 36; // plate
  }
  else if(bodyIdentifierB<(numBodies+numBeams+numPlates+numBodys2D)) {
    endB = 3; // body2D
  }
  else {
    endB = 36; // shellMesh
    shellMeshB = true;
    connectivityB = connectivities[bodyIdentifierB-(numBodies+numBeams+numPlates+numBodys2D)];
  }
  // First generalized-coordinate index of each body in the state vector.
  int indexA = indices[bodyIdentifierA];
  int indexB = indices[bodyIdentifierB];
  // Normalized parametric coordinates of the contact point on each element,
  // recovered from the collision map's integer grid indices (collisionMap.y/.z)
  // divided by the grid resolution (geometries.z presumably holds the number of
  // sample points per direction — TODO confirm against the collision detector).
  double xiA = static_cast<double>(collisionMap[collisionIdentifierA[index]].y)/(static_cast<double>(geometries[bodyIdentifierA].z-1));
  double etaA = static_cast<double>(collisionMap[collisionIdentifierA[index]].z)/(static_cast<double>(geometries[bodyIdentifierA].z-1));
  double aA = geometries[bodyIdentifierA].x;
  double bA = geometries[bodyIdentifierA].y;
  double lA = bA;
  double xiB = static_cast<double>(collisionMap[collisionIdentifierB[index]].y)/(static_cast<double>(geometries[bodyIdentifierB].z-1));
  double etaB = static_cast<double>(collisionMap[collisionIdentifierB[index]].z)/(static_cast<double>(geometries[bodyIdentifierB].z-1));
  double aB = geometries[bodyIdentifierB].x;
  double bB = geometries[bodyIdentifierB].y;
  double lB = bB;
  double4 nAndP;
  double3 n, u, v;
  nAndP = normalsAndPenetrations[index];
  n = make_double3(nAndP.x,nAndP.y,nAndP.z);
  // Build tangent u orthogonal to n; each branch divides only by a component
  // known to be nonzero. In the final branch n.x == n.z == 0, so u = (1,0,0).
  if(n.z != 0) {
    u = normalize(make_double3(1,0,-n.x/n.z));
  }
  else if(n.x != 0) {
    u = normalize(make_double3(-n.z/n.x,0,1));
  }
  else {
    u = normalize(make_double3(1,-n.x/n.y,0));
  }
  v = normalize(cross(n,u));
  // Add n, i indices
  // Row/column indices are laid out in 6 consecutive runs:
  // (row n: A cols, B cols), (row u: A cols, B cols), (row v: A cols, B cols).
  // Shell-mesh columns map local DOF j to the 9-DOF block of one of the 4
  // connected nodes; all other bodies use a contiguous block starting at indexA/B.
  int i;
  int end = endA;
  int j = 0;
  for(i=0;i<end;i++) {
    DI[i] = 3*index+0+numConstraintsBilateral;
    if(shellMeshA) {
      if(j<9) {
        DJ[i] = shellOffset+9*connectivityA.x+j;
      }
      else if (j<18) {
        DJ[i] = shellOffset-9+9*connectivityA.y+j;
      }
      else if (j<27) {
        DJ[i] = shellOffset-18+9*connectivityA.z+j;
      }
      else {
        DJ[i] = shellOffset-27+9*connectivityA.w+j;
      }
    } else {
      DJ[i] = indexA+j;
    }
    j++;
  }
  end+=endB;
  j = 0;
  for(;i<end;i++) {
    DI[i] = 3*index+0+numConstraintsBilateral;
    if(shellMeshB) {
      if(j<9) {
        DJ[i] = shellOffset+9*connectivityB.x+j;
      }
      else if (j<18) {
        DJ[i] = shellOffset-9+9*connectivityB.y+j;
      }
      else if (j<27) {
        DJ[i] = shellOffset-18+9*connectivityB.z+j;
      }
      else {
        DJ[i] = shellOffset-27+9*connectivityB.w+j;
      }
    } else {
      DJ[i] = indexB+j;
    }
    j++;
  }
  // Add u, i indices
  end+=endA;
  j = 0;
  for(;i<end;i++) {
    DI[i] = 3*index+1+numConstraintsBilateral;
    if(shellMeshA) {
      if(j<9) {
        DJ[i] = shellOffset+9*connectivityA.x+j;
      }
      else if (j<18) {
        DJ[i] = shellOffset-9+9*connectivityA.y+j;
      }
      else if (j<27) {
        DJ[i] = shellOffset-18+9*connectivityA.z+j;
      }
      else {
        DJ[i] = shellOffset-27+9*connectivityA.w+j;
      }
    } else {
      DJ[i] = indexA+j;
    }
    j++;
  }
  end+=endB;
  j = 0;
  for(;i<end;i++) {
    DI[i] = 3*index+1+numConstraintsBilateral;
    if(shellMeshB) {
      if(j<9) {
        DJ[i] = shellOffset+9*connectivityB.x+j;
      }
      else if (j<18) {
        DJ[i] = shellOffset-9+9*connectivityB.y+j;
      }
      else if (j<27) {
        DJ[i] = shellOffset-18+9*connectivityB.z+j;
      }
      else {
        DJ[i] = shellOffset-27+9*connectivityB.w+j;
      }
    } else {
      DJ[i] = indexB+j;
    }
    j++;
  }
  // Add v, i indices
  end+=endA;
  j = 0;
  for(;i<end;i++) {
    DI[i] = 3*index+2+numConstraintsBilateral;
    if(shellMeshA) {
      if(j<9) {
        DJ[i] = shellOffset+9*connectivityA.x+j;
      }
      else if (j<18) {
        DJ[i] = shellOffset-9+9*connectivityA.y+j;
      }
      else if (j<27) {
        DJ[i] = shellOffset-18+9*connectivityA.z+j;
      }
      else {
        DJ[i] = shellOffset-27+9*connectivityA.w+j;
      }
    } else {
      DJ[i] = indexA+j;
    }
    j++;
  }
  end+=endB;
  j = 0;
  for(;i<end;i++) {
    DI[i] = 3*index+2+numConstraintsBilateral;
    if(shellMeshB) {
      if(j<9) {
        DJ[i] = shellOffset+9*connectivityB.x+j;
      }
      else if (j<18) {
        DJ[i] = shellOffset-9+9*connectivityB.y+j;
      }
      else if (j<27) {
        DJ[i] = shellOffset-18+9*connectivityB.z+j;
      }
      else {
        DJ[i] = shellOffset-27+9*connectivityB.w+j;
      }
    } else {
      DJ[i] = indexB+j;
    }
    j++;
  }
  // Add n, values
  // Values are written in the same 6-run order as the indices above: the contact
  // direction (n, then u, then v) projected through each body's shape functions
  // evaluated at (xi, eta). Body A gets positive sign, body B negative.
  // NOTE(review): no body2D branch here — see the header note about the
  // index/value count mismatch for 2D bodies.
  int startIndex = 0;
  if(bodyIdentifierA<numBodies) {
    // Rigid body: the constraint direction acts directly on its 3 DOFs.
    D[startIndex+0] = n.x;
    D[startIndex+1] = n.y;
    D[startIndex+2] = n.z;
    startIndex+=3;
  } else if (bodyIdentifierA<numBodies+numBeams) {
    // Beam: cubic Hermite shape functions in xi, scaled by element length lA
    // for the slope DOFs.
    D[startIndex+0 ] = n.x*(2.0*xiA*xiA*xiA-3.0*xiA*xiA+1.0);
    D[startIndex+1 ] = n.y*(2.0*xiA*xiA*xiA-3.0*xiA*xiA+1.0);
    D[startIndex+2 ] = n.z*(2.0*xiA*xiA*xiA-3.0*xiA*xiA+1.0);
    D[startIndex+3 ] = lA*n.x*(xiA*xiA*xiA-2.0*xiA*xiA+xiA);
    D[startIndex+4 ] = lA*n.y*(xiA*xiA*xiA-2.0*xiA*xiA+xiA);
    D[startIndex+5 ] = lA*n.z*(xiA*xiA*xiA-2.0*xiA*xiA+xiA);
    D[startIndex+6 ] = n.x*(-2.0*xiA*xiA*xiA+3.0*xiA*xiA);
    D[startIndex+7 ] = n.y*(-2.0*xiA*xiA*xiA+3.0*xiA*xiA);
    D[startIndex+8 ] = n.z*(-2.0*xiA*xiA*xiA+3.0*xiA*xiA);
    D[startIndex+9 ] = -lA*n.x*(-xiA*xiA*xiA+xiA*xiA);
    D[startIndex+10] = -lA*n.y*(-xiA*xiA*xiA+xiA*xiA);
    D[startIndex+11] = -lA*n.z*(-xiA*xiA*xiA+xiA*xiA);
    startIndex+=12;
  } else {
    // Plate/shell: 12 bicubic shape functions x 3 components, scaled by the
    // element dimensions aA, bA for the derivative DOFs.
    D[startIndex+0 ] = n.x*(etaA-1.0)*(xiA-1.0)*(etaA+xiA-(etaA*etaA)*2.0-(xiA*xiA)*2.0+1.0);
    D[startIndex+1 ] = n.y*(etaA-1.0)*(xiA-1.0)*(etaA+xiA-(etaA*etaA)*2.0-(xiA*xiA)*2.0+1.0);
    D[startIndex+2 ] = n.z*(etaA-1.0)*(xiA-1.0)*(etaA+xiA-(etaA*etaA)*2.0-(xiA*xiA)*2.0+1.0);
    D[startIndex+3 ] = -aA*n.x*xiA*(etaA-1.0)*pow(xiA-1.0,2.0);
    D[startIndex+4 ] = -aA*n.y*xiA*(etaA-1.0)*pow(xiA-1.0,2.0);
    D[startIndex+5 ] = -aA*n.z*xiA*(etaA-1.0)*pow(xiA-1.0,2.0);
    D[startIndex+6 ] = -bA*etaA*n.x*pow(etaA-1.0,2.0)*(xiA-1.0);
    D[startIndex+7 ] = -bA*etaA*n.y*pow(etaA-1.0,2.0)*(xiA-1.0);
    D[startIndex+8 ] = -bA*etaA*n.z*pow(etaA-1.0,2.0)*(xiA-1.0);
    D[startIndex+9 ] = -n.x*xiA*(etaA-1.0)*(etaA+xiA*3.0-(etaA*etaA)*2.0-(xiA*xiA)*2.0);
    D[startIndex+10] = -n.y*xiA*(etaA-1.0)*(etaA+xiA*3.0-(etaA*etaA)*2.0-(xiA*xiA)*2.0);
    D[startIndex+11] = -n.z*xiA*(etaA-1.0)*(etaA+xiA*3.0-(etaA*etaA)*2.0-(xiA*xiA)*2.0);
    D[startIndex+12] = -aA*n.x*(xiA*xiA)*(etaA-1.0)*(xiA-1.0);
    D[startIndex+13] = -aA*n.y*(xiA*xiA)*(etaA-1.0)*(xiA-1.0);
    D[startIndex+14] = -aA*n.z*(xiA*xiA)*(etaA-1.0)*(xiA-1.0);
    D[startIndex+15] = bA*etaA*n.x*xiA*pow(etaA-1.0,2.0);
    D[startIndex+16] = bA*etaA*n.y*xiA*pow(etaA-1.0,2.0);
    D[startIndex+17] = bA*etaA*n.z*xiA*pow(etaA-1.0,2.0);
    D[startIndex+18] = -etaA*n.x*xiA*(etaA*-3.0-xiA*3.0+(etaA*etaA)*2.0+(xiA*xiA)*2.0+1.0);
    D[startIndex+19] = -etaA*n.y*xiA*(etaA*-3.0-xiA*3.0+(etaA*etaA)*2.0+(xiA*xiA)*2.0+1.0);
    D[startIndex+20] = -etaA*n.z*xiA*(etaA*-3.0-xiA*3.0+(etaA*etaA)*2.0+(xiA*xiA)*2.0+1.0);
    D[startIndex+21] = aA*etaA*n.x*(xiA*xiA)*(xiA-1.0);
    D[startIndex+22] = aA*etaA*n.y*(xiA*xiA)*(xiA-1.0);
    D[startIndex+23] = aA*etaA*n.z*(xiA*xiA)*(xiA-1.0);
    D[startIndex+24] = bA*(etaA*etaA)*n.x*xiA*(etaA-1.0);
    D[startIndex+25] = bA*(etaA*etaA)*n.y*xiA*(etaA-1.0);
    D[startIndex+26] = bA*(etaA*etaA)*n.z*xiA*(etaA-1.0);
    D[startIndex+27] = -etaA*n.x*(xiA-1.0)*(etaA*3.0+xiA-(etaA*etaA)*2.0-(xiA*xiA)*2.0);
    D[startIndex+28] = -etaA*n.y*(xiA-1.0)*(etaA*3.0+xiA-(etaA*etaA)*2.0-(xiA*xiA)*2.0);
    D[startIndex+29] = -etaA*n.z*(xiA-1.0)*(etaA*3.0+xiA-(etaA*etaA)*2.0-(xiA*xiA)*2.0);
    D[startIndex+30] = aA*etaA*n.x*xiA*pow(xiA-1.0,2.0);
    D[startIndex+31] = aA*etaA*n.y*xiA*pow(xiA-1.0,2.0);
    D[startIndex+32] = aA*etaA*n.z*xiA*pow(xiA-1.0,2.0);
    D[startIndex+33] = -bA*(etaA*etaA)*n.x*(etaA-1.0)*(xiA-1.0);
    D[startIndex+34] = -bA*(etaA*etaA)*n.y*(etaA-1.0)*(xiA-1.0);
    D[startIndex+35] = -bA*(etaA*etaA)*n.z*(etaA-1.0)*(xiA-1.0);
    startIndex+=36;
  }
  // Row n, body B: same shape functions with opposite sign (Newton's third law).
  if(bodyIdentifierB<numBodies) {
    D[startIndex+0] = -n.x;
    D[startIndex+1] = -n.y;
    D[startIndex+2] = -n.z;
    startIndex+=3;
  } else if (bodyIdentifierB<numBodies+numBeams) {
    D[startIndex+0 ] = -n.x*(2.0*xiB*xiB*xiB-3.0*xiB*xiB+1.0);
    D[startIndex+1 ] = -n.y*(2.0*xiB*xiB*xiB-3.0*xiB*xiB+1.0);
    D[startIndex+2 ] = -n.z*(2.0*xiB*xiB*xiB-3.0*xiB*xiB+1.0);
    D[startIndex+3 ] = -lB*n.x*(xiB*xiB*xiB-2.0*xiB*xiB+xiB);
    D[startIndex+4 ] = -lB*n.y*(xiB*xiB*xiB-2.0*xiB*xiB+xiB);
    D[startIndex+5 ] = -lB*n.z*(xiB*xiB*xiB-2.0*xiB*xiB+xiB);
    D[startIndex+6 ] = -n.x*(-2.0*xiB*xiB*xiB+3.0*xiB*xiB);
    D[startIndex+7 ] = -n.y*(-2.0*xiB*xiB*xiB+3.0*xiB*xiB);
    D[startIndex+8 ] = -n.z*(-2.0*xiB*xiB*xiB+3.0*xiB*xiB);
    D[startIndex+9 ] = lB*n.x*(-xiB*xiB*xiB+xiB*xiB);
    D[startIndex+10] = lB*n.y*(-xiB*xiB*xiB+xiB*xiB);
    D[startIndex+11] = lB*n.z*(-xiB*xiB*xiB+xiB*xiB);
    startIndex+=12;
  } else {
    D[startIndex+0 ] = -n.x*(etaB-1.0)*(xiB-1.0)*(etaB+xiB-(etaB*etaB)*2.0-(xiB*xiB)*2.0+1.0);
    D[startIndex+1 ] = -n.y*(etaB-1.0)*(xiB-1.0)*(etaB+xiB-(etaB*etaB)*2.0-(xiB*xiB)*2.0+1.0);
    D[startIndex+2 ] = -n.z*(etaB-1.0)*(xiB-1.0)*(etaB+xiB-(etaB*etaB)*2.0-(xiB*xiB)*2.0+1.0);
    D[startIndex+3 ] = aB*n.x*xiB*(etaB-1.0)*pow(xiB-1.0,2.0);
    D[startIndex+4 ] = aB*n.y*xiB*(etaB-1.0)*pow(xiB-1.0,2.0);
    D[startIndex+5 ] = aB*n.z*xiB*(etaB-1.0)*pow(xiB-1.0,2.0);
    D[startIndex+6 ] = bB*etaB*n.x*pow(etaB-1.0,2.0)*(xiB-1.0);
    D[startIndex+7 ] = bB*etaB*n.y*pow(etaB-1.0,2.0)*(xiB-1.0);
    D[startIndex+8 ] = bB*etaB*n.z*pow(etaB-1.0,2.0)*(xiB-1.0);
    D[startIndex+9 ] = n.x*xiB*(etaB-1.0)*(etaB+xiB*3.0-(etaB*etaB)*2.0-(xiB*xiB)*2.0);
    D[startIndex+10] = n.y*xiB*(etaB-1.0)*(etaB+xiB*3.0-(etaB*etaB)*2.0-(xiB*xiB)*2.0);
    D[startIndex+11] = n.z*xiB*(etaB-1.0)*(etaB+xiB*3.0-(etaB*etaB)*2.0-(xiB*xiB)*2.0);
    D[startIndex+12] = aB*n.x*(xiB*xiB)*(etaB-1.0)*(xiB-1.0);
    D[startIndex+13] = aB*n.y*(xiB*xiB)*(etaB-1.0)*(xiB-1.0);
    D[startIndex+14] = aB*n.z*(xiB*xiB)*(etaB-1.0)*(xiB-1.0);
    D[startIndex+15] = -bB*etaB*n.x*xiB*pow(etaB-1.0,2.0);
    D[startIndex+16] = -bB*etaB*n.y*xiB*pow(etaB-1.0,2.0);
    D[startIndex+17] = -bB*etaB*n.z*xiB*pow(etaB-1.0,2.0);
    D[startIndex+18] = etaB*n.x*xiB*(etaB*-3.0-xiB*3.0+(etaB*etaB)*2.0+(xiB*xiB)*2.0+1.0);
    D[startIndex+19] = etaB*n.y*xiB*(etaB*-3.0-xiB*3.0+(etaB*etaB)*2.0+(xiB*xiB)*2.0+1.0);
    D[startIndex+20] = etaB*n.z*xiB*(etaB*-3.0-xiB*3.0+(etaB*etaB)*2.0+(xiB*xiB)*2.0+1.0);
    D[startIndex+21] = -aB*etaB*n.x*(xiB*xiB)*(xiB-1.0);
    D[startIndex+22] = -aB*etaB*n.y*(xiB*xiB)*(xiB-1.0);
    D[startIndex+23] = -aB*etaB*n.z*(xiB*xiB)*(xiB-1.0);
    D[startIndex+24] = -bB*(etaB*etaB)*n.x*xiB*(etaB-1.0);
    D[startIndex+25] = -bB*(etaB*etaB)*n.y*xiB*(etaB-1.0);
    D[startIndex+26] = -bB*(etaB*etaB)*n.z*xiB*(etaB-1.0);
    D[startIndex+27] = etaB*n.x*(xiB-1.0)*(etaB*3.0+xiB-(etaB*etaB)*2.0-(xiB*xiB)*2.0);
    D[startIndex+28] = etaB*n.y*(xiB-1.0)*(etaB*3.0+xiB-(etaB*etaB)*2.0-(xiB*xiB)*2.0);
    D[startIndex+29] = etaB*n.z*(xiB-1.0)*(etaB*3.0+xiB-(etaB*etaB)*2.0-(xiB*xiB)*2.0);
    D[startIndex+30] = -aB*etaB*n.x*xiB*pow(xiB-1.0,2.0);
    D[startIndex+31] = -aB*etaB*n.y*xiB*pow(xiB-1.0,2.0);
    D[startIndex+32] = -aB*etaB*n.z*xiB*pow(xiB-1.0,2.0);
    D[startIndex+33] = bB*(etaB*etaB)*n.x*(etaB-1.0)*(xiB-1.0);
    D[startIndex+34] = bB*(etaB*etaB)*n.y*(etaB-1.0)*(xiB-1.0);
    D[startIndex+35] = bB*(etaB*etaB)*n.z*(etaB-1.0)*(xiB-1.0);
    startIndex+=36;
  }
  // Add u, values
  // Row u: identical structure to row n with direction u substituted.
  if(bodyIdentifierA<numBodies) {
    D[startIndex+0] = u.x;
    D[startIndex+1] = u.y;
    D[startIndex+2] = u.z;
    startIndex+=3;
  } else if (bodyIdentifierA<numBodies+numBeams) {
    D[startIndex+0 ] = u.x*(2.0*xiA*xiA*xiA-3.0*xiA*xiA+1.0);
    D[startIndex+1 ] = u.y*(2.0*xiA*xiA*xiA-3.0*xiA*xiA+1.0);
    D[startIndex+2 ] = u.z*(2.0*xiA*xiA*xiA-3.0*xiA*xiA+1.0);
    D[startIndex+3 ] = lA*u.x*(xiA*xiA*xiA-2.0*xiA*xiA+xiA);
    D[startIndex+4 ] = lA*u.y*(xiA*xiA*xiA-2.0*xiA*xiA+xiA);
    D[startIndex+5 ] = lA*u.z*(xiA*xiA*xiA-2.0*xiA*xiA+xiA);
    D[startIndex+6 ] = u.x*(-2.0*xiA*xiA*xiA+3.0*xiA*xiA);
    D[startIndex+7 ] = u.y*(-2.0*xiA*xiA*xiA+3.0*xiA*xiA);
    D[startIndex+8 ] = u.z*(-2.0*xiA*xiA*xiA+3.0*xiA*xiA);
    D[startIndex+9 ] = -lA*u.x*(-xiA*xiA*xiA+xiA*xiA);
    D[startIndex+10] = -lA*u.y*(-xiA*xiA*xiA+xiA*xiA);
    D[startIndex+11] = -lA*u.z*(-xiA*xiA*xiA+xiA*xiA);
    startIndex+=12;
  } else {
    D[startIndex+0 ] = u.x*(etaA-1.0)*(xiA-1.0)*(etaA+xiA-(etaA*etaA)*2.0-(xiA*xiA)*2.0+1.0);
    D[startIndex+1 ] = u.y*(etaA-1.0)*(xiA-1.0)*(etaA+xiA-(etaA*etaA)*2.0-(xiA*xiA)*2.0+1.0);
    D[startIndex+2 ] = u.z*(etaA-1.0)*(xiA-1.0)*(etaA+xiA-(etaA*etaA)*2.0-(xiA*xiA)*2.0+1.0);
    D[startIndex+3 ] = -aA*u.x*xiA*(etaA-1.0)*pow(xiA-1.0,2.0);
    D[startIndex+4 ] = -aA*u.y*xiA*(etaA-1.0)*pow(xiA-1.0,2.0);
    D[startIndex+5 ] = -aA*u.z*xiA*(etaA-1.0)*pow(xiA-1.0,2.0);
    D[startIndex+6 ] = -bA*etaA*u.x*pow(etaA-1.0,2.0)*(xiA-1.0);
    D[startIndex+7 ] = -bA*etaA*u.y*pow(etaA-1.0,2.0)*(xiA-1.0);
    D[startIndex+8 ] = -bA*etaA*u.z*pow(etaA-1.0,2.0)*(xiA-1.0);
    D[startIndex+9 ] = -u.x*xiA*(etaA-1.0)*(etaA+xiA*3.0-(etaA*etaA)*2.0-(xiA*xiA)*2.0);
    D[startIndex+10] = -u.y*xiA*(etaA-1.0)*(etaA+xiA*3.0-(etaA*etaA)*2.0-(xiA*xiA)*2.0);
    D[startIndex+11] = -u.z*xiA*(etaA-1.0)*(etaA+xiA*3.0-(etaA*etaA)*2.0-(xiA*xiA)*2.0);
    D[startIndex+12] = -aA*u.x*(xiA*xiA)*(etaA-1.0)*(xiA-1.0);
    D[startIndex+13] = -aA*u.y*(xiA*xiA)*(etaA-1.0)*(xiA-1.0);
    D[startIndex+14] = -aA*u.z*(xiA*xiA)*(etaA-1.0)*(xiA-1.0);
    D[startIndex+15] = bA*etaA*u.x*xiA*pow(etaA-1.0,2.0);
    D[startIndex+16] = bA*etaA*u.y*xiA*pow(etaA-1.0,2.0);
    D[startIndex+17] = bA*etaA*u.z*xiA*pow(etaA-1.0,2.0);
    D[startIndex+18] = -etaA*u.x*xiA*(etaA*-3.0-xiA*3.0+(etaA*etaA)*2.0+(xiA*xiA)*2.0+1.0);
    D[startIndex+19] = -etaA*u.y*xiA*(etaA*-3.0-xiA*3.0+(etaA*etaA)*2.0+(xiA*xiA)*2.0+1.0);
    D[startIndex+20] = -etaA*u.z*xiA*(etaA*-3.0-xiA*3.0+(etaA*etaA)*2.0+(xiA*xiA)*2.0+1.0);
    D[startIndex+21] = aA*etaA*u.x*(xiA*xiA)*(xiA-1.0);
    D[startIndex+22] = aA*etaA*u.y*(xiA*xiA)*(xiA-1.0);
    D[startIndex+23] = aA*etaA*u.z*(xiA*xiA)*(xiA-1.0);
    D[startIndex+24] = bA*(etaA*etaA)*u.x*xiA*(etaA-1.0);
    D[startIndex+25] = bA*(etaA*etaA)*u.y*xiA*(etaA-1.0);
    D[startIndex+26] = bA*(etaA*etaA)*u.z*xiA*(etaA-1.0);
    D[startIndex+27] = -etaA*u.x*(xiA-1.0)*(etaA*3.0+xiA-(etaA*etaA)*2.0-(xiA*xiA)*2.0);
    D[startIndex+28] = -etaA*u.y*(xiA-1.0)*(etaA*3.0+xiA-(etaA*etaA)*2.0-(xiA*xiA)*2.0);
    D[startIndex+29] = -etaA*u.z*(xiA-1.0)*(etaA*3.0+xiA-(etaA*etaA)*2.0-(xiA*xiA)*2.0);
    D[startIndex+30] = aA*etaA*u.x*xiA*pow(xiA-1.0,2.0);
    D[startIndex+31] = aA*etaA*u.y*xiA*pow(xiA-1.0,2.0);
    D[startIndex+32] = aA*etaA*u.z*xiA*pow(xiA-1.0,2.0);
    D[startIndex+33] = -bA*(etaA*etaA)*u.x*(etaA-1.0)*(xiA-1.0);
    D[startIndex+34] = -bA*(etaA*etaA)*u.y*(etaA-1.0)*(xiA-1.0);
    D[startIndex+35] = -bA*(etaA*etaA)*u.z*(etaA-1.0)*(xiA-1.0);
    startIndex+=36;
  }
  if(bodyIdentifierB<numBodies) {
    D[startIndex+0] = -u.x;
    D[startIndex+1] = -u.y;
    D[startIndex+2] = -u.z;
    startIndex+=3;
  } else if (bodyIdentifierB<numBodies+numBeams) {
    D[startIndex+0 ] = -u.x*(2.0*xiB*xiB*xiB-3.0*xiB*xiB+1.0);
    D[startIndex+1 ] = -u.y*(2.0*xiB*xiB*xiB-3.0*xiB*xiB+1.0);
    D[startIndex+2 ] = -u.z*(2.0*xiB*xiB*xiB-3.0*xiB*xiB+1.0);
    D[startIndex+3 ] = -lB*u.x*(xiB*xiB*xiB-2.0*xiB*xiB+xiB);
    D[startIndex+4 ] = -lB*u.y*(xiB*xiB*xiB-2.0*xiB*xiB+xiB);
    D[startIndex+5 ] = -lB*u.z*(xiB*xiB*xiB-2.0*xiB*xiB+xiB);
    D[startIndex+6 ] = -u.x*(-2.0*xiB*xiB*xiB+3.0*xiB*xiB);
    D[startIndex+7 ] = -u.y*(-2.0*xiB*xiB*xiB+3.0*xiB*xiB);
    D[startIndex+8 ] = -u.z*(-2.0*xiB*xiB*xiB+3.0*xiB*xiB);
    D[startIndex+9 ] = lB*u.x*(-xiB*xiB*xiB+xiB*xiB);
    D[startIndex+10] = lB*u.y*(-xiB*xiB*xiB+xiB*xiB);
    D[startIndex+11] = lB*u.z*(-xiB*xiB*xiB+xiB*xiB);
    startIndex+=12;
  } else {
    D[startIndex+0 ] = -u.x*(etaB-1.0)*(xiB-1.0)*(etaB+xiB-(etaB*etaB)*2.0-(xiB*xiB)*2.0+1.0);
    D[startIndex+1 ] = -u.y*(etaB-1.0)*(xiB-1.0)*(etaB+xiB-(etaB*etaB)*2.0-(xiB*xiB)*2.0+1.0);
    D[startIndex+2 ] = -u.z*(etaB-1.0)*(xiB-1.0)*(etaB+xiB-(etaB*etaB)*2.0-(xiB*xiB)*2.0+1.0);
    D[startIndex+3 ] = aB*u.x*xiB*(etaB-1.0)*pow(xiB-1.0,2.0);
    D[startIndex+4 ] = aB*u.y*xiB*(etaB-1.0)*pow(xiB-1.0,2.0);
    D[startIndex+5 ] = aB*u.z*xiB*(etaB-1.0)*pow(xiB-1.0,2.0);
    D[startIndex+6 ] = bB*etaB*u.x*pow(etaB-1.0,2.0)*(xiB-1.0);
    D[startIndex+7 ] = bB*etaB*u.y*pow(etaB-1.0,2.0)*(xiB-1.0);
    D[startIndex+8 ] = bB*etaB*u.z*pow(etaB-1.0,2.0)*(xiB-1.0);
    D[startIndex+9 ] = u.x*xiB*(etaB-1.0)*(etaB+xiB*3.0-(etaB*etaB)*2.0-(xiB*xiB)*2.0);
    D[startIndex+10] = u.y*xiB*(etaB-1.0)*(etaB+xiB*3.0-(etaB*etaB)*2.0-(xiB*xiB)*2.0);
    D[startIndex+11] = u.z*xiB*(etaB-1.0)*(etaB+xiB*3.0-(etaB*etaB)*2.0-(xiB*xiB)*2.0);
    D[startIndex+12] = aB*u.x*(xiB*xiB)*(etaB-1.0)*(xiB-1.0);
    D[startIndex+13] = aB*u.y*(xiB*xiB)*(etaB-1.0)*(xiB-1.0);
    D[startIndex+14] = aB*u.z*(xiB*xiB)*(etaB-1.0)*(xiB-1.0);
    D[startIndex+15] = -bB*etaB*u.x*xiB*pow(etaB-1.0,2.0);
    D[startIndex+16] = -bB*etaB*u.y*xiB*pow(etaB-1.0,2.0);
    D[startIndex+17] = -bB*etaB*u.z*xiB*pow(etaB-1.0,2.0);
    D[startIndex+18] = etaB*u.x*xiB*(etaB*-3.0-xiB*3.0+(etaB*etaB)*2.0+(xiB*xiB)*2.0+1.0);
    D[startIndex+19] = etaB*u.y*xiB*(etaB*-3.0-xiB*3.0+(etaB*etaB)*2.0+(xiB*xiB)*2.0+1.0);
    D[startIndex+20] = etaB*u.z*xiB*(etaB*-3.0-xiB*3.0+(etaB*etaB)*2.0+(xiB*xiB)*2.0+1.0);
    D[startIndex+21] = -aB*etaB*u.x*(xiB*xiB)*(xiB-1.0);
    D[startIndex+22] = -aB*etaB*u.y*(xiB*xiB)*(xiB-1.0);
    D[startIndex+23] = -aB*etaB*u.z*(xiB*xiB)*(xiB-1.0);
    D[startIndex+24] = -bB*(etaB*etaB)*u.x*xiB*(etaB-1.0);
    D[startIndex+25] = -bB*(etaB*etaB)*u.y*xiB*(etaB-1.0);
    D[startIndex+26] = -bB*(etaB*etaB)*u.z*xiB*(etaB-1.0);
    D[startIndex+27] = etaB*u.x*(xiB-1.0)*(etaB*3.0+xiB-(etaB*etaB)*2.0-(xiB*xiB)*2.0);
    D[startIndex+28] = etaB*u.y*(xiB-1.0)*(etaB*3.0+xiB-(etaB*etaB)*2.0-(xiB*xiB)*2.0);
    D[startIndex+29] = etaB*u.z*(xiB-1.0)*(etaB*3.0+xiB-(etaB*etaB)*2.0-(xiB*xiB)*2.0);
    D[startIndex+30] = -aB*etaB*u.x*xiB*pow(xiB-1.0,2.0);
    D[startIndex+31] = -aB*etaB*u.y*xiB*pow(xiB-1.0,2.0);
    D[startIndex+32] = -aB*etaB*u.z*xiB*pow(xiB-1.0,2.0);
    D[startIndex+33] = bB*(etaB*etaB)*u.x*(etaB-1.0)*(xiB-1.0);
    D[startIndex+34] = bB*(etaB*etaB)*u.y*(etaB-1.0)*(xiB-1.0);
    D[startIndex+35] = bB*(etaB*etaB)*u.z*(etaB-1.0)*(xiB-1.0);
    startIndex+=36;
  }
  // Add v, values
  // Row v: identical structure with direction v substituted.
  if(bodyIdentifierA<numBodies) {
    D[startIndex+0] = v.x;
    D[startIndex+1] = v.y;
    D[startIndex+2] = v.z;
    startIndex+=3;
  } else if (bodyIdentifierA<numBodies+numBeams) {
    D[startIndex+0 ] = v.x*(2.0*xiA*xiA*xiA-3.0*xiA*xiA+1.0);
    D[startIndex+1 ] = v.y*(2.0*xiA*xiA*xiA-3.0*xiA*xiA+1.0);
    D[startIndex+2 ] = v.z*(2.0*xiA*xiA*xiA-3.0*xiA*xiA+1.0);
    D[startIndex+3 ] = lA*v.x*(xiA*xiA*xiA-2.0*xiA*xiA+xiA);
    D[startIndex+4 ] = lA*v.y*(xiA*xiA*xiA-2.0*xiA*xiA+xiA);
    D[startIndex+5 ] = lA*v.z*(xiA*xiA*xiA-2.0*xiA*xiA+xiA);
    D[startIndex+6 ] = v.x*(-2.0*xiA*xiA*xiA+3.0*xiA*xiA);
    D[startIndex+7 ] = v.y*(-2.0*xiA*xiA*xiA+3.0*xiA*xiA);
    D[startIndex+8 ] = v.z*(-2.0*xiA*xiA*xiA+3.0*xiA*xiA);
    D[startIndex+9 ] = -lA*v.x*(-xiA*xiA*xiA+xiA*xiA);
    D[startIndex+10] = -lA*v.y*(-xiA*xiA*xiA+xiA*xiA);
    D[startIndex+11] = -lA*v.z*(-xiA*xiA*xiA+xiA*xiA);
    startIndex+=12;
  } else {
    D[startIndex+0 ] = v.x*(etaA-1.0)*(xiA-1.0)*(etaA+xiA-(etaA*etaA)*2.0-(xiA*xiA)*2.0+1.0);
    D[startIndex+1 ] = v.y*(etaA-1.0)*(xiA-1.0)*(etaA+xiA-(etaA*etaA)*2.0-(xiA*xiA)*2.0+1.0);
    D[startIndex+2 ] = v.z*(etaA-1.0)*(xiA-1.0)*(etaA+xiA-(etaA*etaA)*2.0-(xiA*xiA)*2.0+1.0);
    D[startIndex+3 ] = -aA*v.x*xiA*(etaA-1.0)*pow(xiA-1.0,2.0);
    D[startIndex+4 ] = -aA*v.y*xiA*(etaA-1.0)*pow(xiA-1.0,2.0);
    D[startIndex+5 ] = -aA*v.z*xiA*(etaA-1.0)*pow(xiA-1.0,2.0);
    D[startIndex+6 ] = -bA*etaA*v.x*pow(etaA-1.0,2.0)*(xiA-1.0);
    D[startIndex+7 ] = -bA*etaA*v.y*pow(etaA-1.0,2.0)*(xiA-1.0);
    D[startIndex+8 ] = -bA*etaA*v.z*pow(etaA-1.0,2.0)*(xiA-1.0);
    D[startIndex+9 ] = -v.x*xiA*(etaA-1.0)*(etaA+xiA*3.0-(etaA*etaA)*2.0-(xiA*xiA)*2.0);
    D[startIndex+10] = -v.y*xiA*(etaA-1.0)*(etaA+xiA*3.0-(etaA*etaA)*2.0-(xiA*xiA)*2.0);
    D[startIndex+11] = -v.z*xiA*(etaA-1.0)*(etaA+xiA*3.0-(etaA*etaA)*2.0-(xiA*xiA)*2.0);
    D[startIndex+12] = -aA*v.x*(xiA*xiA)*(etaA-1.0)*(xiA-1.0);
    D[startIndex+13] = -aA*v.y*(xiA*xiA)*(etaA-1.0)*(xiA-1.0);
    D[startIndex+14] = -aA*v.z*(xiA*xiA)*(etaA-1.0)*(xiA-1.0);
    D[startIndex+15] = bA*etaA*v.x*xiA*pow(etaA-1.0,2.0);
    D[startIndex+16] = bA*etaA*v.y*xiA*pow(etaA-1.0,2.0);
    D[startIndex+17] = bA*etaA*v.z*xiA*pow(etaA-1.0,2.0);
    D[startIndex+18] = -etaA*v.x*xiA*(etaA*-3.0-xiA*3.0+(etaA*etaA)*2.0+(xiA*xiA)*2.0+1.0);
    D[startIndex+19] = -etaA*v.y*xiA*(etaA*-3.0-xiA*3.0+(etaA*etaA)*2.0+(xiA*xiA)*2.0+1.0);
    D[startIndex+20] = -etaA*v.z*xiA*(etaA*-3.0-xiA*3.0+(etaA*etaA)*2.0+(xiA*xiA)*2.0+1.0);
    D[startIndex+21] = aA*etaA*v.x*(xiA*xiA)*(xiA-1.0);
    D[startIndex+22] = aA*etaA*v.y*(xiA*xiA)*(xiA-1.0);
    D[startIndex+23] = aA*etaA*v.z*(xiA*xiA)*(xiA-1.0);
    D[startIndex+24] = bA*(etaA*etaA)*v.x*xiA*(etaA-1.0);
    D[startIndex+25] = bA*(etaA*etaA)*v.y*xiA*(etaA-1.0);
    D[startIndex+26] = bA*(etaA*etaA)*v.z*xiA*(etaA-1.0);
    D[startIndex+27] = -etaA*v.x*(xiA-1.0)*(etaA*3.0+xiA-(etaA*etaA)*2.0-(xiA*xiA)*2.0);
    D[startIndex+28] = -etaA*v.y*(xiA-1.0)*(etaA*3.0+xiA-(etaA*etaA)*2.0-(xiA*xiA)*2.0);
    D[startIndex+29] = -etaA*v.z*(xiA-1.0)*(etaA*3.0+xiA-(etaA*etaA)*2.0-(xiA*xiA)*2.0);
    D[startIndex+30] = aA*etaA*v.x*xiA*pow(xiA-1.0,2.0);
    D[startIndex+31] = aA*etaA*v.y*xiA*pow(xiA-1.0,2.0);
    D[startIndex+32] = aA*etaA*v.z*xiA*pow(xiA-1.0,2.0);
    D[startIndex+33] = -bA*(etaA*etaA)*v.x*(etaA-1.0)*(xiA-1.0);
    D[startIndex+34] = -bA*(etaA*etaA)*v.y*(etaA-1.0)*(xiA-1.0);
    D[startIndex+35] = -bA*(etaA*etaA)*v.z*(etaA-1.0)*(xiA-1.0);
    startIndex+=36;
  }
  if(bodyIdentifierB<numBodies) {
    D[startIndex+0] = -v.x;
    D[startIndex+1] = -v.y;
    D[startIndex+2] = -v.z;
    startIndex+=3;
  } else if (bodyIdentifierB<numBodies+numBeams) {
    D[startIndex+0 ] = -v.x*(2.0*xiB*xiB*xiB-3.0*xiB*xiB+1.0);
    D[startIndex+1 ] = -v.y*(2.0*xiB*xiB*xiB-3.0*xiB*xiB+1.0);
    D[startIndex+2 ] = -v.z*(2.0*xiB*xiB*xiB-3.0*xiB*xiB+1.0);
    D[startIndex+3 ] = -lB*v.x*(xiB*xiB*xiB-2.0*xiB*xiB+xiB);
    D[startIndex+4 ] = -lB*v.y*(xiB*xiB*xiB-2.0*xiB*xiB+xiB);
    D[startIndex+5 ] = -lB*v.z*(xiB*xiB*xiB-2.0*xiB*xiB+xiB);
    D[startIndex+6 ] = -v.x*(-2.0*xiB*xiB*xiB+3.0*xiB*xiB);
    D[startIndex+7 ] = -v.y*(-2.0*xiB*xiB*xiB+3.0*xiB*xiB);
    D[startIndex+8 ] = -v.z*(-2.0*xiB*xiB*xiB+3.0*xiB*xiB);
    D[startIndex+9 ] = lB*v.x*(-xiB*xiB*xiB+xiB*xiB);
    D[startIndex+10] = lB*v.y*(-xiB*xiB*xiB+xiB*xiB);
    D[startIndex+11] = lB*v.z*(-xiB*xiB*xiB+xiB*xiB);
    startIndex+=12;
  } else {
    D[startIndex+0 ] = -v.x*(etaB-1.0)*(xiB-1.0)*(etaB+xiB-(etaB*etaB)*2.0-(xiB*xiB)*2.0+1.0);
    D[startIndex+1 ] = -v.y*(etaB-1.0)*(xiB-1.0)*(etaB+xiB-(etaB*etaB)*2.0-(xiB*xiB)*2.0+1.0);
    D[startIndex+2 ] = -v.z*(etaB-1.0)*(xiB-1.0)*(etaB+xiB-(etaB*etaB)*2.0-(xiB*xiB)*2.0+1.0);
    D[startIndex+3 ] = aB*v.x*xiB*(etaB-1.0)*pow(xiB-1.0,2.0);
    D[startIndex+4 ] = aB*v.y*xiB*(etaB-1.0)*pow(xiB-1.0,2.0);
    D[startIndex+5 ] = aB*v.z*xiB*(etaB-1.0)*pow(xiB-1.0,2.0);
    D[startIndex+6 ] = bB*etaB*v.x*pow(etaB-1.0,2.0)*(xiB-1.0);
    D[startIndex+7 ] = bB*etaB*v.y*pow(etaB-1.0,2.0)*(xiB-1.0);
    D[startIndex+8 ] = bB*etaB*v.z*pow(etaB-1.0,2.0)*(xiB-1.0);
    D[startIndex+9 ] = v.x*xiB*(etaB-1.0)*(etaB+xiB*3.0-(etaB*etaB)*2.0-(xiB*xiB)*2.0);
    D[startIndex+10] = v.y*xiB*(etaB-1.0)*(etaB+xiB*3.0-(etaB*etaB)*2.0-(xiB*xiB)*2.0);
    D[startIndex+11] = v.z*xiB*(etaB-1.0)*(etaB+xiB*3.0-(etaB*etaB)*2.0-(xiB*xiB)*2.0);
    D[startIndex+12] = aB*v.x*(xiB*xiB)*(etaB-1.0)*(xiB-1.0);
    D[startIndex+13] = aB*v.y*(xiB*xiB)*(etaB-1.0)*(xiB-1.0);
    D[startIndex+14] = aB*v.z*(xiB*xiB)*(etaB-1.0)*(xiB-1.0);
    D[startIndex+15] = -bB*etaB*v.x*xiB*pow(etaB-1.0,2.0);
    D[startIndex+16] = -bB*etaB*v.y*xiB*pow(etaB-1.0,2.0);
    D[startIndex+17] = -bB*etaB*v.z*xiB*pow(etaB-1.0,2.0);
    D[startIndex+18] = etaB*v.x*xiB*(etaB*-3.0-xiB*3.0+(etaB*etaB)*2.0+(xiB*xiB)*2.0+1.0);
    D[startIndex+19] = etaB*v.y*xiB*(etaB*-3.0-xiB*3.0+(etaB*etaB)*2.0+(xiB*xiB)*2.0+1.0);
    D[startIndex+20] = etaB*v.z*xiB*(etaB*-3.0-xiB*3.0+(etaB*etaB)*2.0+(xiB*xiB)*2.0+1.0);
    D[startIndex+21] = -aB*etaB*v.x*(xiB*xiB)*(xiB-1.0);
    D[startIndex+22] = -aB*etaB*v.y*(xiB*xiB)*(xiB-1.0);
    D[startIndex+23] = -aB*etaB*v.z*(xiB*xiB)*(xiB-1.0);
    D[startIndex+24] = -bB*(etaB*etaB)*v.x*xiB*(etaB-1.0);
    D[startIndex+25] = -bB*(etaB*etaB)*v.y*xiB*(etaB-1.0);
    D[startIndex+26] = -bB*(etaB*etaB)*v.z*xiB*(etaB-1.0);
    D[startIndex+27] = etaB*v.x*(xiB-1.0)*(etaB*3.0+xiB-(etaB*etaB)*2.0-(xiB*xiB)*2.0);
    D[startIndex+28] = etaB*v.y*(xiB-1.0)*(etaB*3.0+xiB-(etaB*etaB)*2.0-(xiB*xiB)*2.0);
    D[startIndex+29] = etaB*v.z*(xiB-1.0)*(etaB*3.0+xiB-(etaB*etaB)*2.0-(xiB*xiB)*2.0);
    D[startIndex+30] = -aB*etaB*v.x*xiB*pow(xiB-1.0,2.0);
    D[startIndex+31] = -aB*etaB*v.y*xiB*pow(xiB-1.0,2.0);
    D[startIndex+32] = -aB*etaB*v.z*xiB*pow(xiB-1.0,2.0);
    D[startIndex+33] = bB*(etaB*etaB)*v.x*(etaB-1.0)*(xiB-1.0);
    D[startIndex+34] = bB*(etaB*etaB)*v.y*(etaB-1.0)*(xiB-1.0);
    D[startIndex+35] = bB*(etaB*etaB)*v.z*(etaB-1.0)*(xiB-1.0);
    startIndex+=36;
  }
}
// Converts the solved contact impulses (gamma) into per-collision forces:
// force = impulse / h. The local contact frame (n, u, v) is rebuilt here with
// the same branch logic used when the Jacobian was constructed, so the frame
// matches the one gamma was solved in. One thread per collision (1D grid).
__global__ void updateContactForcePerCollision(double3* normalForcePerCollision, double3* frictionForcePerCollision, double* gammaGlobal, double4* normalsAndPenetrations, double h, uint numCollisions) {
  INIT_CHECK_THREAD_BOUNDED(INDEX1D, numCollisions);
  const double4 nAndP = normalsAndPenetrations[index];
  const double3 n = make_double3(nAndP.x,nAndP.y,nAndP.z);
  // Tangent u orthogonal to n; each branch divides only by a nonzero component.
  double3 u;
  if(n.z != 0) {
    u = normalize(make_double3(1,0,-n.x/n.z));
  }
  else if(n.x != 0) {
    u = normalize(make_double3(-n.z/n.x,0,1));
  }
  else {
    u = normalize(make_double3(1,-n.x/n.y,0));
  }
  const double3 v = normalize(cross(n,u));
  // gamma holds (normal, tangent-u, tangent-v) impulse components for this contact.
  const double3 gamma = make_double3(gammaGlobal[3*index],gammaGlobal[3*index+1],gammaGlobal[3*index+2]);
  normalForcePerCollision[index] = gamma.x*n/h;
  frictionForcePerCollision[index] = (gamma.y*u+gamma.z*v)/h;
}
// Resizes the per-collision force buffers and launches the kernel that fills
// them from the current impulse vector gamma_d. Skips the launch entirely when
// there are no collisions (a zero-block launch would be invalid).
int System::calculateContactForcePerCollision() {
  uint numCollisions = collisionDetector->numCollisions;
  normalForcePerCollision_d.resize(numCollisions);
  frictionForcePerCollision_d.resize(numCollisions);
  if(numCollisions) {
    updateContactForcePerCollision<<<BLOCKS(numCollisions),THREADS>>>(CASTD3(normalForcePerCollision_d), CASTD3(frictionForcePerCollision_d), CASTD1(gamma_d), CASTD4(collisionDetector->normalsAndPenetrations_d), h, numCollisions);
  }
  return 0;
}
// Recomputes the per-collision forces, copies all reporting data to the host,
// and prints one record per collision (implicit device sync via the blocking
// thrust device->host copies).
int System::outputContactForcePerCollision() {
  calculateContactForcePerCollision();
  // Pull everything needed for the report back to the host.
  thrust::host_vector<double3> locations_h = collisionDetector->collisionLocations_d;
  thrust::host_vector<double3> normalForce_h = normalForcePerCollision_d;
  thrust::host_vector<double3> frictionForce_h = frictionForcePerCollision_d;
  thrust::host_vector<int4> map_h = collisionMap_d;
  thrust::host_vector<uint> identifierA_h = collisionDetector->collisionIdentifierA_d;
  thrust::host_vector<uint> identifierB_h = collisionDetector->collisionIdentifierB_d;
  for(int i=0; i<collisionDetector->numCollisions; i++) {
    int bodyA = map_h[identifierA_h[i]].x;
    int bodyB = map_h[identifierB_h[i]].x;
    printf("Collision #%d: Body %d to %d\n",i,bodyA,bodyB);
    printf("  Collision location: (%f, %f, %f)\n", locations_h[i].x,locations_h[i].y,locations_h[i].z);
    printf("  Collision normal: (%f, %f, %f)\n", normalForce_h[i].x,normalForce_h[i].y,normalForce_h[i].z);
    printf("  Collision friction: (%f, %f, %f)\n\n", frictionForce_h[i].x,frictionForce_h[i].y,frictionForce_h[i].z);
  }
  return 0;
}
// Counts the Jacobian nonzeros each collision will contribute: 3 constraint
// rows times the DOF count of each of the two bodies.
//   rigid body: 3 DOFs -> 9 entries; beam: 12 DOFs -> 36; everything else: 108.
// NOTE(review): the "everything else" bucket (108) covers plates, 2D bodies,
// and shell meshes, but constructContactJacobian writes only 9 index entries
// per 2D body — confirm body2Ds never collide, or that the slack is intended.
// One thread per collision (1D grid).
__global__ void updateNonzerosPerContact(int* nonzerosPerContact, int4* collisionMap, uint* collisionIdentifierA, uint* collisionIdentifierB, int numBodies, int numBeams, uint numCollisions) {
  INIT_CHECK_THREAD_BOUNDED(INDEX1D, numCollisions);
  const int bodyA = collisionMap[collisionIdentifierA[index]].x;
  const int bodyB = collisionMap[collisionIdentifierB[index]].x;
  int count = (bodyA<numBodies) ? 9 : ((bodyA<(numBodies+numBeams)) ? 36 : 108);
  count += (bodyB<numBodies) ? 9 : ((bodyB<(numBodies+numBeams)) ? 36 : 108);
  nonzerosPerContact[index] = count;
}
// Assembles the full constraint Jacobian D in COO form (DI_d, DJ_d, D_d):
// bilateral DOF constraints first, then spherical shell-node-to-body2D
// constraints, then 3 rows per collision (normal + two tangents), and finally
// wraps the arrays in a cusp view. Also rebuilds the transpose.
int System::buildContactJacobian() {
  // update nonzeros per contact
  int totalNonzeros = 0;
  nonzerosPerContact_d.resize(collisionDetector->numCollisions);
  if(collisionDetector->numCollisions) {
    updateNonzerosPerContact<<<BLOCKS(collisionDetector->numCollisions),THREADS>>>(CASTI1(nonzerosPerContact_d), CASTI4(collisionMap_d), CASTU1(collisionDetector->collisionIdentifierA_d), CASTU1(collisionDetector->collisionIdentifierB_d), bodies.size(), beams.size(), collisionDetector->numCollisions);
    // Inclusive scan: nonzerosPerContact_d[i] becomes each contact's end offset,
    // and totalNonzeros the grand total of contact nonzeros.
    Thrust_Inclusive_Scan_Sum(nonzerosPerContact_d, totalNonzeros);
  }
  totalNonzeros+=offsetConstraintsDOF+7*constraintsSpherical_ShellNodeToBody2D_d.size(); //Add in space for the bilateralDOF entries
  DI_d.resize(totalNonzeros);
  DJ_d.resize(totalNonzeros);
  D_d.resize(totalNonzeros);
  friction_d.resize(collisionDetector->numCollisions);
  // Fill the three constraint sections in order; each kernel writes its own
  // disjoint slice of DI/DJ/D using the offsets passed to it.
  if(constraintsBilateralDOF_d.size()) constructBilateralJacobian<<<BLOCKS(constraintsBilateralDOF_d.size()),THREADS>>>(CASTI2(constraintsBilateralDOF_d), CASTI1(offsetBilaterals_d), CASTI1(DI_d), CASTI1(DJ_d), CASTD1(D_d), constraintsBilateralDOF_d.size());
  if(constraintsSpherical_ShellNodeToBody2D_d.size()) constructSpherical_ShellNodeToBody2DJacobian<<<BLOCKS(constraintsSpherical_ShellNodeToBody2D_d.size()),THREADS>>>(CASTI3(constraintsSpherical_ShellNodeToBody2D_d), CASTD3(pSpherical_ShellNodeToBody2D_d), CASTD1(p_d), CASTI1(DI_d), CASTI1(DJ_d), CASTD1(D_d), constraintsBilateralDOF_d.size(), offsetConstraintsDOF, constraintsSpherical_ShellNodeToBody2D_d.size());
  if(collisionDetector->numCollisions) constructContactJacobian<<<BLOCKS(collisionDetector->numCollisions),THREADS>>>(CASTI1(nonzerosPerContact_d), CASTI4(collisionMap_d), CASTI4(shellConnectivities_d), CASTD3(contactGeometry_d), CASTD3(collisionGeometry_d), CASTI1(DI_d), CASTI1(DJ_d), CASTD1(D_d), CASTD1(friction_d), frictionCoefficient, CASTD4(collisionDetector->normalsAndPenetrations_d), CASTU1(collisionDetector->collisionIdentifierA_d), CASTU1(collisionDetector->collisionIdentifierB_d), CASTI1(indices_d), bodies.size(), beams.size(), plates.size(), body2Ds.size(), offsetConstraintsDOF+7*constraintsSpherical_ShellNodeToBody2D_d.size(), constraintsBilateralDOF_d.size()+3*constraintsSpherical_ShellNodeToBody2D_d.size(), collisionDetector->numCollisions);
  // create contact jacobian using cusp library
  thrust::device_ptr<int> wrapped_device_I(CASTI1(DI_d));
  DeviceIndexArrayView row_indices = DeviceIndexArrayView(wrapped_device_I, wrapped_device_I + DI_d.size());
  thrust::device_ptr<int> wrapped_device_J(CASTI1(DJ_d));
  DeviceIndexArrayView column_indices = DeviceIndexArrayView(wrapped_device_J, wrapped_device_J + DJ_d.size());
  thrust::device_ptr<double> wrapped_device_V(CASTD1(D_d));
  DeviceValueArrayView values = DeviceValueArrayView(wrapped_device_V, wrapped_device_V + D_d.size());
  // Matrix dimensions: rows = all constraint rows, cols = all generalized coords.
  D = DeviceView(3*collisionDetector->numCollisions+constraintsBilateralDOF_d.size()+3*constraintsSpherical_ShellNodeToBody2D_d.size(), 3*bodies.size()+12*beams.size()+36*plates.size()+3*body2Ds.size()+3*nodes_h.size(), D_d.size(), row_indices, column_indices, values);
  // end create contact jacobian
  buildContactJacobianTranspose();
  return 0;
}
// Forms D^T by copying the COO arrays of D with row and column indices swapped
// (values unchanged), wraps them in a cusp view with transposed dimensions, and
// sorts by row so the transpose is in canonical COO order for downstream SpMV.
int System::buildContactJacobianTranspose() {
  // Swapping I and J transposes a COO matrix.
  DTI_d = DJ_d;
  DTJ_d = DI_d;
  DT_d = D_d;
  thrust::device_ptr<int> rowPtr(CASTI1(DTI_d));
  thrust::device_ptr<int> colPtr(CASTI1(DTJ_d));
  thrust::device_ptr<double> valPtr(CASTD1(DT_d));
  DeviceIndexArrayView row_indices(rowPtr, rowPtr + DTI_d.size());
  DeviceIndexArrayView column_indices(colPtr, colPtr + DTJ_d.size());
  DeviceValueArrayView values(valPtr, valPtr + DT_d.size());
  // Dimensions are D's, swapped: rows = generalized coords, cols = constraint rows.
  DT = DeviceView(3*bodies.size()+12*beams.size()+36*plates.size()+3*body2Ds.size()+3*nodes_h.size(), 3*collisionDetector->numCollisions+constraintsBilateralDOF_d.size()+3*constraintsSpherical_ShellNodeToBody2D_d.size(), DT_d.size(), row_indices, column_indices, values);
  DT.sort_by_row(); // TODO: Do I need this?
  return 0;
}
// dst = M * src for the diagonal (lumped) part of the mass matrix, where
// massInv stores the INVERSE mass of each DOF. A zero inverse mass marks a
// fixed/infinite-mass DOF; its output is forced to zero rather than infinity.
// One thread per DOF (1D grid).
__global__ void multiplyByMass(double* massInv, double* src, double* dst, uint numDOF) {
  INIT_CHECK_THREAD_BOUNDED(INDEX1D, numDOF);
  const double invMass = massInv[index];
  const double mass = invMass ? (1.0/invMass) : 0.0;
  dst[index] = mass*src[index];
}
// dst = M * src for one 12-DOF beam element, using the dense consistent-mass
// matrix of a cubic Hermite beam (the familiar rho*A*l coefficients 13/35,
// 9/70, 11/210, 13/420, 1/105, 1/140), expanded by hand for each of the 12
// generalized coordinates. One thread per beam (1D grid); beams start after
// the rigid bodies in both geometries[] and the state vector.
__global__ void multiplyByBeamMass(double3* geometries, double3* materials, double* src, double* dst, uint numBodies, uint numBeams) {
  INIT_CHECK_THREAD_BOUNDED(INDEX1D, numBeams);
  double3 geometry = geometries[numBodies+index];
  // Cross-section area from geometry.x (presumably the beam radius — TODO confirm).
  double A = PI*geometry.x*geometry.x;
  double l = geometry.y;  // element length
  double rho = materials[index].x;  // density
  // First generalized coordinate of this beam in the global state vector.
  uint offset = 3*numBodies+12*index;
  dst[offset+0 ] = (13.0*A*rho*src[0+offset])/35.0 + (9.0*A*rho*src[6+offset])/70.0 + (11.0*A*l*rho*src[3+offset])/210.0 - (13.0*A*l*rho*src[9 +offset])/420.0;
  dst[offset+1 ] = (13.0*A*rho*src[1+offset])/35.0 + (9.0*A*rho*src[7+offset])/70.0 + (11.0*A*l*rho*src[4+offset])/210.0 - (13.0*A*l*rho*src[10+offset])/420.0;
  dst[offset+2 ] = (13.0*A*rho*src[2+offset])/35.0 + (9.0*A*rho*src[8+offset])/70.0 + (11.0*A*l*rho*src[5+offset])/210.0 - (13.0*A*l*rho*src[11+offset])/420.0;
  dst[offset+3 ] = (A*l*l*rho*src[3+offset])/105.0 - (A*l*l*rho*src[9 +offset])/140.0 + (11.0*A*l*rho*src[0+offset])/210.0 + (13.0*A*l*rho*src[6+offset])/420.0;
  dst[offset+4 ] = (A*l*l*rho*src[4+offset])/105.0 - (A*l*l*rho*src[10+offset])/140.0 + (11.0*A*l*rho*src[1+offset])/210.0 + (13.0*A*l*rho*src[7+offset])/420.0;
  dst[offset+5 ] = (A*l*l*rho*src[5+offset])/105.0 - (A*l*l*rho*src[11+offset])/140.0 + (11.0*A*l*rho*src[2+offset])/210.0 + (13.0*A*l*rho*src[8+offset])/420.0;
  dst[offset+6 ] = (9.0*A*rho*src[0+offset])/70.0 + (13.0*A*rho*src[6+offset])/35.0 + (13.0*A*l*rho*src[3+offset])/420.0 - (11.0*A*l*rho*src[9 +offset])/210.0;
  dst[offset+7 ] = (9.0*A*rho*src[1+offset])/70.0 + (13.0*A*rho*src[7+offset])/35.0 + (13.0*A*l*rho*src[4+offset])/420.0 - (11.0*A*l*rho*src[10+offset])/210.0;
  dst[offset+8 ] = (9.0*A*rho*src[2+offset])/70.0 + (13.0*A*rho*src[8+offset])/35.0 + (13.0*A*l*rho*src[5+offset])/420.0 - (11.0*A*l*rho*src[11+offset])/210.0;
  dst[offset+9 ] = (A*l*l*rho*src[9 +offset])/105.0 - (A*l*l*rho*src[3+offset])/140.0 - (13.0*A*l*rho*src[0+offset])/420.0 - (11.0*A*l*rho*src[6+offset])/210.0;
  dst[offset+10] = (A*l*l*rho*src[10+offset])/105.0 - (A*l*l*rho*src[4+offset])/140.0 - (13.0*A*l*rho*src[1+offset])/420.0 - (11.0*A*l*rho*src[7+offset])/210.0;
  dst[offset+11] = (A*l*l*rho*src[11+offset])/105.0 - (A*l*l*rho*src[5+offset])/140.0 - (13.0*A*l*rho*src[2+offset])/420.0 - (11.0*A*l*rho*src[8+offset])/210.0;
}
// Multiply the 36 plate DOFs of src by the element's dense consistent mass
// matrix, writing the result into dst. One thread per plate element.
// geometry.x and geometry.y are the plate edge dimensions a and b;
// materials[index].x is the density rho, materials[index].w the thickness th.
// NOTE(review): the 36 DOFs appear to be grouped as 4 nodes x (3 position,
// 3 a-scaled slope, 3 b-scaled slope) components — row triplets below follow
// that 9-DOF-per-node pattern; confirm against the element formulation.
__global__ void multiplyByPlateMass(double3* geometries, double4* materials, double* src, double* dst, uint numBodies, uint numBeams, uint numPlates) {
  INIT_CHECK_THREAD_BOUNDED(INDEX1D, numPlates);
  // Plate geometry entries follow the rigid-body and beam entries.
  double3 geometry = geometries[numBodies+numBeams+index];
  double a = geometry.x;
  double b = geometry.y;
  double rho = materials[index].x;
  double th = materials[index].w;
  // Plate DOFs start after all rigid-body and beam DOFs, 36 per plate.
  uint offset = 3*numBodies+12*numBeams+36*index;
  // Node 1: rows 0-2 (position), 3-5 (a-scaled), 6-8 (b-scaled).
  dst[offset+0] = rho*th*src[0+offset]*1.370634920634921E-1+rho*th*src[9+offset]*4.865079365079365E-2+rho*th*src[18+offset]*1.563492063492063E-2+rho*th*src[27+offset]*4.865079365079365E-2+a*rho*th*src[3+offset]*1.829365079365079E-2-a*rho*th*src[12+offset]*1.087301587301587E-2-a*rho*th*src[21+offset]*4.603174603174603E-3+a*rho*th*src[30+offset]*7.896825396825397E-3+b*rho*th*src[6+offset]*1.829365079365079E-2+b*rho*th*src[15+offset]*7.896825396825397E-3-b*rho*th*src[24+offset]*4.603174603174603E-3-b*rho*th*src[33+offset]*1.087301587301587E-2;
  dst[offset+1] = rho*th*src[1+offset]*1.370634920634921E-1+rho*th*src[10+offset]*4.865079365079365E-2+rho*th*src[19+offset]*1.563492063492063E-2+rho*th*src[28+offset]*4.865079365079365E-2+a*rho*th*src[4+offset]*1.829365079365079E-2-a*rho*th*src[13+offset]*1.087301587301587E-2-a*rho*th*src[22+offset]*4.603174603174603E-3+a*rho*th*src[31+offset]*7.896825396825397E-3+b*rho*th*src[7+offset]*1.829365079365079E-2+b*rho*th*src[16+offset]*7.896825396825397E-3-b*rho*th*src[25+offset]*4.603174603174603E-3-b*rho*th*src[34+offset]*1.087301587301587E-2;
  dst[offset+2] = rho*th*src[2+offset]*1.370634920634921E-1+rho*th*src[11+offset]*4.865079365079365E-2+rho*th*src[20+offset]*1.563492063492063E-2+rho*th*src[29+offset]*4.865079365079365E-2+a*rho*th*src[5+offset]*1.829365079365079E-2-a*rho*th*src[14+offset]*1.087301587301587E-2-a*rho*th*src[23+offset]*4.603174603174603E-3+a*rho*th*src[32+offset]*7.896825396825397E-3+b*rho*th*src[8+offset]*1.829365079365079E-2+b*rho*th*src[17+offset]*7.896825396825397E-3-b*rho*th*src[26+offset]*4.603174603174603E-3-b*rho*th*src[35+offset]*1.087301587301587E-2;
  dst[offset+3] = a*rho*th*src[0+offset]*1.829365079365079E-2+a*rho*th*src[9+offset]*1.087301587301587E-2+a*rho*th*src[18+offset]*4.603174603174603E-3+a*rho*th*src[27+offset]*7.896825396825397E-3+(a*a)*rho*th*src[3+offset]*(1.0/3.15E2)-(a*a)*rho*th*src[12+offset]*(1.0/4.2E2)-(a*a)*rho*th*src[21+offset]*(1.0/8.4E2)+(a*a)*rho*th*src[30+offset]*(1.0/6.3E2)+a*b*rho*th*src[6+offset]*(1.0/4.0E2)+a*b*rho*th*src[15+offset]*(1.0/6.0E2)-a*b*rho*th*src[24+offset]*(1.0/9.0E2)-a*b*rho*th*src[33+offset]*(1.0/6.0E2);
  dst[offset+4] = a*rho*th*src[1+offset]*1.829365079365079E-2+a*rho*th*src[10+offset]*1.087301587301587E-2+a*rho*th*src[19+offset]*4.603174603174603E-3+a*rho*th*src[28+offset]*7.896825396825397E-3+(a*a)*rho*th*src[4+offset]*(1.0/3.15E2)-(a*a)*rho*th*src[13+offset]*(1.0/4.2E2)-(a*a)*rho*th*src[22+offset]*(1.0/8.4E2)+(a*a)*rho*th*src[31+offset]*(1.0/6.3E2)+a*b*rho*th*src[7+offset]*(1.0/4.0E2)+a*b*rho*th*src[16+offset]*(1.0/6.0E2)-a*b*rho*th*src[25+offset]*(1.0/9.0E2)-a*b*rho*th*src[34+offset]*(1.0/6.0E2);
  dst[offset+5] = a*rho*th*src[2+offset]*1.829365079365079E-2+a*rho*th*src[11+offset]*1.087301587301587E-2+a*rho*th*src[20+offset]*4.603174603174603E-3+a*rho*th*src[29+offset]*7.896825396825397E-3+(a*a)*rho*th*src[5+offset]*(1.0/3.15E2)-(a*a)*rho*th*src[14+offset]*(1.0/4.2E2)-(a*a)*rho*th*src[23+offset]*(1.0/8.4E2)+(a*a)*rho*th*src[32+offset]*(1.0/6.3E2)+a*b*rho*th*src[8+offset]*(1.0/4.0E2)+a*b*rho*th*src[17+offset]*(1.0/6.0E2)-a*b*rho*th*src[26+offset]*(1.0/9.0E2)-a*b*rho*th*src[35+offset]*(1.0/6.0E2);
  dst[offset+6] = b*rho*th*src[0+offset]*1.829365079365079E-2+b*rho*th*src[9+offset]*7.896825396825397E-3+b*rho*th*src[18+offset]*4.603174603174603E-3+b*rho*th*src[27+offset]*1.087301587301587E-2+(b*b)*rho*th*src[6+offset]*(1.0/3.15E2)+(b*b)*rho*th*src[15+offset]*(1.0/6.3E2)-(b*b)*rho*th*src[24+offset]*(1.0/8.4E2)-(b*b)*rho*th*src[33+offset]*(1.0/4.2E2)+a*b*rho*th*src[3+offset]*(1.0/4.0E2)-a*b*rho*th*src[12+offset]*(1.0/6.0E2)-a*b*rho*th*src[21+offset]*(1.0/9.0E2)+a*b*rho*th*src[30+offset]*(1.0/6.0E2);
  dst[offset+7] = b*rho*th*src[1+offset]*1.829365079365079E-2+b*rho*th*src[10+offset]*7.896825396825397E-3+b*rho*th*src[19+offset]*4.603174603174603E-3+b*rho*th*src[28+offset]*1.087301587301587E-2+(b*b)*rho*th*src[7+offset]*(1.0/3.15E2)+(b*b)*rho*th*src[16+offset]*(1.0/6.3E2)-(b*b)*rho*th*src[25+offset]*(1.0/8.4E2)-(b*b)*rho*th*src[34+offset]*(1.0/4.2E2)+a*b*rho*th*src[4+offset]*(1.0/4.0E2)-a*b*rho*th*src[13+offset]*(1.0/6.0E2)-a*b*rho*th*src[22+offset]*(1.0/9.0E2)+a*b*rho*th*src[31+offset]*(1.0/6.0E2);
  dst[offset+8] = b*rho*th*src[2+offset]*1.829365079365079E-2+b*rho*th*src[11+offset]*7.896825396825397E-3+b*rho*th*src[20+offset]*4.603174603174603E-3+b*rho*th*src[29+offset]*1.087301587301587E-2+(b*b)*rho*th*src[8+offset]*(1.0/3.15E2)+(b*b)*rho*th*src[17+offset]*(1.0/6.3E2)-(b*b)*rho*th*src[26+offset]*(1.0/8.4E2)-(b*b)*rho*th*src[35+offset]*(1.0/4.2E2)+a*b*rho*th*src[5+offset]*(1.0/4.0E2)-a*b*rho*th*src[14+offset]*(1.0/6.0E2)-a*b*rho*th*src[23+offset]*(1.0/9.0E2)+a*b*rho*th*src[32+offset]*(1.0/6.0E2);
  // Node 2: rows 9-11 (position), 12-14 (a-scaled), 15-17 (b-scaled).
  dst[offset+9] = rho*th*src[0+offset]*4.865079365079365E-2+rho*th*src[9+offset]*1.370634920634921E-1+rho*th*src[18+offset]*4.865079365079365E-2+rho*th*src[27+offset]*1.563492063492063E-2+a*rho*th*src[3+offset]*1.087301587301587E-2-a*rho*th*src[12+offset]*1.829365079365079E-2-a*rho*th*src[21+offset]*7.896825396825397E-3+a*rho*th*src[30+offset]*4.603174603174603E-3+b*rho*th*src[6+offset]*7.896825396825397E-3+b*rho*th*src[15+offset]*1.829365079365079E-2-b*rho*th*src[24+offset]*1.087301587301587E-2-b*rho*th*src[33+offset]*4.603174603174603E-3;
  dst[offset+10] = rho*th*src[1+offset]*4.865079365079365E-2+rho*th*src[10+offset]*1.370634920634921E-1+rho*th*src[19+offset]*4.865079365079365E-2+rho*th*src[28+offset]*1.563492063492063E-2+a*rho*th*src[4+offset]*1.087301587301587E-2-a*rho*th*src[13+offset]*1.829365079365079E-2-a*rho*th*src[22+offset]*7.896825396825397E-3+a*rho*th*src[31+offset]*4.603174603174603E-3+b*rho*th*src[7+offset]*7.896825396825397E-3+b*rho*th*src[16+offset]*1.829365079365079E-2-b*rho*th*src[25+offset]*1.087301587301587E-2-b*rho*th*src[34+offset]*4.603174603174603E-3;
  dst[offset+11] = rho*th*src[2+offset]*4.865079365079365E-2+rho*th*src[11+offset]*1.370634920634921E-1+rho*th*src[20+offset]*4.865079365079365E-2+rho*th*src[29+offset]*1.563492063492063E-2+a*rho*th*src[5+offset]*1.087301587301587E-2-a*rho*th*src[14+offset]*1.829365079365079E-2-a*rho*th*src[23+offset]*7.896825396825397E-3+a*rho*th*src[32+offset]*4.603174603174603E-3+b*rho*th*src[8+offset]*7.896825396825397E-3+b*rho*th*src[17+offset]*1.829365079365079E-2-b*rho*th*src[26+offset]*1.087301587301587E-2-b*rho*th*src[35+offset]*4.603174603174603E-3;
  dst[offset+12] = a*rho*th*src[0+offset]*(-1.087301587301587E-2)-a*rho*th*src[9+offset]*1.829365079365079E-2-a*rho*th*src[18+offset]*7.896825396825397E-3-a*rho*th*src[27+offset]*4.603174603174603E-3-(a*a)*rho*th*src[3+offset]*(1.0/4.2E2)+(a*a)*rho*th*src[12+offset]*(1.0/3.15E2)+(a*a)*rho*th*src[21+offset]*(1.0/6.3E2)-(a*a)*rho*th*src[30+offset]*(1.0/8.4E2)-a*b*rho*th*src[6+offset]*(1.0/6.0E2)-a*b*rho*th*src[15+offset]*(1.0/4.0E2)+a*b*rho*th*src[24+offset]*(1.0/6.0E2)+a*b*rho*th*src[33+offset]*(1.0/9.0E2);
  dst[offset+13] = a*rho*th*src[1+offset]*(-1.087301587301587E-2)-a*rho*th*src[10+offset]*1.829365079365079E-2-a*rho*th*src[19+offset]*7.896825396825397E-3-a*rho*th*src[28+offset]*4.603174603174603E-3-(a*a)*rho*th*src[4+offset]*(1.0/4.2E2)+(a*a)*rho*th*src[13+offset]*(1.0/3.15E2)+(a*a)*rho*th*src[22+offset]*(1.0/6.3E2)-(a*a)*rho*th*src[31+offset]*(1.0/8.4E2)-a*b*rho*th*src[7+offset]*(1.0/6.0E2)-a*b*rho*th*src[16+offset]*(1.0/4.0E2)+a*b*rho*th*src[25+offset]*(1.0/6.0E2)+a*b*rho*th*src[34+offset]*(1.0/9.0E2);
  dst[offset+14] = a*rho*th*src[2+offset]*(-1.087301587301587E-2)-a*rho*th*src[11+offset]*1.829365079365079E-2-a*rho*th*src[20+offset]*7.896825396825397E-3-a*rho*th*src[29+offset]*4.603174603174603E-3-(a*a)*rho*th*src[5+offset]*(1.0/4.2E2)+(a*a)*rho*th*src[14+offset]*(1.0/3.15E2)+(a*a)*rho*th*src[23+offset]*(1.0/6.3E2)-(a*a)*rho*th*src[32+offset]*(1.0/8.4E2)-a*b*rho*th*src[8+offset]*(1.0/6.0E2)-a*b*rho*th*src[17+offset]*(1.0/4.0E2)+a*b*rho*th*src[26+offset]*(1.0/6.0E2)+a*b*rho*th*src[35+offset]*(1.0/9.0E2);
  dst[offset+15] = b*rho*th*src[0+offset]*7.896825396825397E-3+b*rho*th*src[9+offset]*1.829365079365079E-2+b*rho*th*src[18+offset]*1.087301587301587E-2+b*rho*th*src[27+offset]*4.603174603174603E-3+(b*b)*rho*th*src[6+offset]*(1.0/6.3E2)+(b*b)*rho*th*src[15+offset]*(1.0/3.15E2)-(b*b)*rho*th*src[24+offset]*(1.0/4.2E2)-(b*b)*rho*th*src[33+offset]*(1.0/8.4E2)+a*b*rho*th*src[3+offset]*(1.0/6.0E2)-a*b*rho*th*src[12+offset]*(1.0/4.0E2)-a*b*rho*th*src[21+offset]*(1.0/6.0E2)+a*b*rho*th*src[30+offset]*(1.0/9.0E2);
  dst[offset+16] = b*rho*th*src[1+offset]*7.896825396825397E-3+b*rho*th*src[10+offset]*1.829365079365079E-2+b*rho*th*src[19+offset]*1.087301587301587E-2+b*rho*th*src[28+offset]*4.603174603174603E-3+(b*b)*rho*th*src[7+offset]*(1.0/6.3E2)+(b*b)*rho*th*src[16+offset]*(1.0/3.15E2)-(b*b)*rho*th*src[25+offset]*(1.0/4.2E2)-(b*b)*rho*th*src[34+offset]*(1.0/8.4E2)+a*b*rho*th*src[4+offset]*(1.0/6.0E2)-a*b*rho*th*src[13+offset]*(1.0/4.0E2)-a*b*rho*th*src[22+offset]*(1.0/6.0E2)+a*b*rho*th*src[31+offset]*(1.0/9.0E2);
  dst[offset+17] = b*rho*th*src[2+offset]*7.896825396825397E-3+b*rho*th*src[11+offset]*1.829365079365079E-2+b*rho*th*src[20+offset]*1.087301587301587E-2+b*rho*th*src[29+offset]*4.603174603174603E-3+(b*b)*rho*th*src[8+offset]*(1.0/6.3E2)+(b*b)*rho*th*src[17+offset]*(1.0/3.15E2)-(b*b)*rho*th*src[26+offset]*(1.0/4.2E2)-(b*b)*rho*th*src[35+offset]*(1.0/8.4E2)+a*b*rho*th*src[5+offset]*(1.0/6.0E2)-a*b*rho*th*src[14+offset]*(1.0/4.0E2)-a*b*rho*th*src[23+offset]*(1.0/6.0E2)+a*b*rho*th*src[32+offset]*(1.0/9.0E2);
  // Node 3: rows 18-20 (position), 21-23 (a-scaled), 24-26 (b-scaled).
  dst[offset+18] = rho*th*src[0+offset]*1.563492063492063E-2+rho*th*src[9+offset]*4.865079365079365E-2+rho*th*src[18+offset]*1.370634920634921E-1+rho*th*src[27+offset]*4.865079365079365E-2+a*rho*th*src[3+offset]*4.603174603174603E-3-a*rho*th*src[12+offset]*7.896825396825397E-3-a*rho*th*src[21+offset]*1.829365079365079E-2+a*rho*th*src[30+offset]*1.087301587301587E-2+b*rho*th*src[6+offset]*4.603174603174603E-3+b*rho*th*src[15+offset]*1.087301587301587E-2-b*rho*th*src[24+offset]*1.829365079365079E-2-b*rho*th*src[33+offset]*7.896825396825397E-3;
  dst[offset+19] = rho*th*src[1+offset]*1.563492063492063E-2+rho*th*src[10+offset]*4.865079365079365E-2+rho*th*src[19+offset]*1.370634920634921E-1+rho*th*src[28+offset]*4.865079365079365E-2+a*rho*th*src[4+offset]*4.603174603174603E-3-a*rho*th*src[13+offset]*7.896825396825397E-3-a*rho*th*src[22+offset]*1.829365079365079E-2+a*rho*th*src[31+offset]*1.087301587301587E-2+b*rho*th*src[7+offset]*4.603174603174603E-3+b*rho*th*src[16+offset]*1.087301587301587E-2-b*rho*th*src[25+offset]*1.829365079365079E-2-b*rho*th*src[34+offset]*7.896825396825397E-3;
  dst[offset+20] = rho*th*src[2+offset]*1.563492063492063E-2+rho*th*src[11+offset]*4.865079365079365E-2+rho*th*src[20+offset]*1.370634920634921E-1+rho*th*src[29+offset]*4.865079365079365E-2+a*rho*th*src[5+offset]*4.603174603174603E-3-a*rho*th*src[14+offset]*7.896825396825397E-3-a*rho*th*src[23+offset]*1.829365079365079E-2+a*rho*th*src[32+offset]*1.087301587301587E-2+b*rho*th*src[8+offset]*4.603174603174603E-3+b*rho*th*src[17+offset]*1.087301587301587E-2-b*rho*th*src[26+offset]*1.829365079365079E-2-b*rho*th*src[35+offset]*7.896825396825397E-3;
  dst[offset+21] = a*rho*th*src[0+offset]*(-4.603174603174603E-3)-a*rho*th*src[9+offset]*7.896825396825397E-3-a*rho*th*src[18+offset]*1.829365079365079E-2-a*rho*th*src[27+offset]*1.087301587301587E-2-(a*a)*rho*th*src[3+offset]*(1.0/8.4E2)+(a*a)*rho*th*src[12+offset]*(1.0/6.3E2)+(a*a)*rho*th*src[21+offset]*(1.0/3.15E2)-(a*a)*rho*th*src[30+offset]*(1.0/4.2E2)-a*b*rho*th*src[6+offset]*(1.0/9.0E2)-a*b*rho*th*src[15+offset]*(1.0/6.0E2)+a*b*rho*th*src[24+offset]*(1.0/4.0E2)+a*b*rho*th*src[33+offset]*(1.0/6.0E2);
  dst[offset+22] = a*rho*th*src[1+offset]*(-4.603174603174603E-3)-a*rho*th*src[10+offset]*7.896825396825397E-3-a*rho*th*src[19+offset]*1.829365079365079E-2-a*rho*th*src[28+offset]*1.087301587301587E-2-(a*a)*rho*th*src[4+offset]*(1.0/8.4E2)+(a*a)*rho*th*src[13+offset]*(1.0/6.3E2)+(a*a)*rho*th*src[22+offset]*(1.0/3.15E2)-(a*a)*rho*th*src[31+offset]*(1.0/4.2E2)-a*b*rho*th*src[7+offset]*(1.0/9.0E2)-a*b*rho*th*src[16+offset]*(1.0/6.0E2)+a*b*rho*th*src[25+offset]*(1.0/4.0E2)+a*b*rho*th*src[34+offset]*(1.0/6.0E2);
  dst[offset+23] = a*rho*th*src[2+offset]*(-4.603174603174603E-3)-a*rho*th*src[11+offset]*7.896825396825397E-3-a*rho*th*src[20+offset]*1.829365079365079E-2-a*rho*th*src[29+offset]*1.087301587301587E-2-(a*a)*rho*th*src[5+offset]*(1.0/8.4E2)+(a*a)*rho*th*src[14+offset]*(1.0/6.3E2)+(a*a)*rho*th*src[23+offset]*(1.0/3.15E2)-(a*a)*rho*th*src[32+offset]*(1.0/4.2E2)-a*b*rho*th*src[8+offset]*(1.0/9.0E2)-a*b*rho*th*src[17+offset]*(1.0/6.0E2)+a*b*rho*th*src[26+offset]*(1.0/4.0E2)+a*b*rho*th*src[35+offset]*(1.0/6.0E2);
  dst[offset+24] = b*rho*th*src[0+offset]*(-4.603174603174603E-3)-b*rho*th*src[9+offset]*1.087301587301587E-2-b*rho*th*src[18+offset]*1.829365079365079E-2-b*rho*th*src[27+offset]*7.896825396825397E-3-(b*b)*rho*th*src[6+offset]*(1.0/8.4E2)-(b*b)*rho*th*src[15+offset]*(1.0/4.2E2)+(b*b)*rho*th*src[24+offset]*(1.0/3.15E2)+(b*b)*rho*th*src[33+offset]*(1.0/6.3E2)-a*b*rho*th*src[3+offset]*(1.0/9.0E2)+a*b*rho*th*src[12+offset]*(1.0/6.0E2)+a*b*rho*th*src[21+offset]*(1.0/4.0E2)-a*b*rho*th*src[30+offset]*(1.0/6.0E2);
  dst[offset+25] = b*rho*th*src[1+offset]*(-4.603174603174603E-3)-b*rho*th*src[10+offset]*1.087301587301587E-2-b*rho*th*src[19+offset]*1.829365079365079E-2-b*rho*th*src[28+offset]*7.896825396825397E-3-(b*b)*rho*th*src[7+offset]*(1.0/8.4E2)-(b*b)*rho*th*src[16+offset]*(1.0/4.2E2)+(b*b)*rho*th*src[25+offset]*(1.0/3.15E2)+(b*b)*rho*th*src[34+offset]*(1.0/6.3E2)-a*b*rho*th*src[4+offset]*(1.0/9.0E2)+a*b*rho*th*src[13+offset]*(1.0/6.0E2)+a*b*rho*th*src[22+offset]*(1.0/4.0E2)-a*b*rho*th*src[31+offset]*(1.0/6.0E2);
  dst[offset+26] = b*rho*th*src[2+offset]*(-4.603174603174603E-3)-b*rho*th*src[11+offset]*1.087301587301587E-2-b*rho*th*src[20+offset]*1.829365079365079E-2-b*rho*th*src[29+offset]*7.896825396825397E-3-(b*b)*rho*th*src[8+offset]*(1.0/8.4E2)-(b*b)*rho*th*src[17+offset]*(1.0/4.2E2)+(b*b)*rho*th*src[26+offset]*(1.0/3.15E2)+(b*b)*rho*th*src[35+offset]*(1.0/6.3E2)-a*b*rho*th*src[5+offset]*(1.0/9.0E2)+a*b*rho*th*src[14+offset]*(1.0/6.0E2)+a*b*rho*th*src[23+offset]*(1.0/4.0E2)-a*b*rho*th*src[32+offset]*(1.0/6.0E2);
  // Node 4: rows 27-29 (position), 30-32 (a-scaled), 33-35 (b-scaled).
  dst[offset+27] = rho*th*src[0+offset]*4.865079365079365E-2+rho*th*src[9+offset]*1.563492063492063E-2+rho*th*src[18+offset]*4.865079365079365E-2+rho*th*src[27+offset]*1.370634920634921E-1+a*rho*th*src[3+offset]*7.896825396825397E-3-a*rho*th*src[12+offset]*4.603174603174603E-3-a*rho*th*src[21+offset]*1.087301587301587E-2+a*rho*th*src[30+offset]*1.829365079365079E-2+b*rho*th*src[6+offset]*1.087301587301587E-2+b*rho*th*src[15+offset]*4.603174603174603E-3-b*rho*th*src[24+offset]*7.896825396825397E-3-b*rho*th*src[33+offset]*1.829365079365079E-2;
  dst[offset+28] = rho*th*src[1+offset]*4.865079365079365E-2+rho*th*src[10+offset]*1.563492063492063E-2+rho*th*src[19+offset]*4.865079365079365E-2+rho*th*src[28+offset]*1.370634920634921E-1+a*rho*th*src[4+offset]*7.896825396825397E-3-a*rho*th*src[13+offset]*4.603174603174603E-3-a*rho*th*src[22+offset]*1.087301587301587E-2+a*rho*th*src[31+offset]*1.829365079365079E-2+b*rho*th*src[7+offset]*1.087301587301587E-2+b*rho*th*src[16+offset]*4.603174603174603E-3-b*rho*th*src[25+offset]*7.896825396825397E-3-b*rho*th*src[34+offset]*1.829365079365079E-2;
  dst[offset+29] = rho*th*src[2+offset]*4.865079365079365E-2+rho*th*src[11+offset]*1.563492063492063E-2+rho*th*src[20+offset]*4.865079365079365E-2+rho*th*src[29+offset]*1.370634920634921E-1+a*rho*th*src[5+offset]*7.896825396825397E-3-a*rho*th*src[14+offset]*4.603174603174603E-3-a*rho*th*src[23+offset]*1.087301587301587E-2+a*rho*th*src[32+offset]*1.829365079365079E-2+b*rho*th*src[8+offset]*1.087301587301587E-2+b*rho*th*src[17+offset]*4.603174603174603E-3-b*rho*th*src[26+offset]*7.896825396825397E-3-b*rho*th*src[35+offset]*1.829365079365079E-2;
  dst[offset+30] = a*rho*th*src[0+offset]*7.896825396825397E-3+a*rho*th*src[9+offset]*4.603174603174603E-3+a*rho*th*src[18+offset]*1.087301587301587E-2+a*rho*th*src[27+offset]*1.829365079365079E-2+(a*a)*rho*th*src[3+offset]*(1.0/6.3E2)-(a*a)*rho*th*src[12+offset]*(1.0/8.4E2)-(a*a)*rho*th*src[21+offset]*(1.0/4.2E2)+(a*a)*rho*th*src[30+offset]*(1.0/3.15E2)+a*b*rho*th*src[6+offset]*(1.0/6.0E2)+a*b*rho*th*src[15+offset]*(1.0/9.0E2)-a*b*rho*th*src[24+offset]*(1.0/6.0E2)-a*b*rho*th*src[33+offset]*(1.0/4.0E2);
  dst[offset+31] = a*rho*th*src[1+offset]*7.896825396825397E-3+a*rho*th*src[10+offset]*4.603174603174603E-3+a*rho*th*src[19+offset]*1.087301587301587E-2+a*rho*th*src[28+offset]*1.829365079365079E-2+(a*a)*rho*th*src[4+offset]*(1.0/6.3E2)-(a*a)*rho*th*src[13+offset]*(1.0/8.4E2)-(a*a)*rho*th*src[22+offset]*(1.0/4.2E2)+(a*a)*rho*th*src[31+offset]*(1.0/3.15E2)+a*b*rho*th*src[7+offset]*(1.0/6.0E2)+a*b*rho*th*src[16+offset]*(1.0/9.0E2)-a*b*rho*th*src[25+offset]*(1.0/6.0E2)-a*b*rho*th*src[34+offset]*(1.0/4.0E2);
  dst[offset+32] = a*rho*th*src[2+offset]*7.896825396825397E-3+a*rho*th*src[11+offset]*4.603174603174603E-3+a*rho*th*src[20+offset]*1.087301587301587E-2+a*rho*th*src[29+offset]*1.829365079365079E-2+(a*a)*rho*th*src[5+offset]*(1.0/6.3E2)-(a*a)*rho*th*src[14+offset]*(1.0/8.4E2)-(a*a)*rho*th*src[23+offset]*(1.0/4.2E2)+(a*a)*rho*th*src[32+offset]*(1.0/3.15E2)+a*b*rho*th*src[8+offset]*(1.0/6.0E2)+a*b*rho*th*src[17+offset]*(1.0/9.0E2)-a*b*rho*th*src[26+offset]*(1.0/6.0E2)-a*b*rho*th*src[35+offset]*(1.0/4.0E2);
  dst[offset+33] = b*rho*th*src[0+offset]*(-1.087301587301587E-2)-b*rho*th*src[9+offset]*4.603174603174603E-3-b*rho*th*src[18+offset]*7.896825396825397E-3-b*rho*th*src[27+offset]*1.829365079365079E-2-(b*b)*rho*th*src[6+offset]*(1.0/4.2E2)-(b*b)*rho*th*src[15+offset]*(1.0/8.4E2)+(b*b)*rho*th*src[24+offset]*(1.0/6.3E2)+(b*b)*rho*th*src[33+offset]*(1.0/3.15E2)-a*b*rho*th*src[3+offset]*(1.0/6.0E2)+a*b*rho*th*src[12+offset]*(1.0/9.0E2)+a*b*rho*th*src[21+offset]*(1.0/6.0E2)-a*b*rho*th*src[30+offset]*(1.0/4.0E2);
  dst[offset+34] = b*rho*th*src[1+offset]*(-1.087301587301587E-2)-b*rho*th*src[10+offset]*4.603174603174603E-3-b*rho*th*src[19+offset]*7.896825396825397E-3-b*rho*th*src[28+offset]*1.829365079365079E-2-(b*b)*rho*th*src[7+offset]*(1.0/4.2E2)-(b*b)*rho*th*src[16+offset]*(1.0/8.4E2)+(b*b)*rho*th*src[25+offset]*(1.0/6.3E2)+(b*b)*rho*th*src[34+offset]*(1.0/3.15E2)-a*b*rho*th*src[4+offset]*(1.0/6.0E2)+a*b*rho*th*src[13+offset]*(1.0/9.0E2)+a*b*rho*th*src[22+offset]*(1.0/6.0E2)-a*b*rho*th*src[31+offset]*(1.0/4.0E2);
  dst[offset+35] = b*rho*th*src[2+offset]*(-1.087301587301587E-2)-b*rho*th*src[11+offset]*4.603174603174603E-3-b*rho*th*src[20+offset]*7.896825396825397E-3-b*rho*th*src[29+offset]*1.829365079365079E-2-(b*b)*rho*th*src[8+offset]*(1.0/4.2E2)-(b*b)*rho*th*src[17+offset]*(1.0/8.4E2)+(b*b)*rho*th*src[26+offset]*(1.0/6.3E2)+(b*b)*rho*th*src[35+offset]*(1.0/3.15E2)-a*b*rho*th*src[5+offset]*(1.0/6.0E2)+a*b*rho*th*src[14+offset]*(1.0/9.0E2)+a*b*rho*th*src[23+offset]*(1.0/6.0E2)-a*b*rho*th*src[32+offset]*(1.0/4.0E2);
}
// Multiply the 3 DOFs (x, y, theta) of each planar body by its diagonal
// mass matrix diag(m, m, I), writing dst = M*src. materials[index] packs
// (mass, inertia). One thread per 2D body.
__global__ void multiplyByBody2DMass(double2* materials, double* src, double* dst, int numBodies, int numBeams, int numPlates, int numBody2Ds) {
  INIT_CHECK_THREAD_BOUNDED(INDEX1D, numBody2Ds);
  double2 material = materials[index];
  // 2D-body DOFs follow all rigid-body, beam, and plate DOFs, 3 per body.
  uint base = 3*numBodies+12*numBeams+36*numPlates+3*index;
  dst[base+0] = material.x*src[base+0]; // m * vx
  dst[base+1] = material.x*src[base+1]; // m * vy
  dst[base+2] = material.y*src[base+2]; // I * omega
}
// Assemble the applied-impulse vector k = M*v + h*(f - fElastic).
// Each subsystem (rigid bodies, beams, plates, 2D bodies, shell mesh)
// first writes its mass-matrix product M*v into its slice of k; the final
// axpbypcz then folds in the force vectors scaled by the time step h.
// Kernel launches are asynchronous and unchecked here — errors surface at
// the next synchronizing call. Returns 0.
int System::buildAppliedImpulseVector() {
  // build k
  updateElasticForces();
  // k <- M*v, one kernel/view product per subsystem slice.
  if(bodies.size()) multiplyByMass<<<BLOCKS(3*bodies.size()),THREADS>>>(CASTD1(mass_d), CASTD1(v_d), CASTD1(k_d), 3*bodies.size());
  if(beams.size()) multiplyByBeamMass<<<BLOCKS(beams.size()),THREADS>>>(CASTD3(contactGeometry_d), CASTD3(materialsBeam_d), CASTD1(v_d), CASTD1(k_d), bodies.size(), beams.size());
  if(plates.size()) multiplyByPlateMass<<<BLOCKS(plates.size()),THREADS>>>(CASTD3(contactGeometry_d), CASTD4(materialsPlate_d), CASTD1(v_d), CASTD1(k_d), bodies.size(), beams.size(), plates.size());
  if(body2Ds.size()) multiplyByBody2DMass<<<BLOCKS(body2Ds.size()),THREADS>>>(CASTD2(materialsBody2D_d), CASTD1(v_d), CASTD1(k_d), bodies.size(), beams.size(), plates.size(), body2Ds.size());
  if(nodes_h.size()) cusp::multiply(mass_shellMesh,v_shellMesh,k_shellMesh);
  //cusp::blas::axpy(fElastic,fApplied,-1.0); //TODO: Come up with a fix for applied forces
  // k = h*f - h*fElastic + 1.0*k  (k currently holds M*v)
  cusp::blas::axpbypcz(f,fElastic,k,k,h,-h,1.0);
  return 0;
}
// Fill the contact portion of the stabilization vector b. Each collision
// contributes a 3-entry triplet placed after the bilateral-constraint
// entries: the normal entry gets a penetration/timeStep correction, the
// two tangential entries are zeroed. One thread per collision.
__global__ void buildStabilization(double* b, double4* normalsAndPenetrations, double timeStep, uint offsetBilateralConstraints, uint numCollisions) {
  INIT_CHECK_THREAD_BOUNDED(INDEX1D, numCollisions);
  uint entry = offsetBilateralConstraints + 3*index;
  b[entry] = normalsAndPenetrations[index].w/timeStep; // .w carries the penetration depth
  b[entry+1] = 0;
  b[entry+2] = 0;
}
// Fill the bilateral-constraint entries of the stabilization vector b with
// the constraint violation divided by the time step. One thread per
// bilateral constraint.
// infoConstraintBilateralDOF[index] packs (prescribed velocity, start
// time, reference value p0); before the start time the prescribed
// velocity is treated as zero.
__global__ void buildStabilizationBilateral(double* b, double3* infoConstraintBilateralDOF, int2* constraintBilateralDOF, double* p, double timeStep, double time, uint numBilateralConstraints) {
  INIT_CHECK_THREAD_BOUNDED(INDEX1D, numBilateralConstraints);
  int2 dofs = constraintBilateralDOF[index];
  double3 info = infoConstraintBilateralDOF[index];
  double velocity = (time < info.y) ? 0.0 : info.x;
  // dofs.y < 0: constrain p[dofs.x] against the stored reference p0
  // instead of against a second generalized coordinate.
  double target = (dofs.y < 0) ? info.z : p[dofs.y];
  double violation = p[dofs.x] - target - velocity*(time - info.y);
  b[index] = violation/timeStep;
}
// Fill the stabilization entries for spherical joints connecting a shell
// node to a 2D body: position mismatch between the body-fixed attachment
// point (pHat, rotated by the body angle) and the shell node, divided by
// the time step. Rows start after the numDOFConstraints bilateral entries,
// 3 per joint. One thread per constraint.
__global__ void buildStabilizationSpherical_ShellNodeToBody2D(double* b, int3* constraints, double3* pHats, double* p, double timeStep, uint numDOFConstraints, int numConstraints) {
  INIT_CHECK_THREAD_BOUNDED(INDEX1D, numConstraints);
  int3 pair = constraints[index];
  double3 attach = pHats[index];
  int nodeIdx = pair.x; // shell-node offset into p
  int bodyIdx = pair.y; // 2D-body offset into p (x, y, angle)
  double angle = p[bodyIdx+2];
  double c = cos(angle);
  double s = sin(angle);
  uint row = 3*index + numDOFConstraints;
  // In-plane mismatch: body position + rotated attachment - node position.
  b[row+0] = (p[bodyIdx] + attach.x*c - attach.y*s - p[nodeIdx])/timeStep;
  b[row+1] = (p[bodyIdx+1] + attach.x*s + attach.y*c - p[nodeIdx+1])/timeStep;
  // Out-of-plane: node z pinned to the attachment's z value.
  b[row+2] = (attach.z - p[nodeIdx+2])/timeStep;
}
// Assemble the Schur-complement right-hand side r = D*Minv*k + b, where b
// collects the stabilization terms from bilateral constraints, spherical
// shell-node-to-body joints, and contacts (in that row order). Resizes the
// backing vectors and rebuilds the cusp views each call. Returns 0.
// NOTE(review): `mass` evidently acts as the inverse mass operator here
// (cf. MinvDT in buildSchurMatrix) — confirm against its construction.
int System::buildSchurVector() {
  // build r
  // Total constraint rows: 3 per contact + bilaterals + 3 per spherical joint.
  r_d.resize(3*collisionDetector->numCollisions+constraintsBilateralDOF_d.size()+3*constraintsSpherical_ShellNodeToBody2D_d.size());
  b_d.resize(3*collisionDetector->numCollisions+constraintsBilateralDOF_d.size()+3*constraintsSpherical_ShellNodeToBody2D_d.size());
  // TODO: There's got to be a better way to do this...
  //r.resize(3*collisionDetector->numCollisions);
  // Resizing may reallocate, so the views must be rebuilt from raw pointers.
  thrust::device_ptr<double> wrapped_device_r(CASTD1(r_d));
  r = DeviceValueArrayView(wrapped_device_r, wrapped_device_r + r_d.size());
  thrust::device_ptr<double> wrapped_device_b(CASTD1(b_d));
  b = DeviceValueArrayView(wrapped_device_b, wrapped_device_b + b_d.size());
  // r = D * (Minv * k)
  cusp::multiply(mass,k,tmp);
  cusp::multiply(D,tmp,r);
  // Fill b: bilaterals first, then spherical joints, then contacts, each
  // kernel offsetting past the rows written by the previous ones.
  if(constraintsBilateralDOF_d.size()) buildStabilizationBilateral<<<BLOCKS(constraintsBilateralDOF_d.size()),THREADS>>>(CASTD1(b_d), CASTD3(infoConstraintBilateralDOF_d), CASTI2(constraintsBilateralDOF_d), CASTD1(p_d), h, time, constraintsBilateralDOF_d.size());
  if(constraintsSpherical_ShellNodeToBody2D_d.size()) buildStabilizationSpherical_ShellNodeToBody2D<<<BLOCKS(constraintsSpherical_ShellNodeToBody2D_d.size()),THREADS>>>(CASTD1(b_d), CASTI3(constraintsSpherical_ShellNodeToBody2D_d), CASTD3(pSpherical_ShellNodeToBody2D_d), CASTD1(p_d), h, constraintsBilateralDOF_d.size(), constraintsSpherical_ShellNodeToBody2D_d.size());
  if(collisionDetector->numCollisions) buildStabilization<<<BLOCKS(collisionDetector->numCollisions),THREADS>>>(CASTD1(b_d), CASTD4(collisionDetector->normalsAndPenetrations_d), h, constraintsBilateralDOF_d.size()+3*constraintsSpherical_ShellNodeToBody2D_d.size(), collisionDetector->numCollisions);
  // r += b
  cusp::blas::axpy(b,r,1.0);
  return 0;
}
// Build the Schur-complement matrix N = D * Minv * DT for the contact
// solver. The intermediate product is cached in MinvDT. Returns 0.
// NOTE(review): `mass` evidently acts as the inverse mass operator here
// (the intermediate is named MinvDT) — confirm against its construction.
int System::buildSchurMatrix() {
  // build N
  cusp::multiply(mass,DT,MinvDT);
  cusp::multiply(D,MinvDT,N);
  return 0;
}
// Extract the first (normal) component of each 3-entry contact triplet:
// dst[i] = src[3*i]. src is packed per collision; one thread per collision.
__global__ void getNormalComponent(double* src, double* dst, uint numCollisions) {
  INIT_CHECK_THREAD_BOUNDED(INDEX1D, numCollisions);
  dst[index] = src[3*index];
}
// Compute the per-contact friction-cone violation
//   dst[i] = min(0, mu_i*gamma_n - ||gamma_t||),
// i.e. 0 when contact i satisfies the cone, negative otherwise.
// gamma is packed as (normal, tangent1, tangent2) triplets per collision;
// friction holds mu per collision. One thread per collision.
__global__ void calculateConeViolation(double* gamma, double* friction, double* dst, uint numCollisions) {
  INIT_CHECK_THREAD_BOUNDED(INDEX1D, numCollisions);
  double gamma_u = gamma[3*index+1];
  double gamma_v = gamma[3*index+2];
  // x*x instead of pow(x,2.0): exact squaring without the general pow routine.
  double gamma_t = sqrt(gamma_u*gamma_u + gamma_v*gamma_v);
  double coneViolation = friction[index]*gamma[3*index] - gamma_t; // TODO: Keep the friction indexing in mind for bilaterals
  if(coneViolation>0) coneViolation = 0;
  dst[index] = coneViolation;
}
// Measure how badly the current solution violates the cone-complementarity
// conditions. Returns a double4 (all zero when there are no collisions):
//   .x = most negative normal impulse (0 if all non-negative),
//   .y = most negative post-stabilization normal velocity (0 if all >= 0),
//   .z = complementarity residual dot(gamma_n, v_n),
//   .w = 2-norm of the per-contact friction-cone violations.
double4 System::getCCPViolation() {
  double4 violationCCP = make_double4(0,0,0,0);
  if(collisionDetector->numCollisions) {
    // Build normal impulse vector, gamma_n
    thrust::device_vector<double> gamma_n_d;
    gamma_n_d.resize(collisionDetector->numCollisions);
    thrust::device_ptr<double> wrapped_device_gamma_n(CASTD1(gamma_n_d));
    DeviceValueArrayView gamma_n = DeviceValueArrayView(wrapped_device_gamma_n, wrapped_device_gamma_n + gamma_n_d.size());
    getNormalComponent<<<BLOCKS(collisionDetector->numCollisions),THREADS>>>(CASTD1(gamma_d), CASTD1(gamma_n_d), collisionDetector->numCollisions);
    // Feasibility of impulses: normal components should be non-negative.
    violationCCP.x = Thrust_Min(gamma_n_d);
    if(violationCCP.x > 0) violationCCP.x = 0;
    // Build normal velocity vector, v_n
    thrust::device_vector<double> tmp_gamma_d;
    tmp_gamma_d.resize(3*collisionDetector->numCollisions);
    thrust::device_ptr<double> wrapped_device_tmp_gamma(CASTD1(tmp_gamma_d));
    DeviceValueArrayView tmp_gamma = DeviceValueArrayView(wrapped_device_tmp_gamma, wrapped_device_tmp_gamma + tmp_gamma_d.size());
    thrust::device_vector<double> v_n_d;
    v_n_d.resize(collisionDetector->numCollisions);
    thrust::device_ptr<double> wrapped_device_v_n(CASTD1(v_n_d));
    DeviceValueArrayView v_n = DeviceValueArrayView(wrapped_device_v_n, wrapped_device_v_n + v_n_d.size());
    // tmp_gamma = D*v + b: constraint-space velocity plus stabilization.
    cusp::multiply(D,v,tmp_gamma);
    cusp::blas::axpy(b,tmp_gamma,1.0);
    getNormalComponent<<<BLOCKS(collisionDetector->numCollisions),THREADS>>>(CASTD1(tmp_gamma_d), CASTD1(v_n_d), collisionDetector->numCollisions);
    // Feasibility of velocities: normal components should be non-negative.
    violationCCP.y = Thrust_Min(v_n_d);
    if(violationCCP.y > 0) violationCCP.y = 0;
    // Check complementarity condition
    violationCCP.z = cusp::blas::dot(gamma_n,v_n);
    // Check friction cone condition
    // NOTE: v_n_d is reused as scratch here — it holds cone violations, not
    // normal velocities, from this point on.
    calculateConeViolation<<<BLOCKS(collisionDetector->numCollisions),THREADS>>>(CASTD1(gamma_d), CASTD1(friction_d), CASTD1(v_n_d), collisionDetector->numCollisions);
    violationCCP.w = cusp::blas::nrm2(v_n);
  }
  return violationCCP;
}
// Potential energy of the external force vector: -dot(f, p).
// NOTE(review): this is only the potential energy if f holds constant
// conservative forces (e.g. gravity) — confirm how f is populated.
double System::getPotentialEnergy() {
  return -cusp::blas::dot(f,p);
}
// Kinetic energy 0.5 * v^T * M * v. Each subsystem writes its M*v slice
// into tmp (same per-subsystem products as buildAppliedImpulseVector),
// then a single dot product finishes the quadratic form.
double System::getKineticEnergy() {
  if(bodies.size()) multiplyByMass<<<BLOCKS(3*bodies.size()),THREADS>>>(CASTD1(mass_d), CASTD1(v_d), CASTD1(tmp_d), 3*bodies.size());
  if(beams.size()) multiplyByBeamMass<<<BLOCKS(beams.size()),THREADS>>>(CASTD3(contactGeometry_d), CASTD3(materialsBeam_d), CASTD1(v_d), CASTD1(tmp_d), bodies.size(), beams.size());
  if(plates.size()) multiplyByPlateMass<<<BLOCKS(plates.size()),THREADS>>>(CASTD3(contactGeometry_d), CASTD4(materialsPlate_d), CASTD1(v_d), CASTD1(tmp_d), bodies.size(), beams.size(), plates.size());
  if(body2Ds.size()) multiplyByBody2DMass<<<BLOCKS(body2Ds.size()),THREADS>>>(CASTD2(materialsBody2D_d), CASTD1(v_d), CASTD1(tmp_d), bodies.size(), beams.size(), plates.size(), body2Ds.size());
  if(nodes_h.size()) cusp::multiply(mass_shellMesh,v_shellMesh,tmp_shellMesh);
  return 0.5*cusp::blas::dot(v,tmp);
}
// Total elastic strain energy: sum of the per-element strain energies of
// beams, plates, and the shell mesh (each reduced on the device).
double System::getStrainEnergy() {
  double strainEnergy = 0;
  if(beams.size()) strainEnergy+=thrust::reduce(strainEnergy_d.begin(),strainEnergy_d.end());
  if(plates.size()) strainEnergy+=thrust::reduce(strainEnergyPlate_d.begin(),strainEnergyPlate_d.end());
  if(nodes_h.size()) strainEnergy+=thrust::reduce(strainEnergyShellMesh_d.begin(),strainEnergyShellMesh_d.end());
  return strainEnergy;
}
// Total mechanical energy: potential + kinetic + strain.
double System::getTotalEnergy() {
  return getPotentialEnergy()+getKineticEnergy()+getStrainEnergy();
}
// Write per-collision contact data to a CSV-style file: a header line with
// system counts, then one line per collision with the two body identifiers,
// the contact location, and the normal and friction force vectors.
// Overwrites the file if it exists. Returns 0 (no I/O error checking).
int System::outputContactForcePerCollision(string filename) {
  ofstream filestream;
  filestream.open(filename.c_str());
  // Refresh the per-collision force vectors before exporting.
  calculateContactForcePerCollision();
  // copy device data to host
  thrust::host_vector<double3> collisionLocations_h = collisionDetector->collisionLocations_d;
  thrust::host_vector<double3> normalForcePerCollision_h = normalForcePerCollision_d;
  thrust::host_vector<double3> frictionForcePerCollision_h = frictionForcePerCollision_d;
  thrust::host_vector<int4> collisionMap_h = collisionMap_d;
  thrust::host_vector<uint> collisionIdentifierA_h = collisionDetector->collisionIdentifierA_d;
  thrust::host_vector<uint> collisionIdentifierB_h = collisionDetector->collisionIdentifierB_d;
  // Header: numCollisions, numBodies, numBeams, numPlates(+shells), numBody2Ds.
  filestream << collisionDetector->numCollisions << ", " << bodies.size() << ", " << beams.size() << ", " << plates.size()+shellConnectivities_h.size() << ", " << body2Ds.size() << ", " << endl;
  for(int i=0; i<collisionDetector->numCollisions;i++) {
    // collisionMap's .x field holds the owning body identifier.
    int bodyIdentifierA = collisionMap_h[collisionIdentifierA_h[i]].x;
    int bodyIdentifierB = collisionMap_h[collisionIdentifierB_h[i]].x;
    filestream << i << ", " << bodyIdentifierA << ", " << bodyIdentifierB << ", " << collisionLocations_h[i].x << ", " << collisionLocations_h[i].y << ", " << collisionLocations_h[i].z << ", " << normalForcePerCollision_h[i].x << ", " << normalForcePerCollision_h[i].y << ", " << normalForcePerCollision_h[i].z << ", " << frictionForcePerCollision_h[i].x << ", " << frictionForcePerCollision_h[i].y << ", " << frictionForcePerCollision_h[i].z << ", \n";
  }
  filestream.close();
  return 0;
}
int System::exportSystem(string filename) {
    // Writes a full snapshot of the system state in CSV-style text:
    // a header line of entity counts, then one line per rigid body, beam,
    // plate, mesh shell element, and 2D body (in that file order).
    // NOTE(review): importSystem() currently reads back only the rigid-body
    // section.
    ofstream filestream;
    filestream.open(filename.c_str());
    // Copy state vectors from device to host before writing.
    p_h = p_d;
    v_h = v_d;
    f_contact_h = f_contact_d;
    // Header: #bodies, #beams, #plates (+ mesh shells), #2D bodies.
    filestream << bodies.size() << ", " << beams.size() << ", " << plates.size()+shellConnectivities_h.size() << ", " << body2Ds.size() << ", " << endl;
    // ---- Rigid bodies: 3 position + 3 velocity DOFs each ----------------
    for (int i = 0; i < bodies.size(); i++) {
        filestream
            << i << ", "
            << bodies[i]->isFixed() << ", "
            << p_h[3*i] << ", "
            << p_h[3*i+1] << ", "
            << p_h[3*i+2] << ", "
            // Four constant fields between position and velocity; presumably
            // an identity orientation quaternion — importSystem skips them.
            << "1, "
            << "0, "
            << "0, "
            << "0, "
            << v_h[3*i] << ", "
            << v_h[3*i+1] << ", "
            << v_h[3*i+2] << ", ";
        // Geometry record: type 0 with a single value (sphere radius per
        // importSystem's mass formula) when .y == 0, else type 2 with the
        // full x/y/z extents.
        if(contactGeometry_h[i].y == 0) {
            filestream
                << "0, "
                << contactGeometry_h[i].x << ", ";
        }
        else {
            filestream
                << "2, "
                << contactGeometry_h[i].x << ", "
                << contactGeometry_h[i].y << ", "
                << contactGeometry_h[i].z << ", ";
        }
        filestream
            << "\n";
    }
    // ---- Beams: 12 position + 12 velocity DOFs, stored after the bodies'
    // 3*bodies.size() entries in p_h/v_h -----------------------------------
    for (int i = 0; i < beams.size(); i++) {
        // TODO: Need to know collision family information, density, elastic modulus, number of contacts (especially important when importing)
        filestream
            << bodies.size()+i << ", "
            << contactGeometry_h[bodies.size()+i].x << ", "
            << contactGeometry_h[bodies.size()+i].y << ", "
            << p_h[3*bodies.size()+12*i] << ", "
            << p_h[3*bodies.size()+12*i+1] << ", "
            << p_h[3*bodies.size()+12*i+2] << ", "
            << p_h[3*bodies.size()+12*i+3] << ", "
            << p_h[3*bodies.size()+12*i+4] << ", "
            << p_h[3*bodies.size()+12*i+5] << ", "
            << p_h[3*bodies.size()+12*i+6] << ", "
            << p_h[3*bodies.size()+12*i+7] << ", "
            << p_h[3*bodies.size()+12*i+8] << ", "
            << p_h[3*bodies.size()+12*i+9] << ", "
            << p_h[3*bodies.size()+12*i+10] << ", "
            << p_h[3*bodies.size()+12*i+11] << ", "
            << v_h[3*bodies.size()+12*i] << ", "
            << v_h[3*bodies.size()+12*i+1] << ", "
            << v_h[3*bodies.size()+12*i+2] << ", "
            << v_h[3*bodies.size()+12*i+3] << ", "
            << v_h[3*bodies.size()+12*i+4] << ", "
            << v_h[3*bodies.size()+12*i+5] << ", "
            << v_h[3*bodies.size()+12*i+6] << ", "
            << v_h[3*bodies.size()+12*i+7] << ", "
            << v_h[3*bodies.size()+12*i+8] << ", "
            << v_h[3*bodies.size()+12*i+9] << ", "
            << v_h[3*bodies.size()+12*i+10] << ", "
            << v_h[3*bodies.size()+12*i+11] << ", ";
        filestream
            << "\n";
    }
    // ---- Plates: 36 DOFs each; contact forces are exported too ----------
    for (int i = 0; i < plates.size(); i++) {
        // TODO: Need to know collision family information, density, elastic modulus, number of contacts (especially important when importing)
        filestream
            << bodies.size()+beams.size()+i << ", "
            << contactGeometry_h[bodies.size()+beams.size()+i].x << ", "
            << contactGeometry_h[bodies.size()+beams.size()+i].y << ", "
            << plates[i]->getThickness() << ", ";
        for(int j=0;j<36;j++) {
            filestream << p_h[3*bodies.size()+12*beams.size()+36*i+j] << ", ";
        }
        for(int j=0;j<36;j++) {
            filestream << v_h[3*bodies.size()+12*beams.size()+36*i+j] << ", ";
        }
        for(int j=0;j<36;j++) {
            filestream << f_contact_h[3*bodies.size()+12*beams.size()+36*i+j] << ", ";
        }
        filestream << "\n";
    }
    // ---- Mesh shell elements: 4 nodes x 9 DOFs for p, v, and f_contact.
    // Note the identifier includes body2Ds.size(): IDs enumerate bodies,
    // beams, plates, 2D bodies, then shells, even though shell rows are
    // written to the file before the 2D-body rows.
    for (int i = 0; i < shellConnectivities_h.size(); i++) {
        filestream
            << bodies.size()+beams.size()+plates.size()+body2Ds.size()+i << ", "
            << shellGeometries_h[i].x << ", "
            << shellGeometries_h[i].y << ", "
            << shellGeometries_h[i].z << ", ";
        // Shell-mesh DOFs live after bodies, beams, plates, and 2D bodies.
        int offset = plates.size()*36+12*beams.size()+3*bodies.size()+3*body2Ds.size();
        double* p0 = &p_h[offset+9*shellConnectivities_h[i].x];
        double* p1 = &p_h[offset+9*shellConnectivities_h[i].y];
        double* p2 = &p_h[offset+9*shellConnectivities_h[i].z];
        double* p3 = &p_h[offset+9*shellConnectivities_h[i].w];
        double* v0 = &v_h[offset+9*shellConnectivities_h[i].x];
        double* v1 = &v_h[offset+9*shellConnectivities_h[i].y];
        double* v2 = &v_h[offset+9*shellConnectivities_h[i].z];
        double* v3 = &v_h[offset+9*shellConnectivities_h[i].w];
        double* f0 = &f_contact_h[offset+9*shellConnectivities_h[i].x];
        double* f1 = &f_contact_h[offset+9*shellConnectivities_h[i].y];
        double* f2 = &f_contact_h[offset+9*shellConnectivities_h[i].z];
        double* f3 = &f_contact_h[offset+9*shellConnectivities_h[i].w];
        for(int j=0;j<9;j++) filestream << p0[j] << ", ";
        for(int j=0;j<9;j++) filestream << p1[j] << ", ";
        for(int j=0;j<9;j++) filestream << p2[j] << ", ";
        for(int j=0;j<9;j++) filestream << p3[j] << ", ";
        for(int j=0;j<9;j++) filestream << v0[j] << ", ";
        for(int j=0;j<9;j++) filestream << v1[j] << ", ";
        for(int j=0;j<9;j++) filestream << v2[j] << ", ";
        for(int j=0;j<9;j++) filestream << v3[j] << ", ";
        for(int j=0;j<9;j++) filestream << f0[j] << ", ";
        for(int j=0;j<9;j++) filestream << f1[j] << ", ";
        for(int j=0;j<9;j++) filestream << f2[j] << ", ";
        for(int j=0;j<9;j++) filestream << f3[j] << ", ";
        filestream << "\n";
    }
    // ---- 2D bodies: 3 position + 3 velocity DOFs each -------------------
    for (int i = 0; i < body2Ds.size(); i++) {
        // TODO: Need to know collision family information, density, elastic modulus, number of contacts (especially important when importing)
        filestream << bodies.size()+beams.size()+plates.size()+i << ", ";
        for(int j=0;j<3;j++) {
            filestream << p_h[3*bodies.size()+12*beams.size()+36*plates.size()+3*i+j] << ", ";
        }
        for(int j=0;j<3;j++) {
            filestream << v_h[3*bodies.size()+12*beams.size()+36*plates.size()+3*i+j] << ", ";
        }
        filestream
            << "\n";
    }
    filestream.close();
    return 0;
}
int System::importSystem(string filename) {
    // Reads a snapshot previously written by exportSystem() and rebuilds
    // the rigid bodies (beams/plates/shells are not imported yet, see the
    // TODO at the bottom).
    // Returns 0 on success, -1 if the file could not be opened.
    double3 pos;
    double3 vel;
    double3 geometry = make_double3(0,0,0);
    int isFixed;
    string temp_data;
    int numBodies;
    int numBeams;
    int numPlates;
    int numBodies2D;
    double blah;   // scratch for the four ignored fields between pos and vel
    int index;
    int shape;
    ifstream ifile(filename.c_str());
    if(!ifile.is_open()) {
        // Fix: previously a missing file left numBodies uninitialized and
        // the loop below ran on garbage; fail explicitly instead.
        return -1;
    }
    // Header line: comma-separated entity counts.
    getline(ifile,temp_data);
    for(int i=0; i<temp_data.size(); ++i){
        if(temp_data[i]==','){temp_data[i]=' ';}
    }
    stringstream ss1(temp_data);
    ss1>>numBodies>>numBeams>>numPlates>>numBodies2D;
    Body* bodyPtr;
    for(int i=0; i<numBodies; i++) {
        getline(ifile,temp_data);
        // Fix: this loop used `i` as well, shadowing the body index above.
        for(int j=0; j<temp_data.size(); ++j){
            if(temp_data[j]==','){temp_data[j]=' ';}
        }
        stringstream ss(temp_data);
        ss>>index>>isFixed>>pos.x>>pos.y>>pos.z>>blah>>blah>>blah>>blah>>vel.x>>vel.y>>vel.z>>shape;
        // Shape 0 carries a single geometry value (sphere radius, per the
        // mass formula below); any other shape carries full x/y/z extents.
        if(shape == 0) {
            ss>>geometry.x;
            geometry.y = 0;
            geometry.z = 0;
        } else {
            ss>>geometry.x>>geometry.y>>geometry.z;
        }
        bodyPtr = new Body(pos);
        bodyPtr->setBodyFixed(isFixed);
        bodyPtr->setGeometry(geometry);
        bodyPtr->setVelocity(vel);
        if(shape == 0) {
            // Sphere mass = density * (4/3) * pi * r^3 with density 2600.
            // NOTE(review): hard-coded density and 5-digit pi; consider
            // sourcing these from material properties / M_PI.
            bodyPtr->setMass(2600*4.0*3.14159*pow(geometry.x,3.0)/3.0);
        } else {
            bodyPtr->setMass(1.0);
        }
        add(bodyPtr);
        //cout << index << " " << isFixed << " " << pos.x << " " << pos.y << " " << pos.z << " " << "1 0 0 0 " << vel.x << " " << vel.y << " " << vel.z << " " << shape << " " << geometry.x << " " << geometry.y << " " << geometry.z << endl;
    }
    // TODO: IMPORT BEAMS
    return 0;
}
int System::exportMatrices(string directory) {
    // Dumps the system matrices/vectors in MatrixMarket format into
    // `directory`: D, Minv (the mass member), r, b, and k.
    cusp::io::write_matrix_market_file(D, directory + "/D.mtx");
    cusp::io::write_matrix_market_file(mass, directory + "/Minv.mtx");
    cusp::io::write_matrix_market_file(r, directory + "/r.mtx");
    cusp::io::write_matrix_market_file(b, directory + "/b.mtx");
    cusp::io::write_matrix_market_file(k, directory + "/k.mtx");
    return 0;
}
void System::importMesh(string filename, double stiffness, int numContactPointsPerElement, double pressure) {
string temp_data;
int numShells;
int numNodes;
int numNonzeros_M;
int numNonzeros_invM;
double3 node;
int4 connectivity;
double4 material;
double4 geometry;
int map;
double force;
int iVal;
int jVal;
double val;
ifstream ifile(filename.c_str());
getline(ifile,temp_data);
for(int i=0; i<temp_data.size(); ++i){
if(temp_data[i]==','){temp_data[i]=' ';}
}
stringstream ss1(temp_data);
ss1>>numNodes>>numShells>>numNonzeros_M>>numNonzeros_invM;
// read nodes
for(int i=0; i<3*numNodes; i++) {
getline(ifile,temp_data);
for(int j=0; j<temp_data.size(); ++j){
if(temp_data[j]==','){temp_data[j]=' ';}
}
stringstream ss(temp_data);
ss>>node.x>>node.y>>node.z;
nodes_h.push_back(node);
}
// read shell connectivity
for(int i=0; i<numShells; i++) {
getline(ifile,temp_data);
for(int j=0; j<temp_data.size(); ++j){
if(temp_data[j]==','){temp_data[j]=' ';}
}
stringstream ss(temp_data);
ss>>connectivity.x>>connectivity.y>>connectivity.z>>connectivity.w;
shellConnectivities_h.push_back(connectivity);
}
shellConnectivities_d = shellConnectivities_h;
// read shell materials
for(int i=0; i<numShells; i++) {
getline(ifile,temp_data);
for(int j=0; j<temp_data.size(); ++j){
if(temp_data[j]==','){temp_data[j]=' ';}
}
stringstream ss(temp_data);
ss>>material.x>>material.y>>material.z>>material.w;
material.y = stiffness;
shellMaterials_h.push_back(material);
pressureShell_h.push_back(pressure);
}
shellMaterials_d = shellMaterials_h;
pressureShell_d = pressureShell_h;
// read shell geometries
for(int i=0; i<numShells; i++) {
getline(ifile,temp_data);
for(int j=0; j<temp_data.size(); ++j){
if(temp_data[j]==','){temp_data[j]=' ';}
}
stringstream ss(temp_data);
ss>>geometry.x>>geometry.y>>geometry.z>>geometry.w;
geometry.w = numContactPointsPerElement;
shellGeometries_h.push_back(geometry);
}
shellGeometries_d = shellGeometries_h;
//cout << endl;
// read shell map
for(int i=0; i<numShells*36; i++) {
getline(ifile,temp_data);
for(int j=0; j<temp_data.size(); ++j){
if(temp_data[j]==','){temp_data[j]=' ';}
}
stringstream ss(temp_data);
ss>>map;
shellMap_h.push_back(map);
}
shellMap_d = shellMap_h;
shellMap0_d = shellMap_h;
// read shell external force
for(int i=0; i<numNodes*9; i++) {
getline(ifile,temp_data);
for(int j=0; j<temp_data.size(); ++j){
if(temp_data[j]==','){temp_data[j]=' ';}
}
stringstream ss(temp_data);
ss>>force;
fextMesh_h.push_back(force);
}
// read shell mass matrix
for(int i=0; i<numNonzeros_M; i++) {
getline(ifile,temp_data);
for(int j=0; j<temp_data.size(); ++j){
if(temp_data[j]==','){temp_data[j]=' ';}
}
stringstream ss(temp_data);
ss>>iVal>>jVal>>val;
massShellI_h.push_back(iVal-1); // convert from 1-based indexing
massShellJ_h.push_back(jVal-1); // convert from 1-based indexing
massShell_h.push_back(val);
}
massShellI_d = massShellI_h;
massShellJ_d = massShellJ_h;
massShell_d = massShell_h;
// read shell inverse mass matrix
for(int i=0; i<numNonzeros_invM; i++) {
getline(ifile,temp_data);
for(int j=0; j<temp_data.size(); ++j){
if(temp_data[j]==','){temp_data[j]=' ';}
}
stringstream ss(temp_data);
ss>>iVal>>jVal>>val;
invMassShellI_h.push_back(iVal-1); // convert from 1-based indexing
invMassShellJ_h.push_back(jVal-1); // convert from 1-based indexing
invMassShell_h.push_back(val);
}
}
double3 System::transformNodalToCartesian_shellMesh(int shellIndex, double xi, double eta)
{
    // Maps normalized element coordinates (xi, eta) of shell element
    // `shellIndex` to a Cartesian position by evaluating the element's
    // shape functions at its four corner nodes (the polynomials below are
    // machine-generated; appears to be a bicubic/ANCF-style interpolation
    // -- TODO confirm against the element formulation).
    // a, b: element dimensions from the shell geometry record.
    double a = shellGeometries_h[shellIndex].x;
    double b = shellGeometries_h[shellIndex].y;
    // Start of the shell-mesh DOFs inside the global state vector p_h
    // (after bodies, beams, plates, and 2D bodies).
    int offset = plates.size()*36+12*beams.size()+3*bodies.size()+3*body2Ds.size();
    // Each node carries 9 DOFs: slots 0:2 are the position; slots 3:5 and
    // 6:8 are scaled by a and b respectively below -- presumably the
    // position gradients w.r.t. xi and eta; verify against the mesh format.
    double* p0 = &p_h[offset+9*shellConnectivities_h[shellIndex].x];
    double* p1 = &p_h[offset+9*shellConnectivities_h[shellIndex].y];
    double* p2 = &p_h[offset+9*shellConnectivities_h[shellIndex].z];
    double* p3 = &p_h[offset+9*shellConnectivities_h[shellIndex].w];
    double3 pos;
    // Generated code: identical polynomial in x, y, z components.
    pos.x = -eta*p2[0]*xi*(eta*-3.0-xi*3.0+(eta*eta)*2.0+(xi*xi)*2.0+1.0)-p1[0]*xi*(eta-1.0)*(eta+xi*3.0-(eta*eta)*2.0-(xi*xi)*2.0)-eta*p3[0]*(xi-1.0)*(eta*3.0+xi-(eta*eta)*2.0-(xi*xi)*2.0)+p0[0]*(eta-1.0)*(xi-1.0)*(eta+xi-(eta*eta)*2.0-(xi*xi)*2.0+1.0)+b*eta*p1[6]*xi*pow(eta-1.0,2.0)+b*(eta*eta)*p2[6]*xi*(eta-1.0)+a*eta*p2[3]*(xi*xi)*(xi-1.0)+a*eta*p3[3]*xi*pow(xi-1.0,2.0)-b*eta*p0[6]*pow(eta-1.0,2.0)*(xi-1.0)-b*(eta*eta)*p3[6]*(eta-1.0)*(xi-1.0)-a*p0[3]*xi*(eta-1.0)*pow(xi-1.0,2.0)-a*p1[3]*(xi*xi)*(eta-1.0)*(xi-1.0);
    pos.y = -eta*p2[1]*xi*(eta*-3.0-xi*3.0+(eta*eta)*2.0+(xi*xi)*2.0+1.0)-p1[1]*xi*(eta-1.0)*(eta+xi*3.0-(eta*eta)*2.0-(xi*xi)*2.0)-eta*p3[1]*(xi-1.0)*(eta*3.0+xi-(eta*eta)*2.0-(xi*xi)*2.0)+p0[1]*(eta-1.0)*(xi-1.0)*(eta+xi-(eta*eta)*2.0-(xi*xi)*2.0+1.0)+b*eta*p1[7]*xi*pow(eta-1.0,2.0)+b*(eta*eta)*p2[7]*xi*(eta-1.0)+a*eta*p2[4]*(xi*xi)*(xi-1.0)+a*eta*p3[4]*xi*pow(xi-1.0,2.0)-b*eta*p0[7]*pow(eta-1.0,2.0)*(xi-1.0)-b*(eta*eta)*p3[7]*(eta-1.0)*(xi-1.0)-a*p0[4]*xi*(eta-1.0)*pow(xi-1.0,2.0)-a*p1[4]*(xi*xi)*(eta-1.0)*(xi-1.0);
    pos.z = -eta*p2[2]*xi*(eta*-3.0-xi*3.0+(eta*eta)*2.0+(xi*xi)*2.0+1.0)-p1[2]*xi*(eta-1.0)*(eta+xi*3.0-(eta*eta)*2.0-(xi*xi)*2.0)-eta*p3[2]*(xi-1.0)*(eta*3.0+xi-(eta*eta)*2.0-(xi*xi)*2.0)+p0[2]*(eta-1.0)*(xi-1.0)*(eta+xi-(eta*eta)*2.0-(xi*xi)*2.0+1.0)+b*eta*p1[8]*xi*pow(eta-1.0,2.0)+b*(eta*eta)*p2[8]*xi*(eta-1.0)+a*eta*p2[5]*(xi*xi)*(xi-1.0)+a*eta*p3[5]*xi*pow(xi-1.0,2.0)-b*eta*p0[8]*pow(eta-1.0,2.0)*(xi-1.0)-b*(eta*eta)*p3[8]*(eta-1.0)*(xi-1.0)-a*p0[5]*xi*(eta-1.0)*pow(xi-1.0,2.0)-a*p1[5]*(xi*xi)*(eta-1.0)*(xi-1.0);
    return pos;
}
|
e13dc694692352f5602e443631d2421901b19e86.hip | // !!! This is a file automatically generated by hipify!!!
/*=========================================================================
*
* Copyright RTK Consortium
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0.txt
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*=========================================================================*/
// rtk includes
#include "rtkCudaInterpolateImageFilter.hcu"
#include "rtkCudaUtilities.hcu"
#include <itkMacro.h>
// cuda includes
#include <hip/hip_runtime.h>
#include <rocblas.h>
#include <hip/hip_runtime.h>
void
CUDA_interpolation(const int4 & inputSize, float * input, float * output, int projectionNumber, float ** weights)
{
  // Computes output = sum over phases of
  //   weights[phase][projectionNumber] * input_phase
  // where `input` holds inputSize.w volumes of inputSize.x*y*z floats each,
  // back to back, and `output` is one such volume.
  // `input`/`output` are device pointers; `weights` must be host-readable
  // (only scalar weights are dereferenced on the host).
  hipblasHandle_t handle;
  hipblasCreate(&handle);
  // CUDA device pointers
  int nVoxelsOutput = inputSize.x * inputSize.y * inputSize.z;
  int memorySizeOutput = nVoxelsOutput * sizeof(float);
  // Reset output volume
  // NOTE(review): return codes of hipMemset and the hipblas* calls are not
  // checked; only CUDA_CHECK_ERROR below will surface runtime errors.
  hipMemset((void *)output, 0, memorySizeOutput);
  for (int phase = 0; phase < inputSize.w; phase++)
  {
    float weight = weights[phase][projectionNumber];
    if (weight != 0)
    {
      // Create a pointer to the "phase"-th volume in the input
      float * p = input + phase * nVoxelsOutput;
      // Add "weight" times the "phase"-th volume in the input to the output
      hipblasSaxpy(handle, nVoxelsOutput, &weight, p, 1, output, 1);
    }
  }
  // Destroy Cublas context
  hipblasDestroy(handle);
  CUDA_CHECK_ERROR;
}
| e13dc694692352f5602e443631d2421901b19e86.cu | /*=========================================================================
*
* Copyright RTK Consortium
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0.txt
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*=========================================================================*/
// rtk includes
#include "rtkCudaInterpolateImageFilter.hcu"
#include "rtkCudaUtilities.hcu"
#include <itkMacro.h>
// cuda includes
#include <cuda.h>
#include <cublas_v2.h>
#include <cuda_runtime.h>
void
CUDA_interpolation(const int4 & inputSize, float * input, float * output, int projectionNumber, float ** weights)
{
  // Accumulates a weighted sum of the inputSize.w phase volumes into
  // `output`:
  //   output = sum_phase weights[phase][projectionNumber] * input_phase
  // `input` holds the phase volumes back to back (inputSize.x*y*z floats
  // each) on the device; `weights` is read on the host.
  const int voxelsPerVolume = inputSize.x * inputSize.y * inputSize.z;
  const int outputBytes = voxelsPerVolume * sizeof(float);

  cublasHandle_t cublas;
  cublasCreate(&cublas);

  // Start from a zeroed output volume.
  cudaMemset((void *)output, 0, outputBytes);

  for (int phase = 0; phase < inputSize.w; phase++)
  {
    const float w = weights[phase][projectionNumber];
    if (w == 0)
      continue;  // skip phases that do not contribute
    // output += w * (phase-th volume of the input)
    float * phaseVolume = input + phase * voxelsPerVolume;
    cublasSaxpy(cublas, voxelsPerVolume, &w, phaseVolume, 1, output, 1);
  }

  cublasDestroy(cublas);
  CUDA_CHECK_ERROR;
}
|
dba4af350642bbe23f66ad4d650788fc8fbfc54c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
csymv_upper.cu is nearly identical to chemv_upper.cu, just change names and drop MAGMA_C_CONJ.
csymv_kernel_U (upper) in csymv_upper.cu is very similar to
csymv_kernel_L (lower) in csymv.cu; diff the two files to compare.
Note: [ds] precisions generated from chemv_upper.cu
@generated from magmablas/zsymv_upper.cu normal z -> c, Tue Feb 9 16:05:33 2016
@author Mark Gates
*/
#include "magma_internal.h"
#include "commonblas_c.h"
#define PRECISION_c
#define NB_X 64
#define NB_Y 4
#define bank_shift 33
#define quarter_NB_X 16
#define half_NB_X 32
/*******************************************************************************
Upper case, compute block multiply, work = A*x, for any size n:
[ (A11*x1 + A12*x2 + A13*x3) --- --- ] [ A11 A12 A13 ] [ x1 ]
work = [ (A12^H*x1) (A22*x2 + A23*x3) --- ] = [ A12^H A22 A23 ] * [ x2 ]
[ (A13^H*x1) (A23^H*x2) (A33*x3) ] [ A13^H A23^H A33 ] [ x3 ]
The order is different from the lower case, because
the upper case processes a block row from the diagonal to the right, whereas
the lower case processes a block row from the diagonal to the left.
Uses a 64x4 thread block.
For diagonal tiles, covers a 64x64 tile using three 32x32 tiles (plus one gets transposed).
For off-diagonal tiles, covers a 64x64 tile using four 64x16 tiles.
In both cases, each thread multiplies 4 elements.
For rows past the bottom of the matrix, the A pointer is adjusted to be the
last valid row of A, which multiple threads will read.
Extra rows are ignored when saving results to work.
Columns past the right edge are explicitly ignored when loading.
x values past the bottom are set to zero, thus, extra columns are zeroed
when multiplying.
********************************************************************/
__global__ void
csymv_kernel_U(
    int n,
    magmaFloatComplex const * __restrict__ A, int lda,
    magmaFloatComplex const * __restrict__ x, int incx,
    magmaFloatComplex * __restrict__ work)
{
    // Launch configuration (see the block comment above): one NB_X x NB_Y
    // = 64x4 thread block per NB_X-row block of A; gridDim.x = ceil(n/NB_X).
    // `work` is an lda-by-gridDim.x workspace: column `blk` receives the
    // partial products produced by block `blk` (`work += blk*lda` below);
    // csymv_kernel_U_sum reduces it afterwards.
    // The guard is always true for PRECISION_c; it is kept because this
    // file is generated across precisions (z requires sm_20+).
#if defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) || (__CUDA_ARCH__ >= 200)

    // treats sA as 16x64 block
    #define sA16(i_, j_) (sA[(i_)][(j_)]) // i.e., sA[ (i_)*(NB_X+3) + (j_) ]

    // treats sA as 32x32 block
    #define sA32(i_, j_) (sA[0][(i_) + bank_shift*(j_)])

    // 64x4 thread block
    const int tx = threadIdx.x;
    const int ty = threadIdx.y;
    const int blk = blockIdx.x;
    const int blk_ind = NB_X * blk;
    const int td = NB_X * ty + tx;

    // 32x8 thread block
    const int tx2 = td % half_NB_X;
    const int ty2 = td / half_NB_X;

    // If this blk has fewer than NB_X rows, partial is the number of valid rows,
    // so tx = 0, ..., partial-1 are valid rows, and tx >= partial are invalid.
    // Else, partial == 0.
    int partial = (blk == gridDim.x - 1 ? (n % NB_X) : 0);

    magmaFloatComplex psum, psum_t;
    magmaFloatComplex total = MAGMA_C_ZERO;

    // sA is used as a 32x32 block, sA32(i,j),
    // and as a 16x64 block, sA16(i,j), in different parts of the code.
    // sA must be at least half_NB_X*bank_shift = 32x33 = 1056;
    // quarter_NB_X*(NB_X + 2) = 16*(64 + 2) = 1056
    __shared__ magmaFloatComplex sA [quarter_NB_X][NB_X + 3]; /* Why +3? seems it only needs +2. Does +3 reduce bank conflicts? */
    __shared__ magmaFloatComplex sx_blk[NB_X]; // for x[ blk ]
    __shared__ magmaFloatComplex sx_jj [NB_X]; // for x[ jj ], which cycles over all blocks right of diag

    magmaFloatComplex rA[4];
    magmaFloatComplex psums_t[4];

    // --------------------
    // load 64x1 block x(blk_ind + 0:63) into sx_blk
    // (rows past n are zero-padded so later multiplies need no guards)
    x += (blk_ind + tx)*incx; // x is x(blk_ind + tx)
    if ( ty == 0 ) {
        if ( partial == 0 || tx < partial ) {
            sx_blk[tx] = x[0];
        }
        else {
            sx_blk[tx] = MAGMA_C_ZERO;
        }
    }

    // --------------------
    // move to block row
    work += blk*lda; // work is work(0, blk)

    A += blk_ind; // A is A(blk_ind, 0)
    A += ty2*lda + tx2; // A is A(blk_ind + tx2, ty2)

    // move to 32x32 diag block
    A += blk_ind*lda; // A is A(blk_ind + tx2, blk_ind + ty2)

    // load 32x32 diag block A(blk_ind + 0:31, blk_ind + 0:31) into sA,
    // as four 32x8 sections one after another:
    // columns 0:7, then 8:15, then 16:23, then 24:31
    if ( partial ) {
        if ( tx2 >= partial ) {
            A = A - tx2 + (partial - 1); // A is A(blk_ind + partial-1, blk_ind + ty2), the bottom-most valid row
        }
        #pragma unroll
        for (int j=0; j < half_NB_X; j += 8) {
            if ( ty2+j < partial ) {
                sA32(tx2, ty2 + j) = A[j*lda];
            }
            else {
                sA32(tx2, ty2 + j) = MAGMA_C_ZERO;
            }
        }
        if ( tx2 >= partial ) {
            A = A + tx2 - (partial - 1); // A is A(blk_ind + tx2, blk_ind + ty2)
        }
    }
    else {
        #pragma unroll
        for (int j=0; j < half_NB_X; j += 8) {
            sA32(tx2, ty2 + j) = A[j*lda];
        }
    }
    __syncthreads();

    // symmetrize 32x32 diag block, copying upper to lower triangle,
    // as four 32x8 sections in parallel:
    // columns 0,4,8,12,16,20,24,28; then 1,5,...,29; then 2,6,...,30, then 3,7,...,31
    #pragma unroll
    for (int j=ty2*4; j < ty2*4 + 4; j++) {
        if ( j > tx2 ) {
            sA32(j, tx2) = sA32(tx2, j);
        }
    }
    __syncthreads();

    // multiply 32x32 diag block * x
    // each thread does partial row sA(tx2, ty2*4 : ty2*4 + 3)
    psum = MAGMA_C_ZERO;
    #pragma unroll
    for (int j=0; j < 4; j++) {
        psum += sA32(tx2, ty2*4 + j) * sx_blk[ty2*4 + j];
    }
    __syncthreads();

    // store partial row sums
    sA32(ty2, tx2) = psum;
    __syncthreads();

    // sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
    if ( ty2 == 0 ) {
        total = sA32(0, tx2) + sA32(1, tx2)
              + sA32(2, tx2) + sA32(3, tx2)
              + sA32(4, tx2) + sA32(5, tx2)
              + sA32(6, tx2) + sA32(7, tx2);
    }
    __syncthreads();

    // --------------------
    // move to next 32x32 diag block, then repeat steps from first diag block
    A += half_NB_X + half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + NB/2 + ty2)

    // load 32x32 diag block A[block + 0:31, block + 0:31] into sA
    if ( partial ) {
        if ( tx2 + half_NB_X >= partial ) {
            A = A - (tx2 + half_NB_X) + (partial - 1);
        }
        #pragma unroll
        for (int j=0; j < half_NB_X; j += 8) {
            if ( ty2+j + half_NB_X < partial ) {
                sA32(tx2, ty2 + j) = A[j*lda];
            }
            else {
                sA32(tx2, ty2 + j) = MAGMA_C_ZERO;
            }
        }
        if ( tx2 + half_NB_X >= partial ) {
            A = A + (tx2 + half_NB_X) - (partial - 1);
        }
    }
    else {
        #pragma unroll
        for (int j=0; j < half_NB_X; j += 8) {
            sA32(tx2, ty2 + j) = A[j*lda];
        }
    }
    __syncthreads();

    // symmetrize 32x32 diag block, copying upper to lower triangle
    #pragma unroll
    for (int j=ty2*4; j < ty2*4 + 4; j++) {
        if ( j > tx2 ) {
            sA32(j, tx2) = sA32(tx2, j);
        }
    }
    __syncthreads();

    // multiply 32x32 diag block * x
    psum = MAGMA_C_ZERO;
    #pragma unroll
    for (int j=0; j < 4; j++) {
        psum += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j];
    }
    __syncthreads();

    // store partial row sums
    sA32(ty2, tx2) = psum;
    __syncthreads();

    // sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
    if ( ty2 == 1 ) {
        total = sA32(0, tx2) + sA32(1, tx2)
              + sA32(2, tx2) + sA32(3, tx2)
              + sA32(4, tx2) + sA32(5, tx2)
              + sA32(6, tx2) + sA32(7, tx2);
    }
    __syncthreads();

    // --------------------
    // move to off-diag 32x32 block
    A -= half_NB_X; // A is A(blk_ind + tx2, blk_ind + NB/2 + ty2)

    // load 32x32 block of A into sA,
    // as four 32x8 sections one after another:
    // columns 0:7, then 8:15, then 16:23, then 24:31
    if ( partial ) {
        if ( tx2 >= partial ) {
            A = A - (tx2) + (partial - 1);
        }
        #pragma unroll
        for (int j=0; j < half_NB_X; j += 8) {
            if ( ty2+j + half_NB_X < partial ) {
                sA32(tx2, ty2 + j) = A[j*lda];
            }
            else {
                sA32(tx2, ty2 + j) = MAGMA_C_ZERO;
            }
        }
        if ( tx2 >= partial ) {
            A = A + (tx2) - (partial - 1);
        }
    }
    else {
        #pragma unroll
        for (int j=0; j < half_NB_X; j += 8) {
            sA32(tx2, ty2 + j) = A[j*lda];
        }
    }
    __syncthreads();

    // multiply 32x32 block (below diag)
    psum = MAGMA_C_ZERO;
    #pragma unroll
    for (int j=0; j < 4; j++) {
        psum += sA32(ty2 + j*8, tx2) * sx_blk[j*8 + ty2];
    }
    //__syncthreads(); // no sync needed here

    // multiply transposed 32x32 block (above diag)
    psum_t = MAGMA_C_ZERO;
    #pragma unroll
    for (int j=0; j < 4; j++) {
        psum_t += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j];
    }
    __syncthreads();

    // store partial sums for non-transposed 32x32 block
    sA32(ty2, tx2) = psum;
    __syncthreads();

    // sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
    if ( ty2 == 1 ) {
        total = total
              + sA32(0, tx2) + sA32(1, tx2)
              + sA32(2, tx2) + sA32(3, tx2)
              + sA32(4, tx2) + sA32(5, tx2)
              + sA32(6, tx2) + sA32(7, tx2);
    }
    __syncthreads();

    // store partial sums for transposed 32x32 block
    sA32(ty2, tx2) = psum_t;
    __syncthreads();

    // sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
    if ( ty2 == 0 ) {
        total = total
              + sA32(0, tx2) + sA32(1, tx2)
              + sA32(2, tx2) + sA32(3, tx2)
              + sA32(4, tx2) + sA32(5, tx2)
              + sA32(6, tx2) + sA32(7, tx2);
    }
    __syncthreads();

    // --------------------
    // move to next 64x64 block right of diag in block row, and
    // switch thread offset from (tx2,ty2) 32x8 block to (tx,ty) 64x4 block
    A += half_NB_X*lda; // A is A(blk_ind + tx2, blk_ind + NB_X + ty2 )
    A -= ty2*lda + tx2; // A is A(blk_ind, blk_ind + NB_X )
    A += 4*ty*lda + tx; // A is A(blk_ind + tx, blk_ind + 4*ty)

    // Unlike lower case, don't adjust A here for partial # of rows.
    // Since block is right of diagonal, it must have all NB rows,
    // but can have < NB columns, dealt with when loading below.
    x -= blk_ind*incx; // x is x(tx)

    // 16x16 thread block
    const int tx4 = td % quarter_NB_X;
    const int ty4 = td / quarter_NB_X;

    // cycle over blocks jj right of diagonal, in block row blk
    for (int jj=blk+1; jj < gridDim.x; ++jj) {
        partial = (jj == gridDim.x - 1 ? (n % NB_X) : 0);

        // load 64x1 block x(jj_ind + 0:63) into sx_jj
        if ( ty == 0 ) {
            if ( partial == 0 || tx < partial ) {
                sx_jj[tx] = x[jj*NB_X*incx];
            }
            else {
                sx_jj[tx] = MAGMA_C_ZERO;
            }
        }
        __syncthreads();

        for (int k=0; k < 4; k++) {
            // load 64x16 block of A into rA, 4 elements per thread,
            // as four 64x4 sections in parallel:
            // columns 0,4,8,12; then 1,5,9,13; then 2,6,10,14; then 3,7,11,15
            if ( partial ) {
                #pragma unroll
                for (int j=0; j < 4; j++) {
                    if ( 4*ty + j + k*quarter_NB_X < partial ) {
                        rA[j] = A[j*lda];
                    }
                    else {
                        rA[j] = MAGMA_C_ZERO;
                    }
                }
            }
            else {
                #pragma unroll
                for (int j=0; j < 4; j++) {
                    rA[j] = A[j*lda];
                }
            }

            // 1) multiply 64x16 block A_{blk,jj} * x_jj
            //    each thread does partial row rA(tx + 16*k, ty*4 + 16*k : ty*4 + 3 + 16*k)
            // 2) multiply 16x64 block A_{blk,jj} * x_blk,
            //    storing each product Aji*xi to sA(j,i)
            #pragma unroll
            for (int j=0; j < 4; j++) {
                total += rA[j] * sx_jj[quarter_NB_X*k + ty*4 + j]; // y_blk = A_{blk,jj} * x_jj
                sA16(ty*4 + j, tx) = rA[j] * sx_blk[tx]; // y_jj = A_{blk,jj}^T * x_blk (symv: transpose, no conjugate)
            }
            __syncthreads();

            // do partial row sums for transposed 16x64 result
            // use 16x16 thread grid (tx4, ty4) instead of 64x4 (tx, ty)
            // sum sixteen 16x4 sections in parallel:
            // columns 0,4,8,...,60; then 1,5,...,61; then 2,6,...,62; then 3,7,...,63
            psum_t = MAGMA_C_ZERO;
            #pragma unroll
            for (int j=0; j < 4; j++) {
                psum_t += sA16(tx4, ty4*4 + j);
            }
            __syncthreads();

            // store partial row sums of transposed result, y_jj (locally)
            psums_t[k] = psum_t;

            // move right to next 64x16 block
            A += lda * quarter_NB_X; // A is A(blk_ind + tx, jj*NB_X + (k+1)*NB_X/4 + 4*ty)
        }
        // already at next 64x64 block
        // A is A(blk_ind + tx, (jj+1)*NB_x + 4*ty)

        // store partial row sums of transposed result, y_jj
        #pragma unroll
        for (int k=0; k < 4; k++) {
            sA16(tx4, ty4 + quarter_NB_X*k) = psums_t[k];
        }
        __syncthreads();

        // sum up partial row sums of transposed result, y_jj, and store final total to workspace
        // thread (tx4,ty4) where ty4 < 4 sums row tx4 + ty4*16
        if ( ty4 < 4 && (partial == 0 || tx4 + ty4*quarter_NB_X < partial) ) {
            int ty4_nb4 = ty4*quarter_NB_X;
            psum_t = sA16(tx4, 0 + ty4_nb4) + sA16(tx4, 1 + ty4_nb4)
                   + sA16(tx4, 2 + ty4_nb4) + sA16(tx4, 3 + ty4_nb4)
                   + sA16(tx4, 4 + ty4_nb4) + sA16(tx4, 5 + ty4_nb4)
                   + sA16(tx4, 6 + ty4_nb4) + sA16(tx4, 7 + ty4_nb4)
                   + sA16(tx4, 8 + ty4_nb4) + sA16(tx4, 9 + ty4_nb4)
                   + sA16(tx4, 10 + ty4_nb4) + sA16(tx4, 11 + ty4_nb4)
                   + sA16(tx4, 12 + ty4_nb4) + sA16(tx4, 13 + ty4_nb4)
                   + sA16(tx4, 14 + ty4_nb4) + sA16(tx4, 15 + ty4_nb4);
            work[jj*NB_X + tx4 + ty4_nb4] = psum_t; // store at work( jj*NB_X + tx4 + ty4*16, blk )
        }
        __syncthreads();
    }

    // store row sums
    sA16(ty, tx) = total;
    __syncthreads();

    partial = (blk == gridDim.x - 1 ? (n % NB_X) : 0);

    // sum up final total, y_blk, for row tx
    if ( ty == 0 && (partial == 0 || tx < partial) ) {
        total = sA16(0, tx)
              + sA16(1, tx)
              + sA16(2, tx)
              + sA16(3, tx);
        work[blk*NB_X + tx] = total; // store at work( blk*NB_X + tx, blk )
    }
#endif /* PRECISION_[sdc] || (__CUDA_ARCH__ >= 200) */
}
// end csymv_kernel_U
/**************************************************************
Upper case, sum up final results
Each block sums one block row; each thread sums one row.
On input (for 3 blocks):
[ (A11*x1 + A12*x2 + A13*x3) --- --- ]
work = [ (A12^H*x1) (A22*x2 + A23*x3) --- ]
[ (A13^H*x1) (A23^H*x2) (A33*x3) ]
On output:
[ (A11*x1 + A12*x2 + A13*x3) ]
y = alpha*[ (A12^H*x1) + (A22*x2 + A23*x3) ] + beta*y
[ (A13^H*x1) + (A23^H*x2) + (A33*x3) ]
********************************************************************/
__global__ void
csymv_kernel_U_sum(
    int n,
    magmaFloatComplex alpha,
    int lda,
    magmaFloatComplex beta,
    magmaFloatComplex * __restrict__ y, int incy,
    magmaFloatComplex const * __restrict__ work )
{
    // Each block reduces one NB_X-row slice of the workspace produced by
    // csymv_kernel_U; each thread owns one row. Row `row` of block b sums
    // the partial results in work(row, 0:b) -- the columns written by the
    // diagonal block and the blocks above it -- then applies alpha/beta.
    const int row = blockIdx.x * NB_X + threadIdx.x;

    // Guard the ragged final block: only rows inside [0, n) are written.
    if ( row < n ) {
        magmaFloatComplex Ax = MAGMA_C_ZERO;
        const magmaFloatComplex *w = work + row;
        // Walk across workspace columns 0..blockIdx.x (stride lda each).
        for (int j = 0; j <= blockIdx.x; ++j) {
            Ax += *w;
            w += lda;
        }
        y[row * incy] = beta*y[row * incy] + alpha*Ax;
    }
}
| dba4af350642bbe23f66ad4d650788fc8fbfc54c.cu | /*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
csymv_upper.cu is nearly identical to chemv_upper.cu, just change names and drop MAGMA_C_CONJ.
csymv_kernel_U (upper) in csymv_upper.cu is very similar to
csymv_kernel_L (lower) in csymv.cu; diff the two files to compare.
Note: [ds] precisions generated from chemv_upper.cu
@generated from magmablas/zsymv_upper.cu normal z -> c, Tue Feb 9 16:05:33 2016
@author Mark Gates
*/
#include "magma_internal.h"
#include "commonblas_c.h"
#define PRECISION_c
#define NB_X 64
#define NB_Y 4
#define bank_shift 33
#define quarter_NB_X 16
#define half_NB_X 32
/*******************************************************************************
Upper case, compute block multiply, work = A*x, for any size n:
[ (A11*x1 + A12*x2 + A13*x3) --- --- ] [ A11 A12 A13 ] [ x1 ]
work = [ (A12^H*x1) (A22*x2 + A23*x3) --- ] = [ A12^H A22 A23 ] * [ x2 ]
[ (A13^H*x1) (A23^H*x2) (A33*x3) ] [ A13^H A23^H A33 ] [ x3 ]
The order is different from the lower case, because
the upper case processes a block row from the diagonal to the right, whereas
the lower case processes a block row from the diagonal to the left.
Uses a 64x4 thread block.
For diagonal tiles, covers a 64x64 tile using three 32x32 tiles (plus one gets transposed).
For off-diagonal tiles, covers a 64x64 tile using four 64x16 tiles.
In both cases, each thread multiplies 4 elements.
For rows past the bottom of the matrix, the A pointer is adjusted to be the
last valid row of A, which multiple threads will read.
Extra rows are ignored when saving results to work.
Columns past the right edge are explicitly ignored when loading.
x values past the bottom are set to zero, thus, extra columns are zeroed
when multiplying.
********************************************************************/
__global__ void
csymv_kernel_U(
int n,
magmaFloatComplex const * __restrict__ A, int lda,
magmaFloatComplex const * __restrict__ x, int incx,
magmaFloatComplex * __restrict__ work)
{
#if defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) || (__CUDA_ARCH__ >= 200)
// treats sA as 16x64 block
#define sA16(i_, j_) (sA[(i_)][(j_)]) // i.e., sA[ (i_)*(NB_X+3) + (j_) ]
// treats sA as 32x32 block
#define sA32(i_, j_) (sA[0][(i_) + bank_shift*(j_)])
// 64x4 thread block
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int blk = blockIdx.x;
const int blk_ind = NB_X * blk;
const int td = NB_X * ty + tx;
// 32x8 thread block
const int tx2 = td % half_NB_X;
const int ty2 = td / half_NB_X;
// If this blk has fewer than NB_X rows, partial is the number of valid rows,
// so tx = 0, ..., partial-1 are valid rows, and tx >= partial are invalid.
// Else, partial == 0.
int partial = (blk == gridDim.x - 1 ? (n % NB_X) : 0);
magmaFloatComplex psum, psum_t;
magmaFloatComplex total = MAGMA_C_ZERO;
// sA is used as a 32x32 block, sA32(i,j),
// and as a 16x64 block, sA16(i,j), in different parts of the code.
// sA must be at least half_NB_X*bank_shift = 32x33 = 1056;
// quarter_NB_X*(NB_X + 2) = 16*(64 + 2) = 1056
__shared__ magmaFloatComplex sA [quarter_NB_X][NB_X + 3]; /* Why +3? seems it only needs +2. Does +3 reduce bank conflicts? */
__shared__ magmaFloatComplex sx_blk[NB_X]; // for x[ blk ]
__shared__ magmaFloatComplex sx_jj [NB_X]; // for x[ jj ], which cycles over all blocks right of diag
magmaFloatComplex rA[4];
magmaFloatComplex psums_t[4];
// --------------------
// load 64x1 block x(blk_ind + 0:63) into sx_blk
x += (blk_ind + tx)*incx; // x is x(blk_ind + tx)
if ( ty == 0 ) {
if ( partial == 0 || tx < partial ) {
sx_blk[tx] = x[0];
}
else {
sx_blk[tx] = MAGMA_C_ZERO;
}
}
// --------------------
// move to block row
work += blk*lda; // work is work(0, blk)
A += blk_ind; // A is A(blk_ind, 0)
A += ty2*lda + tx2; // A is A(blk_ind + tx2, ty2)
// move to 32x32 diag block
A += blk_ind*lda; // A is A(blk_ind + tx2, blk_ind + ty2)
// load 32x32 diag block A(blk_ind + 0:31, blk_ind + 0:31) into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 >= partial ) {
A = A - tx2 + (partial - 1); // A is A(blk_ind + partial-1, blk_ind + ty2), the bottom-most valid row
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_C_ZERO;
}
}
if ( tx2 >= partial ) {
A = A + tx2 - (partial - 1); // A is A(blk_ind + tx2, blk_ind + ty2)
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying upper to lower triangle,
// as four 32x8 sections in parallel:
// columns 0,4,8,12,16,20,24,28; then 1,5,...,29; then 2,6,...,30, then 3,7,...,31
#pragma unroll
for (int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j > tx2 ) {
sA32(j, tx2) = sA32(tx2, j);
}
}
__syncthreads();
// multiply 32x32 diag block * x
// each thread does partial row sA(tx2, ty2*4 : ty2*4 + 3)
psum = MAGMA_C_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to next 32x32 diag block, then repeat steps from first diag block
A += half_NB_X + half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + NB/2 + ty2)
// load 32x32 diag block A[block + 0:31, block + 0:31] into sA
if ( partial ) {
if ( tx2 + half_NB_X >= partial ) {
A = A - (tx2 + half_NB_X) + (partial - 1);
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j + half_NB_X < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_C_ZERO;
}
}
if ( tx2 + half_NB_X >= partial ) {
A = A + (tx2 + half_NB_X) - (partial - 1);
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying upper to lower triangle
#pragma unroll
for (int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j > tx2 ) {
sA32(j, tx2) = sA32(tx2, j);
}
}
__syncthreads();
// multiply 32x32 diag block * x
psum = MAGMA_C_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to off-diag 32x32 block
A -= half_NB_X; // A is A(blk_ind + tx2, blk_ind + NB/2 + ty2)
// load 32x32 block of A into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 >= partial ) {
A = A - (tx2) + (partial - 1);
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j + half_NB_X < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_C_ZERO;
}
}
if ( tx2 >= partial ) {
A = A + (tx2) - (partial - 1);
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// multiply 32x32 block (below diag)
psum = MAGMA_C_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(ty2 + j*8, tx2) * sx_blk[j*8 + ty2];
}
//__syncthreads(); // no sync needed here
// multiply transposed 32x32 block (above diag)
psum_t = MAGMA_C_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum_t += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial sums for non-transposed 32x32 block
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// store partial sums for transposed 32x32 block
sA32(ty2, tx2) = psum_t;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to next 64x64 block right of diag in block row, and
// switch thread offset from (tx2,ty2) 32x8 block to (tx,ty) 64x4 block
A += half_NB_X*lda; // A is A(blk_ind + tx2, blk_ind + NB_X + ty2 )
A -= ty2*lda + tx2; // A is A(blk_ind, blk_ind + NB_X )
A += 4*ty*lda + tx; // A is A(blk_ind + tx, blk_ind + 4*ty)
// Unlike lower case, don't adjust A here for partial # of rows.
// Since block is right of diagonal, it must have all NB rows,
// but can have < NB columns, dealt with when loading below.
x -= blk_ind*incx; // x is x(tx)
// 16x16 thread block
const int tx4 = td % quarter_NB_X;
const int ty4 = td / quarter_NB_X;
// cycle over blocks jj right of diagonal, in block row blk
for (int jj=blk+1; jj < gridDim.x; ++jj) {
partial = (jj == gridDim.x - 1 ? (n % NB_X) : 0);
// load 64x1 block x(jj_ind + 0:63) into sx_jj
if ( ty == 0 ) {
if ( partial == 0 || tx < partial ) {
sx_jj[tx] = x[jj*NB_X*incx];
}
else {
sx_jj[tx] = MAGMA_C_ZERO;
}
}
__syncthreads();
for (int k=0; k < 4; k++) {
// load 64x16 block of A into rA, 4 elements per thread,
// as four 64x4 sections in parallel:
// columns 0,4,8,12; then 1,5,9,13; then 2,6,10,14; then 3,7,11,15
if ( partial ) {
#pragma unroll
for (int j=0; j < 4; j++) {
if ( 4*ty + j + k*quarter_NB_X < partial ) {
rA[j] = A[j*lda];
}
else {
rA[j] = MAGMA_C_ZERO;
}
}
}
else {
#pragma unroll
for (int j=0; j < 4; j++) {
rA[j] = A[j*lda];
}
}
// 1) multiply 64x16 block A_{blk,jj} * x_jj
// each thread does partial row rA(tx + 16*k, ty*4 + 16*k : ty*4 + 3 + 16*k)
// 2) multiply 16x64 block A_{blk,jj} * x_blk,
// storing each product Aji*xi to sA(j,i)
#pragma unroll
for (int j=0; j < 4; j++) {
total += rA[j] * sx_jj[quarter_NB_X*k + ty*4 + j]; // y_blk = A_{blk,jj} * x_jj
sA16(ty*4 + j, tx) = rA[j] * sx_blk[tx]; // y_jj = A_{blk,jj}^H * x_blk
}
__syncthreads();
// do partial row sums for transposed 16x64 result
// use 16x16 thread grid (tx4, ty4) instead of 64x4 (tx, ty)
// sum sixteen 16x4 sections in parallel:
// columns 0,4,8,...,60; then 1,5,...,61; then 2,6,...,62; then 3,7,...,63
psum_t = MAGMA_C_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum_t += sA16(tx4, ty4*4 + j);
}
__syncthreads();
// store partial row sums of transposed result, y_jj (locally)
psums_t[k] = psum_t;
// move right to next 64x16 block
A += lda * quarter_NB_X; // A is A(blk_ind + tx, jj*NB_X + (k+1)*NB_X/4 + 4*ty)
}
// already at next 64x64 block
// A is A(blk_ind + tx, (jj+1)*NB_x + 4*ty)
// store partial row sums of transposed result, y_jj
#pragma unroll
for (int k=0; k < 4; k++) {
sA16(tx4, ty4 + quarter_NB_X*k) = psums_t[k];
}
__syncthreads();
// sum up partial row sums of transposed result, y_jj, and store final total to workspace
// thread (tx4,ty4) where ty4 < 4 sums row tx4 + ty4*16
if ( ty4 < 4 && (partial == 0 || tx4 + ty4*quarter_NB_X < partial) ) {
int ty4_nb4 = ty4*quarter_NB_X;
psum_t = sA16(tx4, 0 + ty4_nb4) + sA16(tx4, 1 + ty4_nb4)
+ sA16(tx4, 2 + ty4_nb4) + sA16(tx4, 3 + ty4_nb4)
+ sA16(tx4, 4 + ty4_nb4) + sA16(tx4, 5 + ty4_nb4)
+ sA16(tx4, 6 + ty4_nb4) + sA16(tx4, 7 + ty4_nb4)
+ sA16(tx4, 8 + ty4_nb4) + sA16(tx4, 9 + ty4_nb4)
+ sA16(tx4, 10 + ty4_nb4) + sA16(tx4, 11 + ty4_nb4)
+ sA16(tx4, 12 + ty4_nb4) + sA16(tx4, 13 + ty4_nb4)
+ sA16(tx4, 14 + ty4_nb4) + sA16(tx4, 15 + ty4_nb4);
work[jj*NB_X + tx4 + ty4_nb4] = psum_t; // store at work( jj*NB_X + tx4 + ty4*16, blk )
}
__syncthreads();
}
// store row sums
sA16(ty, tx) = total;
__syncthreads();
partial = (blk == gridDim.x - 1 ? (n % NB_X) : 0);
// sum up final total, y_blk, for row tx
if ( ty == 0 && (partial == 0 || tx < partial) ) {
total = sA16(0, tx)
+ sA16(1, tx)
+ sA16(2, tx)
+ sA16(3, tx);
work[blk*NB_X + tx] = total; // store at work( blk*NB_X + tx, blk )
}
#endif /* PRECISION_[sdc] || (__CUDA_ARCH__ >= 200) */
}
// end csymv_kernel_U
/**************************************************************
Upper case, sum up final results
Each block sums one block row; each thread sums one row.
On input (for 3 blocks):
[ (A11*x1 + A12*x2 + A13*x3) --- --- ]
work = [ (A12^H*x1) (A22*x2 + A23*x3) --- ]
[ (A13^H*x1) (A23^H*x2) (A33*x3) ]
On output:
[ (A11*x1 + A12*x2 + A13*x3) ]
y = alpha*[ (A12^H*x1) + (A22*x2 + A23*x3) ] + beta*y
[ (A13^H*x1) + (A23^H*x2) + (A33*x3) ]
********************************************************************/
__global__ void
csymv_kernel_U_sum(
int n,
magmaFloatComplex alpha,
int lda,
magmaFloatComplex beta,
magmaFloatComplex * __restrict__ y, int incy,
magmaFloatComplex const * __restrict__ work )
{
int tx = threadIdx.x;
int blk = blockIdx.x;
int blk_ind = blk * NB_X;
int ind = blk_ind + tx;
// Don't write outside [0, ..., n)
if ( ind < n ) {
work += ind;
magmaFloatComplex Ax = MAGMA_C_ZERO;
for (int j = 0; j <= blk; ++j) {
Ax += work[0];
work += lda;
}
y[ind * incy] = beta*y[ind * incy] + alpha*Ax;
}
}
|
495ff761ac87ebecce36fb5b1b32ab99049f8c90.hip | // !!! This is a file automatically generated by hipify!!!
///The following functions transforms an RGB image into a Gray scale image using CUDA
#include "iostream"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes CUDA
#include <hip/hip_runtime.h>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
#include <string>
// includes, project
#include <helper_cuda.h>
#include <helper_functions.h> // helper functions for SDK examples
using namespace cv;
void __global__ gaussianblure(const unsigned char *pfimagIn, unsigned char *pfimgOut, const int iWidth, const int iHeight, const int iChannels);
extern "C"
void ImageProcessingGPU(Mat image);
#define BLOCK_SIZE 8
void ImageProcessingGPU(Mat image)
{
//Image dimensions and channels
int iWidth = image.cols;
int iHeight = image.rows;
int iCn = image.channels();
//Total number of Bytes of the image
size_t count = iWidth * iHeight * iCn * sizeof(unsigned char);
//Input image is the one we read from file and want to change
unsigned char *h_ImagIn = image.data;
//The buffers in the GPU
unsigned char *d_ImagIn;
unsigned char *d_ImagOut;
//Allocate memory in GPU
checkCudaErrors(hipMalloc((void **)&d_ImagIn, count));
checkCudaErrors(hipMalloc((void **)&d_ImagOut, count));
//Test, pass this to the output to test if the cudamemcpy works
//error = hipMemset((void *)d_ImagOut, 255, count);
//Transfer data to GPU
checkCudaErrors(hipMemcpy((void *)d_ImagIn, (void *)h_ImagIn, count, hipMemcpyHostToDevice));
//Compue the results in GPU
dim3 dNumThreadsPerBlock(BLOCK_SIZE, BLOCK_SIZE); // Each thread block contains this much threads
// This amount of thread blocks
//Total number of threads that will be launched are dimGrid.x * dimGird.y * dimBlocks.x * dimBlocks.y
//NOTE: the toal numer of thread per block, i.e. dimBlock.x * dimBlock.y should not excede 1024 and
//in some system 512
dim3 dNumBlocks(iWidth / dNumThreadsPerBlock.x, iHeight / dNumThreadsPerBlock.y);
//GPU Kernel
hipLaunchKernelGGL(( gaussianblure) , dim3(dNumBlocks), dim3(dNumThreadsPerBlock) , 0, 0, d_ImagIn, d_ImagOut, iWidth, iHeight, iCn);
//Transfer data back from GPU to CPU
checkCudaErrors(hipMemcpy((void *)h_ImagIn, (void *)d_ImagOut, count, hipMemcpyDeviceToHost));
checkCudaErrors(hipFree(d_ImagIn));
checkCudaErrors(hipFree(d_ImagOut));
}
//GPU Kernel
void __global__ gaussianblure(const unsigned char *pfimagIn, unsigned char *pfimgOut, const int iWidth, const int iHeight, const int iChannels)
{
int iRow = blockIdx.y * blockDim.y + threadIdx.y;
int iCol = blockIdx.x * blockDim.x + threadIdx.x;
double dbFilterCoeff[9] = { -0.0833333333333333, -0.0833333333333333, -0.0833333333333333,
-0.0833333333333333, 1.66666666666667, -0.0833333333333333,
-0.0833333333333333, -0.0833333333333333, -0.0833333333333333 };
// The B value of the pixel, images in the OpenCV are in BGR format not RGB
//long long lPixelIdx = (iRow * iWidth + iCol ) * iChannels;
short sFilterRow = 0;
short sFilterCol = 0;
double dbFilterOutputB = 0.0;
double dbFilterOutputG = 0.0;
double dbFilterOutputR = 0.0;
for (int iFilterCnt = 0; iFilterCnt < 9; ++iFilterCnt)
{
sFilterRow = iFilterCnt / 3;
sFilterCol = iFilterCnt / 3;
int lImageRow = iRow + sFilterRow - 1;
int lImageCol = iCol + sFilterCol - 1;
if (lImageRow < 0 || lImageRow >= iHeight || lImageCol < 0 || lImageCol >= iWidth)
continue;
dbFilterOutputB += dbFilterCoeff[sFilterRow * 3 + sFilterCol] * (float)pfimagIn[lImageRow * iWidth * iChannels + lImageCol * iChannels];
dbFilterOutputG += dbFilterCoeff[sFilterRow * 3 + sFilterCol] * (float)pfimagIn[lImageRow * iWidth * iChannels + lImageCol * iChannels + 1];
dbFilterOutputR += dbFilterCoeff[sFilterRow * 3 + sFilterCol] * (float)pfimagIn[lImageRow * iWidth * iChannels + lImageCol * iChannels + 2];
}
if (dbFilterOutputB < 0.0)
dbFilterOutputB = 0.0;
else if (dbFilterOutputB >= 255.0)
dbFilterOutputB = 255.0;
if (dbFilterOutputG < 0.0)
dbFilterOutputG = 0.0;
else if (dbFilterOutputG >= 255.0)
dbFilterOutputG = 255.0;
if (dbFilterOutputR < 0.0)
dbFilterOutputR = 0.0;
else if (dbFilterOutputR >= 255.0)
dbFilterOutputR = 255.0;
pfimgOut[iRow * iWidth * iChannels + iCol * iChannels + 0] = dbFilterOutputB; //B
pfimgOut[iRow * iWidth * iChannels + iCol * iChannels + 1] = dbFilterOutputG; //G
pfimgOut[iRow * iWidth * iChannels + iCol * iChannels + 2] = dbFilterOutputR; //R
}
| 495ff761ac87ebecce36fb5b1b32ab99049f8c90.cu | ///The following functions transforms an RGB image into a Gray scale image using CUDA
#include "iostream"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes CUDA
#include <cuda_runtime.h>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
#include <string>
// includes, project
#include <helper_cuda.h>
#include <helper_functions.h> // helper functions for SDK examples
using namespace cv;
void __global__ gaussianblure(const unsigned char *pfimagIn, unsigned char *pfimgOut, const int iWidth, const int iHeight, const int iChannels);
extern "C"
void ImageProcessingGPU(Mat image);
#define BLOCK_SIZE 8
void ImageProcessingGPU(Mat image)
{
//Image dimensions and channels
int iWidth = image.cols;
int iHeight = image.rows;
int iCn = image.channels();
//Total number of Bytes of the image
size_t count = iWidth * iHeight * iCn * sizeof(unsigned char);
//Input image is the one we read from file and want to change
unsigned char *h_ImagIn = image.data;
//The buffers in the GPU
unsigned char *d_ImagIn;
unsigned char *d_ImagOut;
//Allocate memory in GPU
checkCudaErrors(cudaMalloc((void **)&d_ImagIn, count));
checkCudaErrors(cudaMalloc((void **)&d_ImagOut, count));
//Test, pass this to the output to test if the cudamemcpy works
//error = cudaMemset((void *)d_ImagOut, 255, count);
//Transfer data to GPU
checkCudaErrors(cudaMemcpy((void *)d_ImagIn, (void *)h_ImagIn, count, cudaMemcpyHostToDevice));
//Compue the results in GPU
dim3 dNumThreadsPerBlock(BLOCK_SIZE, BLOCK_SIZE); // Each thread block contains this much threads
// This amount of thread blocks
//Total number of threads that will be launched are dimGrid.x * dimGird.y * dimBlocks.x * dimBlocks.y
//NOTE: the toal numer of thread per block, i.e. dimBlock.x * dimBlock.y should not excede 1024 and
//in some system 512
dim3 dNumBlocks(iWidth / dNumThreadsPerBlock.x, iHeight / dNumThreadsPerBlock.y);
//GPU Kernel
gaussianblure <<< dNumBlocks, dNumThreadsPerBlock >>> (d_ImagIn, d_ImagOut, iWidth, iHeight, iCn);
//Transfer data back from GPU to CPU
checkCudaErrors(cudaMemcpy((void *)h_ImagIn, (void *)d_ImagOut, count, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaFree(d_ImagIn));
checkCudaErrors(cudaFree(d_ImagOut));
}
//GPU Kernel
void __global__ gaussianblure(const unsigned char *pfimagIn, unsigned char *pfimgOut, const int iWidth, const int iHeight, const int iChannels)
{
int iRow = blockIdx.y * blockDim.y + threadIdx.y;
int iCol = blockIdx.x * blockDim.x + threadIdx.x;
double dbFilterCoeff[9] = { -0.0833333333333333, -0.0833333333333333, -0.0833333333333333,
-0.0833333333333333, 1.66666666666667, -0.0833333333333333,
-0.0833333333333333, -0.0833333333333333, -0.0833333333333333 };
// The B value of the pixel, images in the OpenCV are in BGR format not RGB
//long long lPixelIdx = (iRow * iWidth + iCol ) * iChannels;
short sFilterRow = 0;
short sFilterCol = 0;
double dbFilterOutputB = 0.0;
double dbFilterOutputG = 0.0;
double dbFilterOutputR = 0.0;
for (int iFilterCnt = 0; iFilterCnt < 9; ++iFilterCnt)
{
sFilterRow = iFilterCnt / 3;
sFilterCol = iFilterCnt / 3;
int lImageRow = iRow + sFilterRow - 1;
int lImageCol = iCol + sFilterCol - 1;
if (lImageRow < 0 || lImageRow >= iHeight || lImageCol < 0 || lImageCol >= iWidth)
continue;
dbFilterOutputB += dbFilterCoeff[sFilterRow * 3 + sFilterCol] * (float)pfimagIn[lImageRow * iWidth * iChannels + lImageCol * iChannels];
dbFilterOutputG += dbFilterCoeff[sFilterRow * 3 + sFilterCol] * (float)pfimagIn[lImageRow * iWidth * iChannels + lImageCol * iChannels + 1];
dbFilterOutputR += dbFilterCoeff[sFilterRow * 3 + sFilterCol] * (float)pfimagIn[lImageRow * iWidth * iChannels + lImageCol * iChannels + 2];
}
if (dbFilterOutputB < 0.0)
dbFilterOutputB = 0.0;
else if (dbFilterOutputB >= 255.0)
dbFilterOutputB = 255.0;
if (dbFilterOutputG < 0.0)
dbFilterOutputG = 0.0;
else if (dbFilterOutputG >= 255.0)
dbFilterOutputG = 255.0;
if (dbFilterOutputR < 0.0)
dbFilterOutputR = 0.0;
else if (dbFilterOutputR >= 255.0)
dbFilterOutputR = 255.0;
pfimgOut[iRow * iWidth * iChannels + iCol * iChannels + 0] = dbFilterOutputB; //B
pfimgOut[iRow * iWidth * iChannels + iCol * iChannels + 1] = dbFilterOutputG; //G
pfimgOut[iRow * iWidth * iChannels + iCol * iChannels + 2] = dbFilterOutputR; //R
}
|
3193e8cdc6ed8f3010e8659d8912829592a1be27.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
The implementation of this file is based on embLayerNorm plugin in TensorRT demo:
https://github.com/NVIDIA/TensorRT/tree/release/5.1/demo/BERT/
Copyright 2019 NVIDIA Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "layer_norm.cuh"
#include "embed_layer_norm_impl.h"
#include <hip/hip_fp16.h>
using namespace onnxruntime::cuda;
using namespace cub;
namespace onnxruntime {
namespace contrib {
namespace cuda {
template <unsigned TPB>
__global__ void MaskIndexKernelSmall(int sequence_length, const int* mask, int* mask_index) {
using BlockReduce = hipcub::BlockReduce<int, TPB>;
__shared__ typename BlockReduce::TempStorage temp_storage;
// blockIdx.x is b
const int offset = blockIdx.x * sequence_length; // batch strides of sequence_length
hipcub::Min min;
int thread_data(sequence_length);
const int idx = offset + threadIdx.x;
if (threadIdx.x < sequence_length) {
const int val = mask[idx];
if (val == 0) // masked position: report thread idx
{
thread_data = threadIdx.x;
}
}
const auto min_index = BlockReduce(temp_storage).Reduce(thread_data, min);
if (threadIdx.x == 0) {
mask_index[blockIdx.x] = min_index;
}
}
template <unsigned TPB>
__global__ void MaskIndexKernel(int sequence_length, const int* mask, int* mask_index) {
using BlockReduce = hipcub::BlockReduce<int, TPB>;
__shared__ typename BlockReduce::TempStorage temp_storage;
// blockIdx.x is b
const int offset = blockIdx.x * sequence_length; // batch strides of sequence_length
hipcub::Min min;
int thread_data(sequence_length);
for (int i = threadIdx.x; i < sequence_length; i += TPB) {
const int idx = offset + i;
const int val = mask[idx];
if (val == 0) // masked position: report thread idx
{
thread_data = min(thread_data, i);
}
}
const auto min_index = BlockReduce(temp_storage).Reduce(thread_data, min);
if (threadIdx.x == 0) {
mask_index[blockIdx.x] = min_index;
}
}
inline bool ComputeMaskIndex(hipStream_t stream, const int sequence_length, const int batch_size, const int* mask, int* mask_index) {
// Mask idx is of length batch_size and assumes the valid region is contiguous starting
// from the beginning of the sequence
// Assume n = batch_size x sequence_length
if (sequence_length <= 32) {
hipLaunchKernelGGL(( MaskIndexKernelSmall<32>), dim3(batch_size), dim3(32), 0, stream, sequence_length, mask, mask_index);
} else if (sequence_length <= 128) {
hipLaunchKernelGGL(( MaskIndexKernelSmall<128>), dim3(batch_size), dim3(128), 0, stream, sequence_length, mask, mask_index);
} else if (sequence_length == 384) {
hipLaunchKernelGGL(( MaskIndexKernelSmall<384>), dim3(batch_size), dim3(384), 0, stream, sequence_length, mask, mask_index);
} else {
hipLaunchKernelGGL(( MaskIndexKernel<256>), dim3(batch_size), dim3(256), 0, stream, sequence_length, mask, mask_index);
}
return CUDA_CALL(hipPeekAtLastError());
}
template <typename T, unsigned TPB>
__global__ void EmbedLayerNormKernel(
int hidden_size, const int* input_ids, const int* segment_ids, const T* beta, const T* gamma,
const T* word_embedding, const T* position_embedding, const T* segment_embedding,
const T epsilon, T* output, T* embedding_sum, const int* position_ids) {
KeyValuePairSum pair_sum;
// 1. lookup word and segment of the block
// blockIdx.x = position in the sequence
// blockIdx.y = batch
// gridDim.x = sequence_length
// gridDim.y = batch_size
__shared__ int word_id;
__shared__ int segment_id;
__shared__ int position_id;
const T rld = T(1.f / hidden_size);
const int sequence_position = blockIdx.y * gridDim.x + blockIdx.x;
if (threadIdx.x == 0) {
word_id = input_ids[sequence_position];
if (nullptr == segment_ids) {
segment_id = 0;
} else {
segment_id = segment_ids[sequence_position];
}
if (nullptr == position_ids) {
position_id = blockIdx.x;
} else {
position_id = position_ids[sequence_position];
}
}
__syncthreads();
// 2. load pos/segment/word embeddings and add them toghether
// offset into embeddings is given by word_id * hidden_size
const int position_offset = position_id * hidden_size;
const int word_offset = word_id * hidden_size;
const int segment_offset = segment_id * hidden_size;
// the output offset is given by b * (sequence_length * hidden_size) + s * hidden_size
const int output_offset = sequence_position * hidden_size;
hipcub::KeyValuePair<T, T> thread_data(0, 0);
for (int it = threadIdx.x; it < hidden_size; it += TPB) {
const T w(word_embedding[word_offset + it]);
T t(0);
if (nullptr != segment_embedding)
t = segment_embedding[segment_offset + it];
const T p(position_embedding[position_offset + it]);
const T val = w + t + p;
output[output_offset + it] = val;
if (embedding_sum != nullptr) {
embedding_sum[output_offset + it] = val;
}
const T rldval = rld * val;
thread_data = pair_sum(thread_data, hipcub::KeyValuePair<T, T>(rldval, rldval * val));
}
// 3. layer norm on the sum
LayerNorm<T, TPB>(thread_data, hidden_size, output_offset, beta, gamma, epsilon, output);
}
template <typename T>
bool EmbedSkipLayerNorm(
hipStream_t stream, int hidden_size, int batch_size, int sequence_length,
const int* input_ids, const int* segment_ids, const T* beta, const T* gamma,
const T* word_embedding, const T* position_embedding, const T* segment_embedding,
const T epsilon, T* output, T* embedding_sum, const int* position_ids) {
constexpr int tpb = 256;
const dim3 grid(sequence_length, batch_size, 1);
const dim3 block(tpb, 1, 1);
hipLaunchKernelGGL(( EmbedLayerNormKernel<T, tpb>)
, dim3(grid), dim3(block), 0, stream, hidden_size, input_ids, segment_ids, beta, gamma, word_embedding, position_embedding, segment_embedding, epsilon, output, embedding_sum, position_ids);
return CUDA_CALL(hipPeekAtLastError());
}
bool LaunchEmbedLayerNormKernel(
hipStream_t stream,
void* output,
void* mask_index,
const int* input_ids,
const int* segment_ids,
const int* input_mask,
const void* gamma,
const void* beta,
const void* word_embedding,
const void* position_embedding,
const void* segment_embedding,
float epsilon,
const int hidden_size,
int batch_size,
int sequence_length,
const size_t element_size,
void* embedding_sum,
const int* position_ids) {
if (nullptr == input_mask) {
if (!CUDA_CALL(hipMemsetAsync(mask_index, 0, sizeof(int) * batch_size, stream)))
return false;
} else if (!ComputeMaskIndex(stream, sequence_length, batch_size, input_mask, static_cast<int*>(mask_index))) {
return false;
}
if (element_size == 2) {
return EmbedSkipLayerNorm<half>(
stream, hidden_size, batch_size, sequence_length, input_ids, segment_ids,
reinterpret_cast<const half*>(beta), reinterpret_cast<const half*>(gamma),
reinterpret_cast<const half*>(word_embedding), reinterpret_cast<const half*>(position_embedding),
reinterpret_cast<const half*>(segment_embedding), __float2half_rn(epsilon),
reinterpret_cast<half*>(output), reinterpret_cast<half*>(embedding_sum), position_ids);
} else {
return EmbedSkipLayerNorm<float>(
stream, hidden_size, batch_size, sequence_length, input_ids, segment_ids,
reinterpret_cast<const float*>(beta), reinterpret_cast<const float*>(gamma),
reinterpret_cast<const float*>(word_embedding), reinterpret_cast<const float*>(position_embedding),
reinterpret_cast<const float*>(segment_embedding), epsilon,
reinterpret_cast<float*>(output), reinterpret_cast<float*>(embedding_sum), position_ids);
}
}
} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
| 3193e8cdc6ed8f3010e8659d8912829592a1be27.cu | /*
The implementation of this file is based on embLayerNorm plugin in TensorRT demo:
https://github.com/NVIDIA/TensorRT/tree/release/5.1/demo/BERT/
Copyright 2019 NVIDIA Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "layer_norm.cuh"
#include "embed_layer_norm_impl.h"
#include <cuda_fp16.h>
using namespace onnxruntime::cuda;
using namespace cub;
namespace onnxruntime {
namespace contrib {
namespace cuda {
template <unsigned TPB>
__global__ void MaskIndexKernelSmall(int sequence_length, const int* mask, int* mask_index) {
using BlockReduce = cub::BlockReduce<int, TPB>;
__shared__ typename BlockReduce::TempStorage temp_storage;
// blockIdx.x is b
const int offset = blockIdx.x * sequence_length; // batch strides of sequence_length
cub::Min min;
int thread_data(sequence_length);
const int idx = offset + threadIdx.x;
if (threadIdx.x < sequence_length) {
const int val = mask[idx];
if (val == 0) // masked position: report thread idx
{
thread_data = threadIdx.x;
}
}
const auto min_index = BlockReduce(temp_storage).Reduce(thread_data, min);
if (threadIdx.x == 0) {
mask_index[blockIdx.x] = min_index;
}
}
template <unsigned TPB>
__global__ void MaskIndexKernel(int sequence_length, const int* mask, int* mask_index) {
using BlockReduce = cub::BlockReduce<int, TPB>;
__shared__ typename BlockReduce::TempStorage temp_storage;
// blockIdx.x is b
const int offset = blockIdx.x * sequence_length; // batch strides of sequence_length
cub::Min min;
int thread_data(sequence_length);
for (int i = threadIdx.x; i < sequence_length; i += TPB) {
const int idx = offset + i;
const int val = mask[idx];
if (val == 0) // masked position: report thread idx
{
thread_data = min(thread_data, i);
}
}
const auto min_index = BlockReduce(temp_storage).Reduce(thread_data, min);
if (threadIdx.x == 0) {
mask_index[blockIdx.x] = min_index;
}
}
inline bool ComputeMaskIndex(cudaStream_t stream, const int sequence_length, const int batch_size, const int* mask, int* mask_index) {
// Mask idx is of length batch_size and assumes the valid region is contiguous starting
// from the beginning of the sequence
// Assume n = batch_size x sequence_length
if (sequence_length <= 32) {
MaskIndexKernelSmall<32><<<batch_size, 32, 0, stream>>>(sequence_length, mask, mask_index);
} else if (sequence_length <= 128) {
MaskIndexKernelSmall<128><<<batch_size, 128, 0, stream>>>(sequence_length, mask, mask_index);
} else if (sequence_length == 384) {
MaskIndexKernelSmall<384><<<batch_size, 384, 0, stream>>>(sequence_length, mask, mask_index);
} else {
MaskIndexKernel<256><<<batch_size, 256, 0, stream>>>(sequence_length, mask, mask_index);
}
return CUDA_CALL(cudaPeekAtLastError());
}
// Fused embedding lookup + layer normalization.
// Launch geometry (set by EmbedSkipLayerNorm below): grid = (sequence_length,
// batch_size, 1), block = (TPB, 1, 1); each block produces one normalized
// hidden vector of length hidden_size.
// segment_ids, segment_embedding, position_ids and embedding_sum may each be
// nullptr (missing segment ids default to segment 0; missing position ids
// default to 0..sequence_length-1).
template <typename T, unsigned TPB>
__global__ void EmbedLayerNormKernel(
int hidden_size, const int* input_ids, const int* segment_ids, const T* beta, const T* gamma,
const T* word_embedding, const T* position_embedding, const T* segment_embedding,
const T epsilon, T* output, T* embedding_sum, const int* position_ids) {
// combiner used to accumulate (sum(x)/H, sum(x^2)/H) pairs for the LayerNorm reduction
KeyValuePairSum pair_sum;
// 1. lookup word and segment of the block
// blockIdx.x = position in the sequence
// blockIdx.y = batch
// gridDim.x = sequence_length
// gridDim.y = batch_size
__shared__ int word_id;
__shared__ int segment_id;
__shared__ int position_id;
// reciprocal of the hidden size: scaling each value by rld turns the
// block-wide sums into the mean and mean-of-squares LayerNorm needs
const T rld = T(1.f / hidden_size);
const int sequence_position = blockIdx.y * gridDim.x + blockIdx.x;
// thread 0 fetches the ids once; the __syncthreads below publishes them to the block
if (threadIdx.x == 0) {
word_id = input_ids[sequence_position];
if (nullptr == segment_ids) {
segment_id = 0;
} else {
segment_id = segment_ids[sequence_position];
}
if (nullptr == position_ids) {
position_id = blockIdx.x;
} else {
position_id = position_ids[sequence_position];
}
}
__syncthreads();
// 2. load pos/segment/word embeddings and add them together
// offset into embeddings is given by word_id * hidden_size
const int position_offset = position_id * hidden_size;
const int word_offset = word_id * hidden_size;
const int segment_offset = segment_id * hidden_size;
// the output offset is given by b * (sequence_length * hidden_size) + s * hidden_size
const int output_offset = sequence_position * hidden_size;
cub::KeyValuePair<T, T> thread_data(0, 0);
// strided loop: TPB threads cooperatively cover the hidden dimension
for (int it = threadIdx.x; it < hidden_size; it += TPB) {
const T w(word_embedding[word_offset + it]);
T t(0);
if (nullptr != segment_embedding)
t = segment_embedding[segment_offset + it];
const T p(position_embedding[position_offset + it]);
const T val = w + t + p;
output[output_offset + it] = val;
// optionally expose the pre-LayerNorm embedding sum to the caller
if (embedding_sum != nullptr) {
embedding_sum[output_offset + it] = val;
}
const T rldval = rld * val;
thread_data = pair_sum(thread_data, cub::KeyValuePair<T, T>(rldval, rldval * val));
}
// 3. layer norm on the sum
// LayerNorm reduces thread_data across the block and rewrites output in place
LayerNorm<T, TPB>(thread_data, hidden_size, output_offset, beta, gamma, epsilon, output);
}
// Host-side wrapper: launches EmbedLayerNormKernel with one block per
// (sequence position, batch) pair and 256 threads cooperating on the hidden
// dimension. Returns true iff the launch was recorded without a CUDA error.
template <typename T>
bool EmbedSkipLayerNorm(
    cudaStream_t stream, int hidden_size, int batch_size, int sequence_length,
    const int* input_ids, const int* segment_ids, const T* beta, const T* gamma,
    const T* word_embedding, const T* position_embedding, const T* segment_embedding,
    const T epsilon, T* output, T* embedding_sum, const int* position_ids) {
  constexpr int kThreadsPerBlock = 256;
  const dim3 grid_dim(sequence_length, batch_size, 1);
  const dim3 block_dim(kThreadsPerBlock, 1, 1);
  EmbedLayerNormKernel<T, kThreadsPerBlock><<<grid_dim, block_dim, 0, stream>>>(
      hidden_size, input_ids, segment_ids, beta, gamma, word_embedding,
      position_embedding, segment_embedding, epsilon, output, embedding_sum,
      position_ids);
  return CUDA_CALL(cudaPeekAtLastError());
}
// Entry point: derives mask_index from input_mask (or zero-fills it when no
// mask is supplied), then dispatches the fused embedding + LayerNorm kernel
// at half or float precision depending on element_size.
// Returns false as soon as any launch fails.
bool LaunchEmbedLayerNormKernel(
    cudaStream_t stream,
    void* output,
    void* mask_index,
    const int* input_ids,
    const int* segment_ids,
    const int* input_mask,
    const void* gamma,
    const void* beta,
    const void* word_embedding,
    const void* position_embedding,
    const void* segment_embedding,
    float epsilon,
    const int hidden_size,
    int batch_size,
    int sequence_length,
    const size_t element_size,
    void* embedding_sum,
    const int* position_ids) {
  if (input_mask == nullptr) {
    // No mask given: every batch entry's index defaults to 0.
    if (!CUDA_CALL(cudaMemsetAsync(mask_index, 0, sizeof(int) * batch_size, stream))) {
      return false;
    }
  } else {
    const bool mask_ok =
        ComputeMaskIndex(stream, sequence_length, batch_size, input_mask, static_cast<int*>(mask_index));
    if (!mask_ok) {
      return false;
    }
  }
  // A 2-byte element selects half precision; anything else runs in float.
  if (element_size == 2) {
    return EmbedSkipLayerNorm<half>(
        stream, hidden_size, batch_size, sequence_length, input_ids, segment_ids,
        reinterpret_cast<const half*>(beta), reinterpret_cast<const half*>(gamma),
        reinterpret_cast<const half*>(word_embedding), reinterpret_cast<const half*>(position_embedding),
        reinterpret_cast<const half*>(segment_embedding), __float2half_rn(epsilon),
        reinterpret_cast<half*>(output), reinterpret_cast<half*>(embedding_sum), position_ids);
  }
  return EmbedSkipLayerNorm<float>(
      stream, hidden_size, batch_size, sequence_length, input_ids, segment_ids,
      reinterpret_cast<const float*>(beta), reinterpret_cast<const float*>(gamma),
      reinterpret_cast<const float*>(word_embedding), reinterpret_cast<const float*>(position_embedding),
      reinterpret_cast<const float*>(segment_embedding), epsilon,
      reinterpret_cast<float*>(output), reinterpret_cast<float*>(embedding_sum), position_ids);
}
} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
|
8697f3ab9b3d9423f600eb7a8283ca62a3c33719.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define RADIX_BITS 4
#define RADIX_SIZE (1<<RADIX_BITS)
#define RADIX_MASK(n) ((RADIX_SIZE-1) << (n*RADIX_BITS))
#define RADIX_DIGITS(T) (bitsof(T)/RADIX_BITS)
// works when length on axis is within max allowed threads in block (1024)
// Dense top-k along the innermost axis via in-block radix select.
// One thread block handles one slice of the input; one thread per element
// along the axis, so the axis length must not exceed the block size (<= 1024
// per the note above). The $-prefixed placeholders are substituted by the
// host-side template expansion before compilation. A negative k selects the
// k smallest values instead of the k largest (~x below inverts radix order).
KERNEL void k_topk_dense(
$dims
// size_t dims_1, ssize_t dims_2, ... , dims_$${NDIM}
$dstv
// INPUT_TYPE *dstv
$dstv_strides
// ssize_t dstv_strides_0, ssize_t dstv_strides_1, ... , dstv_strides_$${NDIM}
$dsti
// INDEX_TYPE *dsti
$dsti_strides
// ssize_t dsti_strides_0, ssize_t dsti_strides_1, ... , dsti_strides_$${NDIM}
ssize_t k,
INPUT_TYPE* src,
$src_strides
// ssize_t src_strides_0, ssize_t src_strides_1, ... , src_strides_$${NDIM}
size_t size) {
// per-warp digit-count scratch (up to 32 warps x RADIX_SIZE bins); also
// reused by binary_cumsum_exclusive at the end
__shared__ int smem[32 * RADIX_SIZE];
// number of top-k slots still unassigned under the current radix prefix
__shared__ int k2;
const unsigned int idx = threadIdx.x;
// is_topk: this element is still a candidate for the top-k set;
// is_topkth: it may still be the boundary (k-th ranked) value
bool is_topk= (idx < size);
bool is_topkth = is_topk;
size_t out_idx;
const unsigned char warp_id = idx / GA_WARP_SIZE;
// 0. get the slice for thread block to work on
size_t gid = blockIdx.x, gidx;
$set_slice
// $$set_slice expands into:
//for(int i=1; i<NDIM; i++) {
// gidx = gid % dims_$${i};
// gid /= dims_$${i};
// dsti = ptr_add(dsti, gidx*dsti_strides_$${i};
// dstv = ptr_add(dstv, gidx*dstv_strides_$${i};
// src = ptr_add(src, gidx*src_strides_$${i});
//}
// get input and its radix friendly form
const INPUT_TYPE xval = is_topk ? ptr_at(src, idx*src_strides_0) : (INPUT_TYPE)0;
radix_t x = RadixConfig<INPUT_TYPE>::convert(xval);
// resolve negative k
if (k<0) { x = ~x; k = -k; }
if (idx==0)
k2 = k;
// 1. filter is_topk and is_topkth using radix select
// Scan radix digits from most to least significant; each pass narrows the
// candidate set to elements sharing the boundary value's digit prefix.
#pragma unroll
for (int i=bitsof(INPUT_TYPE)-RADIX_BITS; i>=0; i-=RADIX_BITS) {
const int digit = Bitfield<radix_t>::get(x, i, RADIX_BITS);
/*int digit = (x>>i) & (RADIX_SIZE-1);*/
// count within warp
#pragma unroll
for (int bin=0; bin<RADIX_SIZE; ++bin) {
bool vote = (bin == digit) && is_topkth;
// NOTE(review): mask-less __ballot is the pre-Volta warp vote; presumably
// mapped to a _sync variant by the host-side headers for newer targets — confirm
unsigned int votes = __ballot(vote);
if (lane_id()==0)
smem[bin + RADIX_SIZE*warp_id] = __popc(votes);
}
local_barrier();
// sum counts across all warps
if (idx < RADIX_SIZE) {
int sum = smem[idx];
#pragma unroll
for(int w=RADIX_SIZE; w<blockDim.x*RADIX_SIZE / GA_WARP_SIZE; w+=RADIX_SIZE)
sum += smem[idx + w];
smem[idx] = sum;
}
local_barrier();
// find the bucket and update k2
// smem[:RADIX_SIZE:-1] = k2 - cumsum(smem[:RADIX_SIZE-1:-1])
// After this, smem[bin] = k2 - (count of candidates in buckets >= bin);
// the boundary bucket is where that value crosses zero, and k2 becomes the
// number of slots still to fill inside it.
if (idx == 0) {
int sum = k2;
#pragma unroll
for (int bin=RADIX_SIZE-1; bin>=0; --bin) {
sum -= smem[bin];
smem[bin] = sum;
k2 = (sum > 0) ? sum : k2;
}
// sentinel so smem[digit+1] is defined when digit == RADIX_SIZE-1
smem[RADIX_SIZE] = 1;
}
local_barrier();
if (is_topkth) {
is_topk &= (smem[digit+1] > 0);
is_topkth &= (smem[digit] <= 0) && (smem[digit+1] > 0);
}
local_barrier();
}
// set k2 as number of exceeding values
if (idx==0) {
#pragma unroll
for (int bin=RADIX_SIZE-1; bin>=0; --bin) {
if (smem[bin] <= 0)
break;
k2 = smem[bin];
}
}
local_barrier();
// 2. find the index of output array, if exists
if (k2 != 0) {
// top_kth value may not be unique, so we need to
// perform binary cumsum on is_topkth to drop exceeding top-kth values
out_idx = binary_cumsum_exclusive(idx, warp_id, smem, is_topkth);
if ((out_idx >= k2) && is_topkth)
is_topk = false;
local_barrier();
}
// perform binary cumsum on is_topk to determine the indices to put result
// (surviving candidates are scattered densely into the destination slice)
out_idx = binary_cumsum_exclusive(idx, warp_id, smem, is_topk);
if (is_topk) {
#if WRITE_VALUE == 1
ptr_at(dstv, out_idx * dstv_strides_0) = xval;
#endif
#if WRITE_INDEX == 1
ptr_at(dsti, out_idx * dsti_strides_0) = (INDEX_TYPE)idx;
#endif
}
}
| 8697f3ab9b3d9423f600eb7a8283ca62a3c33719.cu | #define RADIX_BITS 4
#define RADIX_SIZE (1<<RADIX_BITS)
#define RADIX_MASK(n) ((RADIX_SIZE-1) << (n*RADIX_BITS))
#define RADIX_DIGITS(T) (bitsof(T)/RADIX_BITS)
// works when length on axis is within max allowed threads in block (1024)
// Dense top-k along the innermost axis via in-block radix select.
// One thread block handles one slice of the input; one thread per element
// along the axis, so the axis length must not exceed the block size (<= 1024
// per the note above). The $-prefixed placeholders are substituted by the
// host-side template expansion before compilation. A negative k selects the
// k smallest values instead of the k largest (~x below inverts radix order).
KERNEL void k_topk_dense(
$dims
// size_t dims_1, ssize_t dims_2, ... , dims_$${NDIM}
$dstv
// INPUT_TYPE *dstv
$dstv_strides
// ssize_t dstv_strides_0, ssize_t dstv_strides_1, ... , dstv_strides_$${NDIM}
$dsti
// INDEX_TYPE *dsti
$dsti_strides
// ssize_t dsti_strides_0, ssize_t dsti_strides_1, ... , dsti_strides_$${NDIM}
ssize_t k,
INPUT_TYPE* src,
$src_strides
// ssize_t src_strides_0, ssize_t src_strides_1, ... , src_strides_$${NDIM}
size_t size) {
// per-warp digit-count scratch (up to 32 warps x RADIX_SIZE bins); also
// reused by binary_cumsum_exclusive at the end
__shared__ int smem[32 * RADIX_SIZE];
// number of top-k slots still unassigned under the current radix prefix
__shared__ int k2;
const unsigned int idx = threadIdx.x;
// is_topk: this element is still a candidate for the top-k set;
// is_topkth: it may still be the boundary (k-th ranked) value
bool is_topk= (idx < size);
bool is_topkth = is_topk;
size_t out_idx;
const unsigned char warp_id = idx / GA_WARP_SIZE;
// 0. get the slice for thread block to work on
size_t gid = blockIdx.x, gidx;
$set_slice
// $$set_slice expands into:
//for(int i=1; i<NDIM; i++) {
// gidx = gid % dims_$${i};
// gid /= dims_$${i};
// dsti = ptr_add(dsti, gidx*dsti_strides_$${i};
// dstv = ptr_add(dstv, gidx*dstv_strides_$${i};
// src = ptr_add(src, gidx*src_strides_$${i});
//}
// get input and its radix friendly form
const INPUT_TYPE xval = is_topk ? ptr_at(src, idx*src_strides_0) : (INPUT_TYPE)0;
radix_t x = RadixConfig<INPUT_TYPE>::convert(xval);
// resolve negative k
if (k<0) { x = ~x; k = -k; }
if (idx==0)
k2 = k;
// 1. filter is_topk and is_topkth using radix select
// Scan radix digits from most to least significant; each pass narrows the
// candidate set to elements sharing the boundary value's digit prefix.
#pragma unroll
for (int i=bitsof(INPUT_TYPE)-RADIX_BITS; i>=0; i-=RADIX_BITS) {
const int digit = Bitfield<radix_t>::get(x, i, RADIX_BITS);
/*int digit = (x>>i) & (RADIX_SIZE-1);*/
// count within warp
#pragma unroll
for (int bin=0; bin<RADIX_SIZE; ++bin) {
bool vote = (bin == digit) && is_topkth;
// NOTE(review): mask-less __ballot is the pre-Volta warp vote; presumably
// mapped to a _sync variant by the host-side headers for newer targets — confirm
unsigned int votes = __ballot(vote);
if (lane_id()==0)
smem[bin + RADIX_SIZE*warp_id] = __popc(votes);
}
local_barrier();
// sum counts across all warps
if (idx < RADIX_SIZE) {
int sum = smem[idx];
#pragma unroll
for(int w=RADIX_SIZE; w<blockDim.x*RADIX_SIZE / GA_WARP_SIZE; w+=RADIX_SIZE)
sum += smem[idx + w];
smem[idx] = sum;
}
local_barrier();
// find the bucket and update k2
// smem[:RADIX_SIZE:-1] = k2 - cumsum(smem[:RADIX_SIZE-1:-1])
// After this, smem[bin] = k2 - (count of candidates in buckets >= bin);
// the boundary bucket is where that value crosses zero, and k2 becomes the
// number of slots still to fill inside it.
if (idx == 0) {
int sum = k2;
#pragma unroll
for (int bin=RADIX_SIZE-1; bin>=0; --bin) {
sum -= smem[bin];
smem[bin] = sum;
k2 = (sum > 0) ? sum : k2;
}
// sentinel so smem[digit+1] is defined when digit == RADIX_SIZE-1
smem[RADIX_SIZE] = 1;
}
local_barrier();
if (is_topkth) {
is_topk &= (smem[digit+1] > 0);
is_topkth &= (smem[digit] <= 0) && (smem[digit+1] > 0);
}
local_barrier();
}
// set k2 as number of exceeding values
if (idx==0) {
#pragma unroll
for (int bin=RADIX_SIZE-1; bin>=0; --bin) {
if (smem[bin] <= 0)
break;
k2 = smem[bin];
}
}
local_barrier();
// 2. find the index of output array, if exists
if (k2 != 0) {
// top_kth value may not be unique, so we need to
// perform binary cumsum on is_topkth to drop exceeding top-kth values
out_idx = binary_cumsum_exclusive(idx, warp_id, smem, is_topkth);
if ((out_idx >= k2) && is_topkth)
is_topk = false;
local_barrier();
}
// perform binary cumsum on is_topk to determine the indices to put result
// (surviving candidates are scattered densely into the destination slice)
out_idx = binary_cumsum_exclusive(idx, warp_id, smem, is_topk);
if (is_topk) {
#if WRITE_VALUE == 1
ptr_at(dstv, out_idx * dstv_strides_0) = xval;
#endif
#if WRITE_INDEX == 1
ptr_at(dsti, out_idx * dsti_strides_0) = (INDEX_TYPE)idx;
#endif
}
}
|
f6bf317d5407ef64fe42582502bec9c6dc3401e0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2009-2019 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
// Maintainer: joaander
#include "ComputeThermoGPU.cuh"
#include "VectorMath.h"
#include <assert.h>
//! Shared memory used in reducing the sums
extern __shared__ Scalar3 compute_thermo_sdata[];
//! Shared memory used in final reduction
extern __shared__ Scalar4 compute_thermo_final_sdata[];
//! Shared memory used in reducing the sums of the pressure tensor
extern __shared__ Scalar compute_pressure_tensor_sdata[];
//! Shared memory used in reducing the sum of the rotational kinetic energy
extern __shared__ Scalar compute_ke_rot_sdata[];
/*! \file ComputeThermoGPU.cu
\brief Defines GPU kernel code for computing thermodynamic properties on the GPU. Used by ComputeThermoGPU.
*/
//! Perform partial sums of the thermo properties on the GPU
/*! \param d_scratch Scratch space to hold partial sums. One element is written per block
\param d_net_force Net force / pe array from ParticleData
\param d_net_virial Net virial array from ParticleData
\param virial_pitch pitch of 2D virial array
\param d_velocity Particle velocity and mass array from ParticleData
\param d_body Particle body id
\param d_tag Particle tag
\param d_group_members List of group members for which to sum properties
\param work_size Number of particles in the group this GPU processes
\param offset Offset of this GPU in list of group members
\param block_offset Offset of this GPU in the array of partial sums
All partial sums are packaged up in a Scalar4 to keep pointer management down.
- 2*Kinetic energy is summed in .x
- Potential energy is summed in .y
- W is summed in .z
One thread is executed per group member. That thread reads in the values for its member into shared memory
and then the block performs a reduction in parallel to produce a partial sum output for the block. These
partial sums are written to d_scratch[blockIdx.x]. sizeof(Scalar3)*block_size of dynamic shared memory are needed
for this kernel to run.
*/
// Per-block partial sums of (2*translational KE, potential energy, isotropic
// virial W) packed into Scalar4.xyz; see the doxygen block above for the full
// contract. Requires sizeof(Scalar3)*blockDim.x dynamic shared memory.
__global__ void gpu_compute_thermo_partial_sums(Scalar4 *d_scratch,
Scalar4 *d_net_force,
Scalar *d_net_virial,
const unsigned int virial_pitch,
Scalar4 *d_velocity,
unsigned int *d_body,
unsigned int *d_tag,
unsigned int *d_group_members,
unsigned int work_size,
unsigned int offset,
unsigned int block_offset)
{
// determine which particle this thread works on
int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
Scalar3 my_element; // element of scratch space read in
// non-participating thread: contribute 0 to the sum
my_element = make_scalar3(0, 0, 0);
if (group_idx < work_size)
{
unsigned int idx = d_group_members[group_idx+offset];
// ignore rigid body constituent particles in the sum
unsigned int body = d_body[idx];
unsigned int tag = d_tag[idx];
if (body >= MIN_FLOPPY || body == tag)
{
// read the per-particle quantities that enter the sums
Scalar4 net_force = d_net_force[idx];
Scalar net_isotropic_virial;
// (1/3)*trace of virial tensor
net_isotropic_virial = Scalar(1.0/3.0)*
(d_net_virial[0*virial_pitch+idx] // xx
+d_net_virial[3*virial_pitch+idx] // yy
+d_net_virial[5*virial_pitch+idx]); // zz
Scalar4 vel = d_velocity[idx];
Scalar mass = vel.w;
// compute our contribution to the sum
// .x holds m*v^2 = twice the kinetic energy; .y the per-particle PE
// (carried in net_force.w); .z the isotropic virial
my_element.x = mass * (vel.x*vel.x + vel.y*vel.y + vel.z*vel.z);
my_element.y = net_force.w;
my_element.z = net_isotropic_virial;
}
}
compute_thermo_sdata[threadIdx.x] = my_element;
__syncthreads();
// reduce the sum in parallel
// tree reduction in shared memory; assumes blockDim.x is a power of
// two — TODO confirm the host launch configuration guarantees this
int offs = blockDim.x >> 1;
while (offs > 0)
{
if (threadIdx.x < offs)
{
compute_thermo_sdata[threadIdx.x].x += compute_thermo_sdata[threadIdx.x + offs].x;
compute_thermo_sdata[threadIdx.x].y += compute_thermo_sdata[threadIdx.x + offs].y;
compute_thermo_sdata[threadIdx.x].z += compute_thermo_sdata[threadIdx.x + offs].z;
}
offs >>= 1;
__syncthreads();
}
// write out our partial sum
if (threadIdx.x == 0)
{
Scalar3 res = compute_thermo_sdata[0];
d_scratch[block_offset + blockIdx.x] = make_scalar4(res.x, res.y, res.z, 0);
}
}
//! Perform partial sums of the pressure tensor on the GPU
/*! \param d_scratch Scratch space to hold partial sums. One element is written per block
\param d_net_force Net force / pe array from ParticleData
\param d_net_virial Net virial array from ParticleData
\param virial_pitch pitch of 2D virial array
\param d_velocity Particle velocity and mass array from ParticleData
\param d_body Particle body id
\param d_tag Particle tag
\param d_group_members List of group members for which to sum properties
\param work_size Number of particles in the group
\param offset Offset of this GPU in the list of group members
\param block_offset Offset of this GPU in the array of partial sums
\param num_blocks Total number of partial sums by all GPUs
One thread is executed per group member. That thread reads in the six values (components of the pressure tensor)
for its member into shared memory and then the block performs a reduction in parallel to produce a partial sum output for the block.
These partial sums are written to d_scratch[i*gridDim.x + blockIdx.x], where i=0..5 is the index of the component.
For this kernel to run, 6*sizeof(Scalar)*block_size of dynamic shared memory are needed.
*/
// Per-block partial sums of the six unique pressure-tensor components
// (kinetic m*v_a*v_b plus the corresponding virial entry); see the doxygen
// block above. Requires 6*sizeof(Scalar)*blockDim.x dynamic shared memory.
// NOTE(review): d_net_force is accepted for signature symmetry with the other
// partial-sum kernels but is never read here.
__global__ void gpu_compute_pressure_tensor_partial_sums(Scalar *d_scratch,
Scalar4 *d_net_force,
Scalar *d_net_virial,
const unsigned int virial_pitch,
Scalar4 *d_velocity,
unsigned int *d_body,
unsigned int *d_tag,
unsigned int *d_group_members,
unsigned int work_size,
unsigned int offset,
unsigned int block_offset,
unsigned int num_blocks)
{
// determine which particle this thread works on
int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
Scalar my_element[6]; // element of scratch space read in
// non-participating threads: contribute 0 to the sum
my_element[0] = 0;
my_element[1] = 0;
my_element[2] = 0;
my_element[3] = 0;
my_element[4] = 0;
my_element[5] = 0;
if (group_idx < work_size)
{
unsigned int idx = d_group_members[group_idx+offset];
// ignore rigid body constituent particles in the sum
unsigned int body = d_body[idx];
unsigned int tag = d_tag[idx];
if (body >= MIN_FLOPPY || body == tag)
{
// compute contribution to pressure tensor and store it in my_element
Scalar4 vel = d_velocity[idx];
Scalar mass = vel.w;
my_element[0] = mass*vel.x*vel.x + d_net_virial[0*virial_pitch+idx]; // xx
my_element[1] = mass*vel.x*vel.y + d_net_virial[1*virial_pitch+idx]; // xy
my_element[2] = mass*vel.x*vel.z + d_net_virial[2*virial_pitch+idx]; // xz
my_element[3] = mass*vel.y*vel.y + d_net_virial[3*virial_pitch+idx]; // yy
my_element[4] = mass*vel.y*vel.z + d_net_virial[4*virial_pitch+idx]; // yz
my_element[5] = mass*vel.z*vel.z + d_net_virial[5*virial_pitch+idx]; // zz
}
}
// shared layout: component-major, blockDim.x entries per component
for (unsigned int i = 0; i < 6; i++)
compute_pressure_tensor_sdata[i*blockDim.x+threadIdx.x] = my_element[i];
__syncthreads();
// reduce the sum in parallel
// tree reduction; assumes blockDim.x is a power of two — TODO confirm
int offs = blockDim.x >> 1;
while (offs > 0)
{
if (threadIdx.x < offs)
{
for (unsigned int i = 0; i < 6; i++)
compute_pressure_tensor_sdata[i*blockDim.x+threadIdx.x] += compute_pressure_tensor_sdata[i*blockDim.x + threadIdx.x + offs];
}
offs >>= 1;
__syncthreads();
}
// write out our partial sum
// output layout is component-major: d_scratch[num_blocks*i + global_block]
if (threadIdx.x == 0)
{
for (unsigned int i = 0; i < 6; i++)
d_scratch[num_blocks * i + blockIdx.x + block_offset] = compute_pressure_tensor_sdata[i*blockDim.x];
}
}
//! Perform partial sums of the rotational KE on the GPU
/*! \param d_scratch Scratch space to hold partial sums. One element is written per block
\param d_orientation Orientation quaternions from ParticleData
\param d_angmom Conjugate quaternions from ParticleData
\param d_inertia Moments of inertia from ParticleData
\param d_body Particle body id
\param d_tag Particle tag
\param d_group_members List of group members for which to sum properties
\param work_size Number of particles in the group processed by this GPU
\param offset Offset of this GPU in the list of group members
\param block_offset Output offset of this GPU
*/
// Per-block partial sums of the rotational kinetic energy; see the doxygen
// block above. Requires sizeof(Scalar)*blockDim.x dynamic shared memory.
__global__ void gpu_compute_rotational_ke_partial_sums(Scalar *d_scratch,
const Scalar4 *d_orientation,
const Scalar4 *d_angmom,
const Scalar3 *d_inertia,
unsigned int *d_body,
unsigned int *d_tag,
unsigned int *d_group_members,
unsigned int work_size,
unsigned int offset,
unsigned int block_offset)
{
// determine which particle this thread works on
int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
Scalar my_element; // element of scratch space read in
// non-participating thread: contribute 0 to the sum
my_element = Scalar(0.0);
if (group_idx < work_size)
{
unsigned int idx = d_group_members[group_idx+offset];
// ignore rigid body constituent particles in the sum
unsigned int body = d_body[idx];
unsigned int tag = d_tag[idx];
if (body >= MIN_FLOPPY || body == tag)
{
quat<Scalar> q(d_orientation[idx]);
quat<Scalar> p(d_angmom[idx]);
vec3<Scalar> I(d_inertia[idx]);
// s.v is presumably the body-frame angular momentum obtained from the
// conjugate angular-momentum quaternion — TODO confirm convention
quat<Scalar> s(Scalar(0.5)*conj(q)*p);
Scalar ke_rot(0.0);
// degenerate (near-zero) moments of inertia contribute no KE
if (I.x >= EPSILON)
{
ke_rot += s.v.x*s.v.x/I.x;
}
if (I.y >= EPSILON)
{
ke_rot += s.v.y*s.v.y/I.y;
}
if (I.z >= EPSILON)
{
ke_rot += s.v.z*s.v.z/I.z;
}
// compute our contribution to the sum: KE_rot = sum_a L_a^2 / (2 I_a)
my_element = ke_rot*Scalar(1.0/2.0);
}
}
compute_ke_rot_sdata[threadIdx.x] = my_element;
__syncthreads();
// reduce the sum in parallel
// tree reduction; assumes blockDim.x is a power of two — TODO confirm
int offs = blockDim.x >> 1;
while (offs > 0)
{
if (threadIdx.x < offs)
compute_ke_rot_sdata[threadIdx.x] += compute_ke_rot_sdata[threadIdx.x + offs];
offs >>= 1;
__syncthreads();
}
// write out our partial sum
if (threadIdx.x == 0)
{
d_scratch[blockIdx.x + block_offset] = compute_ke_rot_sdata[0];
}
}
//! Complete partial sums and compute final thermodynamic quantities (for pressure, only isotropic contribution)
/*! \param d_properties Property array to write final values
\param d_scratch Partial sums
\param d_scratch_rot Partial sums of rotational kinetic energy
\param ndof Number of degrees of freedom this group possesses
\param box Box the particles are in
\param D Dimensionality of the system
\param group_size Number of particles in the group
\param num_partial_sums Number of partial sums in \a d_scratch
\param external_virial External contribution to virial (1/3 trace)
\param external_energy External contribution to potential energy
Only one block is executed. In that block, the partial sums are read in and reduced to final values. From the final
sums, the thermodynamic properties are computed and written to d_properties.
sizeof(Scalar4)*block_size bytes of shared memory are needed for this kernel to run.
*/
// Single-block final reduction: folds all partial sums (packed as
// (2*KE_trans, PE, W) in d_scratch plus KE_rot in d_scratch_rot) into the
// grand totals, then derives energies and the isotropic pressure.
// See the doxygen block above; requires sizeof(Scalar4)*blockDim.x shared memory.
__global__ void gpu_compute_thermo_final_sums(Scalar *d_properties,
Scalar4 *d_scratch,
Scalar *d_scratch_rot,
unsigned int ndof,
BoxDim box,
unsigned int D,
unsigned int group_size,
unsigned int num_partial_sums,
Scalar external_virial,
Scalar external_energy
)
{
// only thread 0's copy of final_sum accumulates the grand total
Scalar4 final_sum = make_scalar4(Scalar(0.0), Scalar(0.0), Scalar(0.0),Scalar(0.0));
// sum up the values in the partial sum via a sliding window
// (each pass reduces blockDim.x partial sums in shared memory)
for (int start = 0; start < num_partial_sums; start += blockDim.x)
{
__syncthreads();
if (start + threadIdx.x < num_partial_sums)
{
Scalar4 scratch = d_scratch[start + threadIdx.x];
Scalar scratch_rot = d_scratch_rot[start + threadIdx.x];
compute_thermo_final_sdata[threadIdx.x] = make_scalar4(scratch.x, scratch.y, scratch.z, scratch_rot);
}
else
compute_thermo_final_sdata[threadIdx.x] = make_scalar4(Scalar(0.0), Scalar(0.0), Scalar(0.0), Scalar(0.0));
__syncthreads();
// reduce the sum in parallel
// tree reduction; assumes blockDim.x is a power of two (512 at launch)
int offs = blockDim.x >> 1;
while (offs > 0)
{
if (threadIdx.x < offs)
{
compute_thermo_final_sdata[threadIdx.x].x += compute_thermo_final_sdata[threadIdx.x + offs].x;
compute_thermo_final_sdata[threadIdx.x].y += compute_thermo_final_sdata[threadIdx.x + offs].y;
compute_thermo_final_sdata[threadIdx.x].z += compute_thermo_final_sdata[threadIdx.x + offs].z;
compute_thermo_final_sdata[threadIdx.x].w += compute_thermo_final_sdata[threadIdx.x + offs].w;
}
offs >>= 1;
__syncthreads();
}
if (threadIdx.x == 0)
{
final_sum.x += compute_thermo_final_sdata[0].x;
final_sum.y += compute_thermo_final_sdata[0].y;
final_sum.z += compute_thermo_final_sdata[0].z;
final_sum.w += compute_thermo_final_sdata[0].w;
}
}
if (threadIdx.x == 0)
{
// compute final quantities
Scalar ke_trans_total = final_sum.x * Scalar(0.5);
Scalar pe_total = final_sum.y + external_energy;
Scalar W = final_sum.z + external_virial;
Scalar ke_rot_total = final_sum.w;
// compute the pressure
// volume/area & other 2D stuff needed
Scalar volume;
Scalar3 L = box.getL();
if (D == 2)
{
// "volume" is area in 2D
volume = L.x * L.y;
// W needs to be corrected since the 1/3 factor is built in
W *= Scalar(3.0)/Scalar(2.0);
}
else
{
volume = L.x * L.y * L.z;
}
// pressure: P = (N * K_B * T + W)/V
Scalar pressure = (Scalar(2.0) * ke_trans_total / Scalar(D) + W) / volume;
// fill out the GPUArray
d_properties[thermo_index::translational_kinetic_energy] = Scalar(ke_trans_total);
d_properties[thermo_index::rotational_kinetic_energy] = Scalar(ke_rot_total);
d_properties[thermo_index::potential_energy] = Scalar(pe_total);
d_properties[thermo_index::pressure] = pressure;
}
}
//! Complete partial sums and compute final pressure tensor
/*! \param d_properties Property array to write final values
\param d_scratch Partial sums
\param box Box the particles are in
\param group_size Number of particles in the group
\param num_partial_sums Number of partial sums in \a d_scratch
\param external_virial_xx External contribution to virial (xx component)
\param external_virial_xy External contribution to virial (xy component)
\param external_virial_xz External contribution to virial (xz component)
\param external_virial_yy External contribution to virial (yy component)
\param external_virial_yz External contribution to virial (yz component)
\param external_virial_zz External contribution to virial (zz component)
Only one block is executed. In that block, the partial sums are read in and reduced to final values. From the final
sums, the thermodynamic properties are computed and written to d_properties.
6*sizeof(Scalar)*block_size bytes of shared memory are needed for this kernel to run.
*/
// Single-block final reduction for the six pressure-tensor components; the
// external virial seeds the totals and the result is divided by the box
// volume (area in 2D). See the doxygen block above; requires
// 6*sizeof(Scalar)*blockDim.x dynamic shared memory.
__global__ void gpu_compute_pressure_tensor_final_sums(Scalar *d_properties,
Scalar *d_scratch,
BoxDim box,
unsigned int group_size,
unsigned int num_partial_sums,
Scalar external_virial_xx,
Scalar external_virial_xy,
Scalar external_virial_xz,
Scalar external_virial_yy,
Scalar external_virial_yz,
Scalar external_virial_zz,
bool twod)
{
// totals start from the external virial contribution; only thread 0's
// copy accumulates the reduced sums
Scalar final_sum[6];
final_sum[0] = external_virial_xx;
final_sum[1] = external_virial_xy;
final_sum[2] = external_virial_xz;
final_sum[3] = external_virial_yy;
final_sum[4] = external_virial_yz;
final_sum[5] = external_virial_zz;
// sum up the values in the partial sum via a sliding window
// (d_scratch is component-major: component i lives at i*num_partial_sums)
for (int start = 0; start < num_partial_sums; start += blockDim.x)
{
__syncthreads();
if (start + threadIdx.x < num_partial_sums)
{
for (unsigned int i = 0; i < 6; i++)
compute_pressure_tensor_sdata[i * blockDim.x + threadIdx.x] = d_scratch[i*num_partial_sums + start + threadIdx.x];
}
else
for (unsigned int i = 0; i < 6; i++)
compute_pressure_tensor_sdata[i * blockDim.x + threadIdx.x] = Scalar(0.0);
__syncthreads();
// reduce the sum in parallel
// tree reduction; assumes blockDim.x is a power of two (512 at launch)
int offs = blockDim.x >> 1;
while (offs > 0)
{
if (threadIdx.x < offs)
{
for (unsigned int i = 0; i < 6; i++)
compute_pressure_tensor_sdata[i*blockDim.x + threadIdx.x] += compute_pressure_tensor_sdata[i*blockDim.x + threadIdx.x + offs];
}
offs >>= 1;
__syncthreads();
}
if (threadIdx.x == 0)
{
for (unsigned int i = 0; i < 6; i++)
final_sum[i] += compute_pressure_tensor_sdata[i*blockDim.x];
}
}
if (threadIdx.x == 0)
{
// fill out the GPUArray
// we have thus far calculated the sum of the kinetic part of the pressure tensor
// and the virial part, the definition includes an inverse factor of the box volume
Scalar V = box.getVolume(twod);
d_properties[thermo_index::pressure_xx] = final_sum[0]/V;
d_properties[thermo_index::pressure_xy] = final_sum[1]/V;
d_properties[thermo_index::pressure_xz] = final_sum[2]/V;
d_properties[thermo_index::pressure_yy] = final_sum[3]/V;
d_properties[thermo_index::pressure_yz] = final_sum[4]/V;
d_properties[thermo_index::pressure_zz] = final_sum[5]/V;
}
}
//! Compute partial sums of thermodynamic properties of a group on the GPU,
/*! \param d_properties Array to write computed properties
\param d_vel particle velocities and masses on the GPU
\param d_body Particle body id
\param d_tag Particle tag
\param d_group_members List of group members
\param group_size Number of group members
\param box Box the particles are in
\param args Additional arguments
\param compute_pressure_tensor whether to compute the full pressure tensor
\param compute_rotational_energy whether to compute the rotational kinetic energy
\param gpu_partition Load balancing info for multi-GPU reduction
This function drives gpu_compute_thermo_partial_sums and gpu_compute_thermo_final_sums, see them for details.
*/
// Drives the per-GPU partial-sum kernels (thermo, optional pressure tensor,
// optional rotational KE); see the doxygen block above for the parameter
// contract. Fix vs. previous revision: the rotational-energy branch asserted
// args.d_scratch_pressure_tensor (a copy-paste from the pressure branch) even
// though it writes args.d_scratch_rot — it now asserts the buffer it uses.
hipError_t gpu_compute_thermo_partial(Scalar *d_properties,
                              Scalar4 *d_vel,
                              unsigned int *d_body,
                              unsigned int *d_tag,
                              unsigned int *d_group_members,
                              unsigned int group_size,
                              const BoxDim& box,
                              const compute_thermo_args& args,
                              bool compute_pressure_tensor,
                              bool compute_rotational_energy,
                              const GPUPartition& gpu_partition
                              )
    {
    assert(d_properties);
    assert(d_vel);
    assert(d_group_members);
    assert(args.d_net_force);
    assert(args.d_net_virial);
    assert(args.d_scratch);

    unsigned int block_offset = 0;

    // iterate over active GPUs in reverse, to end up on first GPU when returning from this function
    for (int idev = gpu_partition.getNumActiveGPUs() - 1; idev >= 0; --idev)
        {
        auto range = gpu_partition.getRangeAndSetGPU(idev);
        unsigned int nwork = range.second - range.first;

        // keep the historical grid sizing (nwork/block_size + 1): the host-side
        // args.n_blocks is computed the same way, and a trailing empty block
        // only contributes zeros to the partial sums
        dim3 grid(nwork/args.block_size+1, 1, 1);
        dim3 threads(args.block_size, 1, 1);

        // (2*KE, PE, W) partial sums: one Scalar3 of shared memory per thread
        unsigned int shared_bytes = sizeof(Scalar3)*args.block_size;
        hipLaunchKernelGGL(( gpu_compute_thermo_partial_sums), dim3(grid),dim3(threads), shared_bytes, 0, args.d_scratch,
                                                               args.d_net_force,
                                                               args.d_net_virial,
                                                               args.virial_pitch,
                                                               d_vel,
                                                               d_body,
                                                               d_tag,
                                                               d_group_members,
                                                               nwork,
                                                               range.first,
                                                               block_offset);

        if (compute_pressure_tensor)
            {
            assert(args.d_scratch_pressure_tensor);

            // six tensor components per thread in shared memory
            shared_bytes = 6 * sizeof(Scalar) * args.block_size;
            // run the kernel
            hipLaunchKernelGGL(( gpu_compute_pressure_tensor_partial_sums), dim3(grid), dim3(threads), shared_bytes, 0, args.d_scratch_pressure_tensor,
                                                                             args.d_net_force,
                                                                             args.d_net_virial,
                                                                             args.virial_pitch,
                                                                             d_vel,
                                                                             d_body,
                                                                             d_tag,
                                                                             d_group_members,
                                                                             nwork,
                                                                             range.first,
                                                                             block_offset,
                                                                             args.n_blocks);
            }

        if (compute_rotational_energy)
            {
            // bugfix: this branch writes d_scratch_rot, so that is the buffer
            // that must be non-null (previously asserted d_scratch_pressure_tensor)
            assert(args.d_scratch_rot);

            // one Scalar of shared memory per thread
            shared_bytes = sizeof(Scalar) * args.block_size;
            // run the kernel
            hipLaunchKernelGGL(( gpu_compute_rotational_ke_partial_sums), dim3(grid), dim3(threads), shared_bytes, 0, args.d_scratch_rot,
                                                                           args.d_orientation,
                                                                           args.d_angmom,
                                                                           args.d_inertia,
                                                                           d_body,
                                                                           d_tag,
                                                                           d_group_members,
                                                                           nwork,
                                                                           range.first,
                                                                           block_offset);
            }

        block_offset += grid.x;
        }

    assert(block_offset <= args.n_blocks);

    return hipSuccess;
    }
//! Compute thermodynamic properties of a group on the GPU
/*! \param d_properties Array to write computed properties
\param d_vel particle velocities and masses on the GPU
\param d_body Particle body id
\param d_tag Particle tag
\param d_group_members List of group members
\param group_size Number of group members
\param box Box the particles are in
\param args Additional arguments
\param compute_pressure_tensor whether to compute the full pressure tensor
\param compute_rotational_energy whether to compute the rotational kinetic energy
\param num_blocks Number of partial sums to reduce
This function drives gpu_compute_thermo_partial_sums and gpu_compute_thermo_final_sums, see them for details.
*/
// Drives the single-block final-reduction kernels that turn the partial sums
// into the entries of d_properties; see the doxygen block above for the
// parameter contract. Returns hipSuccess (kernel errors surface at the next
// synchronizing call).
hipError_t gpu_compute_thermo_final(Scalar *d_properties,
                              Scalar4 *d_vel,
                              unsigned int *d_body,
                              unsigned int *d_tag,
                              unsigned int *d_group_members,
                              unsigned int group_size,
                              const BoxDim& box,
                              const compute_thermo_args& args,
                              bool compute_pressure_tensor,
                              bool compute_rotational_energy
                              )
    {
    assert(d_properties);
    assert(d_vel);
    assert(d_group_members);
    assert(args.d_net_force);
    assert(args.d_net_virial);
    assert(args.d_scratch);

    // a single 512-thread block sweeps over all partial sums
    constexpr int kFinalBlockSize = 512;
    const dim3 one_block(1, 1, 1);
    const dim3 block_threads(kFinalBlockSize, 1, 1);

    // isotropic external virial contribution: (1/3) * trace of the external virial tensor
    const Scalar external_virial = Scalar(1.0/3.0)*(args.external_virial_xx
                                                    + args.external_virial_yy
                                                    + args.external_virial_zz);

    // reduce the (2*KE, PE, W, KE_rot) partial sums and fill the scalar properties
    hipLaunchKernelGGL(( gpu_compute_thermo_final_sums), dim3(one_block), dim3(block_threads),
                       sizeof(Scalar4)*kFinalBlockSize, 0, d_properties,
                                                           args.d_scratch,
                                                           args.d_scratch_rot,
                                                           args.ndof,
                                                           box,
                                                           args.D,
                                                           group_size,
                                                           args.n_blocks,
                                                           external_virial,
                                                           args.external_energy);

    if (compute_pressure_tensor)
        {
        // reduce the six component-wise partial sums into the full pressure tensor
        hipLaunchKernelGGL(( gpu_compute_pressure_tensor_final_sums), dim3(one_block), dim3(block_threads),
                           6*sizeof(Scalar)*kFinalBlockSize, 0, d_properties,
                                                                args.d_scratch_pressure_tensor,
                                                                box,
                                                                group_size,
                                                                args.n_blocks,
                                                                args.external_virial_xx,
                                                                args.external_virial_xy,
                                                                args.external_virial_xz,
                                                                args.external_virial_yy,
                                                                args.external_virial_yz,
                                                                args.external_virial_zz,
                                                                args.D == 2);
        }

    return hipSuccess;
    }
| f6bf317d5407ef64fe42582502bec9c6dc3401e0.cu | // Copyright (c) 2009-2019 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
// Maintainer: joaander
#include "ComputeThermoGPU.cuh"
#include "VectorMath.h"
#include <assert.h>
//! Shared memory used in reducing the sums
extern __shared__ Scalar3 compute_thermo_sdata[];
//! Shared memory used in final reduction
extern __shared__ Scalar4 compute_thermo_final_sdata[];
//! Shared memory used in reducing the sums of the pressure tensor
extern __shared__ Scalar compute_pressure_tensor_sdata[];
//! Shared memory used in reducing the sum of the rotational kinetic energy
extern __shared__ Scalar compute_ke_rot_sdata[];
/*! \file ComputeThermoGPU.cu
\brief Defines GPU kernel code for computing thermodynamic properties on the GPU. Used by ComputeThermoGPU.
*/
//! Perform partial sums of the thermo properties on the GPU
/*! \param d_scratch Scratch space to hold partial sums. One element is written per block
\param d_net_force Net force / pe array from ParticleData
\param d_net_virial Net virial array from ParticleData
\param virial_pitch pitch of 2D virial array
\param d_velocity Particle velocity and mass array from ParticleData
\param d_body Particle body id
\param d_tag Particle tag
\param d_group_members List of group members for which to sum properties
\param work_size Number of particles in the group this GPU processes
\param offset Offset of this GPU in list of group members
\param block_offset Offset of this GPU in the array of partial sums
All partial sums are packaged up in a Scalar4 to keep pointer management down.
- 2*Kinetic energy is summed in .x
- Potential energy is summed in .y
- W is summed in .z
One thread is executed per group member. That thread reads in the values for its member into shared memory
and then the block performs a reduction in parallel to produce a partial sum output for the block. These
partial sums are written to d_scratch[blockIdx.x]. sizeof(Scalar3)*block_size of dynamic shared memory are needed
for this kernel to run.
*/
// One thread per group member. Each thread loads its particle's contribution
// (2*KE, PE, 1/3 tr(virial)) into dynamic shared memory (sizeof(Scalar3)*blockDim.x
// bytes), the block tree-reduces, and thread 0 writes one Scalar4 partial sum to
// d_scratch[block_offset + blockIdx.x].
// NOTE(review): the reduction assumes blockDim.x is a power of two -- confirm the
// host always launches with one (args.block_size).
__global__ void gpu_compute_thermo_partial_sums(Scalar4 *d_scratch,
Scalar4 *d_net_force,
Scalar *d_net_virial,
const unsigned int virial_pitch,
Scalar4 *d_velocity,
unsigned int *d_body,
unsigned int *d_tag,
unsigned int *d_group_members,
unsigned int work_size,
unsigned int offset,
unsigned int block_offset)
{
// determine which particle this thread works on
int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
Scalar3 my_element; // element of scratch space read in
// non-participating thread: contribute 0 to the sum
my_element = make_scalar3(0, 0, 0);
if (group_idx < work_size)
{
unsigned int idx = d_group_members[group_idx+offset];
// ignore rigid body constituent particles in the sum
unsigned int body = d_body[idx];
unsigned int tag = d_tag[idx];
if (body >= MIN_FLOPPY || body == tag)
{
// read the net force; the .w component carries the potential energy
Scalar4 net_force = d_net_force[idx];
Scalar net_isotropic_virial;
// (1/3)*trace of virial tensor
net_isotropic_virial = Scalar(1.0/3.0)*
(d_net_virial[0*virial_pitch+idx] // xx
+d_net_virial[3*virial_pitch+idx] // yy
+d_net_virial[5*virial_pitch+idx]); // zz
Scalar4 vel = d_velocity[idx];
Scalar mass = vel.w;
// compute our contribution to the sum
// .x holds m*v^2 = 2 * kinetic energy; it is halved in the final sum kernel
my_element.x = mass * (vel.x*vel.x + vel.y*vel.y + vel.z*vel.z);
my_element.y = net_force.w;
my_element.z = net_isotropic_virial;
}
}
compute_thermo_sdata[threadIdx.x] = my_element;
__syncthreads();
// reduce the sum in parallel (tree reduction, halving the stride each pass)
int offs = blockDim.x >> 1;
while (offs > 0)
{
if (threadIdx.x < offs)
{
compute_thermo_sdata[threadIdx.x].x += compute_thermo_sdata[threadIdx.x + offs].x;
compute_thermo_sdata[threadIdx.x].y += compute_thermo_sdata[threadIdx.x + offs].y;
compute_thermo_sdata[threadIdx.x].z += compute_thermo_sdata[threadIdx.x + offs].z;
}
offs >>= 1;
__syncthreads();
}
// write out our partial sum
if (threadIdx.x == 0)
{
Scalar3 res = compute_thermo_sdata[0];
d_scratch[block_offset + blockIdx.x] = make_scalar4(res.x, res.y, res.z, 0);
}
}
//! Perform partial sums of the pressure tensor on the GPU
/*! \param d_scratch Scratch space to hold partial sums. One element is written per block
\param d_net_force Net force / pe array from ParticleData
\param d_net_virial Net virial array from ParticleData
\param virial_pitch pitch of 2D virial array
\param d_velocity Particle velocity and mass array from ParticleData
\param d_body Particle body id
\param d_tag Particle tag
\param d_group_members List of group members for which to sum properties
\param work_size Number of particles in the group
\param offset Offset of this GPU in the list of group members
\param block_offset Offset of this GPU in the array of partial sums
\param num_blocks Total number of partial sums by all GPUs
One thread is executed per group member. That thread reads in the six values (components of the pressure tensor)
for its member into shared memory and then the block performs a reduction in parallel to produce a partial sum output for the block.
These partial sums are written to d_scratch[i*gridDim.x + blockIdx.x], where i=0..5 is the index of the component.
For this kernel to run, 6*sizeof(Scalar)*block_size of dynamic shared memory are needed.
*/
// One thread per group member. Each thread computes the six components of its
// particle's pressure tensor contribution (kinetic m*v_a*v_b plus the virial),
// stores them in dynamic shared memory laid out component-major
// (compute_pressure_tensor_sdata[i*blockDim.x + tid], 6*sizeof(Scalar)*blockDim.x
// bytes), tree-reduces per component, and thread 0 writes the six block sums to
// d_scratch[num_blocks*i + block_offset + blockIdx.x].
// NOTE(review): the reduction assumes blockDim.x is a power of two -- confirm.
__global__ void gpu_compute_pressure_tensor_partial_sums(Scalar *d_scratch,
Scalar4 *d_net_force,
Scalar *d_net_virial,
const unsigned int virial_pitch,
Scalar4 *d_velocity,
unsigned int *d_body,
unsigned int *d_tag,
unsigned int *d_group_members,
unsigned int work_size,
unsigned int offset,
unsigned int block_offset,
unsigned int num_blocks)
{
// determine which particle this thread works on
int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
Scalar my_element[6]; // element of scratch space read in
// non-participating threads: contribute 0 to the sum
my_element[0] = 0;
my_element[1] = 0;
my_element[2] = 0;
my_element[3] = 0;
my_element[4] = 0;
my_element[5] = 0;
if (group_idx < work_size)
{
unsigned int idx = d_group_members[group_idx+offset];
// ignore rigid body constituent particles in the sum
unsigned int body = d_body[idx];
unsigned int tag = d_tag[idx];
if (body >= MIN_FLOPPY || body == tag)
{
// compute contribution to pressure tensor and store it in my_element
Scalar4 vel = d_velocity[idx];
Scalar mass = vel.w;
my_element[0] = mass*vel.x*vel.x + d_net_virial[0*virial_pitch+idx]; // xx
my_element[1] = mass*vel.x*vel.y + d_net_virial[1*virial_pitch+idx]; // xy
my_element[2] = mass*vel.x*vel.z + d_net_virial[2*virial_pitch+idx]; // xz
my_element[3] = mass*vel.y*vel.y + d_net_virial[3*virial_pitch+idx]; // yy
my_element[4] = mass*vel.y*vel.z + d_net_virial[4*virial_pitch+idx]; // yz
my_element[5] = mass*vel.z*vel.z + d_net_virial[5*virial_pitch+idx]; // zz
}
}
// component-major layout keeps all six reductions independent
for (unsigned int i = 0; i < 6; i++)
compute_pressure_tensor_sdata[i*blockDim.x+threadIdx.x] = my_element[i];
__syncthreads();
// reduce the sum in parallel (tree reduction over each component)
int offs = blockDim.x >> 1;
while (offs > 0)
{
if (threadIdx.x < offs)
{
for (unsigned int i = 0; i < 6; i++)
compute_pressure_tensor_sdata[i*blockDim.x+threadIdx.x] += compute_pressure_tensor_sdata[i*blockDim.x + threadIdx.x + offs];
}
offs >>= 1;
__syncthreads();
}
// write out our partial sum
if (threadIdx.x == 0)
{
for (unsigned int i = 0; i < 6; i++)
d_scratch[num_blocks * i + blockIdx.x + block_offset] = compute_pressure_tensor_sdata[i*blockDim.x];
}
}
//! Perform partial sums of the rotational KE on the GPU
/*! \param d_scratch Scratch space to hold partial sums. One element is written per block
\param d_orientation Orientation quaternions from ParticleData
\param d_angmom Conjugate quaternions from ParticleData
\param d_inertia Moments of inertia from ParticleData
\param d_body Particle body id
\param d_tag Particle tag
\param d_group_members List of group members for which to sum properties
\param work_size Number of particles in the group processed by this GPU
\param offset Offset of this GPU in the list of group members
\param block_offset Output offset of this GPU
*/
// One thread per group member. Each thread computes its particle's rotational
// kinetic energy from the orientation quaternion, conjugate angular momentum and
// principal moments of inertia, then the block tree-reduces in dynamic shared
// memory (sizeof(Scalar)*blockDim.x bytes) and thread 0 writes one partial sum
// to d_scratch[block_offset + blockIdx.x].
// NOTE(review): the reduction assumes blockDim.x is a power of two -- confirm.
__global__ void gpu_compute_rotational_ke_partial_sums(Scalar *d_scratch,
const Scalar4 *d_orientation,
const Scalar4 *d_angmom,
const Scalar3 *d_inertia,
unsigned int *d_body,
unsigned int *d_tag,
unsigned int *d_group_members,
unsigned int work_size,
unsigned int offset,
unsigned int block_offset)
{
// determine which particle this thread works on
int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
Scalar my_element; // element of scratch space read in
// non-participating thread: contribute 0 to the sum
my_element = Scalar(0.0);
if (group_idx < work_size)
{
unsigned int idx = d_group_members[group_idx+offset];
// ignore rigid body constituent particles in the sum
unsigned int body = d_body[idx];
unsigned int tag = d_tag[idx];
if (body >= MIN_FLOPPY || body == tag)
{
quat<Scalar> q(d_orientation[idx]);
quat<Scalar> p(d_angmom[idx]);
vec3<Scalar> I(d_inertia[idx]);
// s.v holds the angular momentum components in the particle's principal frame
quat<Scalar> s(Scalar(0.5)*conj(q)*p);
Scalar ke_rot(0.0);
// components with (near-)zero moment of inertia are skipped to avoid
// dividing by zero for degrees of freedom that do not exist
if (I.x >= EPSILON)
{
ke_rot += s.v.x*s.v.x/I.x;
}
if (I.y >= EPSILON)
{
ke_rot += s.v.y*s.v.y/I.y;
}
if (I.z >= EPSILON)
{
ke_rot += s.v.z*s.v.z/I.z;
}
// compute our contribution to the sum
my_element = ke_rot*Scalar(1.0/2.0);
}
}
compute_ke_rot_sdata[threadIdx.x] = my_element;
__syncthreads();
// reduce the sum in parallel (tree reduction)
int offs = blockDim.x >> 1;
while (offs > 0)
{
if (threadIdx.x < offs)
compute_ke_rot_sdata[threadIdx.x] += compute_ke_rot_sdata[threadIdx.x + offs];
offs >>= 1;
__syncthreads();
}
// write out our partial sum
if (threadIdx.x == 0)
{
d_scratch[blockIdx.x + block_offset] = compute_ke_rot_sdata[0];
}
}
//! Complete partial sums and compute final thermodynamic quantities (for pressure, only isotropic contribution)
/*! \param d_properties Property array to write final values
\param d_scratch Partial sums
\param d_scratch_rot Partial sums of rotational kinetic energy
\param ndof Number of degrees of freedom this group possesses
\param box Box the particles are in
\param D Dimensionality of the system
\param group_size Number of particles in the group
\param num_partial_sums Number of partial sums in \a d_scratch
\param external_virial External contribution to virial (1/3 trace)
\param external_energy External contribution to potential energy
Only one block is executed. In that block, the partial sums are read in and reduced to final values. From the final
sums, the thermodynamic properties are computed and written to d_properties.
sizeof(Scalar4)*block_size bytes of shared memory are needed for this kernel to run.
*/
// Single-block kernel: reduces all num_partial_sums partial results in windows of
// blockDim.x (sizeof(Scalar4)*blockDim.x bytes of dynamic shared memory), then
// thread 0 converts the totals to final properties and writes d_properties.
// NOTE(review): ndof is accepted but not used in this kernel -- presumably the
// temperature is derived from ndof on the host; confirm.
__global__ void gpu_compute_thermo_final_sums(Scalar *d_properties,
Scalar4 *d_scratch,
Scalar *d_scratch_rot,
unsigned int ndof,
BoxDim box,
unsigned int D,
unsigned int group_size,
unsigned int num_partial_sums,
Scalar external_virial,
Scalar external_energy
)
{
// running total; only thread 0's copy is ever accumulated/used
Scalar4 final_sum = make_scalar4(Scalar(0.0), Scalar(0.0), Scalar(0.0),Scalar(0.0));
// sum up the values in the partial sum via a sliding window
for (int start = 0; start < num_partial_sums; start += blockDim.x)
{
__syncthreads();
if (start + threadIdx.x < num_partial_sums)
{
// pack (2*KE, PE, W) and the rotational KE into one Scalar4
Scalar4 scratch = d_scratch[start + threadIdx.x];
Scalar scratch_rot = d_scratch_rot[start + threadIdx.x];
compute_thermo_final_sdata[threadIdx.x] = make_scalar4(scratch.x, scratch.y, scratch.z, scratch_rot);
}
else
compute_thermo_final_sdata[threadIdx.x] = make_scalar4(Scalar(0.0), Scalar(0.0), Scalar(0.0), Scalar(0.0));
__syncthreads();
// reduce the sum in parallel (assumes blockDim.x is a power of two)
int offs = blockDim.x >> 1;
while (offs > 0)
{
if (threadIdx.x < offs)
{
compute_thermo_final_sdata[threadIdx.x].x += compute_thermo_final_sdata[threadIdx.x + offs].x;
compute_thermo_final_sdata[threadIdx.x].y += compute_thermo_final_sdata[threadIdx.x + offs].y;
compute_thermo_final_sdata[threadIdx.x].z += compute_thermo_final_sdata[threadIdx.x + offs].z;
compute_thermo_final_sdata[threadIdx.x].w += compute_thermo_final_sdata[threadIdx.x + offs].w;
}
offs >>= 1;
__syncthreads();
}
if (threadIdx.x == 0)
{
final_sum.x += compute_thermo_final_sdata[0].x;
final_sum.y += compute_thermo_final_sdata[0].y;
final_sum.z += compute_thermo_final_sdata[0].z;
final_sum.w += compute_thermo_final_sdata[0].w;
}
}
if (threadIdx.x == 0)
{
// compute final quantities
// partial sums stored 2*KE, so halve here
Scalar ke_trans_total = final_sum.x * Scalar(0.5);
Scalar pe_total = final_sum.y + external_energy;
Scalar W = final_sum.z + external_virial;
Scalar ke_rot_total = final_sum.w;
// compute the pressure
// volume/area & other 2D stuff needed
Scalar volume;
Scalar3 L = box.getL();
if (D == 2)
{
// "volume" is area in 2D
volume = L.x * L.y;
// W needs to be corrected since the 1/3 factor is built in
W *= Scalar(3.0)/Scalar(2.0);
}
else
{
volume = L.x * L.y * L.z;
}
// pressure: P = (N * K_B * T + W)/V
Scalar pressure = (Scalar(2.0) * ke_trans_total / Scalar(D) + W) / volume;
// fill out the GPUArray
d_properties[thermo_index::translational_kinetic_energy] = Scalar(ke_trans_total);
d_properties[thermo_index::rotational_kinetic_energy] = Scalar(ke_rot_total);
d_properties[thermo_index::potential_energy] = Scalar(pe_total);
d_properties[thermo_index::pressure] = pressure;
}
}
//! Complete partial sums and compute final pressure tensor
/*! \param d_properties Property array to write final values
\param d_scratch Partial sums
\param box Box the particles are in
\param group_size Number of particles in the group
\param num_partial_sums Number of partial sums in \a d_scratch
\param external_virial_xx External contribution to virial (xx component)
\param external_virial_xy External contribution to virial (xy component)
\param external_virial_xz External contribution to virial (xz component)
\param external_virial_yy External contribution to virial (yy component)
\param external_virial_yz External contribution to virial (yz component)
\param external_virial_zz External contribution to virial (zz component)
Only one block is executed. In that block, the partial sums are read in and reduced to final values. From the final
sums, the thermodynamic properties are computed and written to d_properties.
6*sizeof(Scalar)*block_size bytes of shared memory are needed for this kernel to run.
*/
// Single-block kernel: reduces the six pressure-tensor components over all
// num_partial_sums partial results in windows of blockDim.x
// (6*sizeof(Scalar)*blockDim.x bytes of dynamic shared memory, component-major
// layout matching gpu_compute_pressure_tensor_partial_sums), then thread 0
// divides by the box volume and writes the six components to d_properties.
__global__ void gpu_compute_pressure_tensor_final_sums(Scalar *d_properties,
Scalar *d_scratch,
BoxDim box,
unsigned int group_size,
unsigned int num_partial_sums,
Scalar external_virial_xx,
Scalar external_virial_xy,
Scalar external_virial_xz,
Scalar external_virial_yy,
Scalar external_virial_yz,
Scalar external_virial_zz,
bool twod)
{
// seed the running totals with the external virial contributions;
// only thread 0's copy is ever accumulated/used
Scalar final_sum[6];
final_sum[0] = external_virial_xx;
final_sum[1] = external_virial_xy;
final_sum[2] = external_virial_xz;
final_sum[3] = external_virial_yy;
final_sum[4] = external_virial_yz;
final_sum[5] = external_virial_zz;
// sum up the values in the partial sum via a sliding window
for (int start = 0; start < num_partial_sums; start += blockDim.x)
{
__syncthreads();
if (start + threadIdx.x < num_partial_sums)
{
for (unsigned int i = 0; i < 6; i++)
compute_pressure_tensor_sdata[i * blockDim.x + threadIdx.x] = d_scratch[i*num_partial_sums + start + threadIdx.x];
}
else
for (unsigned int i = 0; i < 6; i++)
compute_pressure_tensor_sdata[i * blockDim.x + threadIdx.x] = Scalar(0.0);
__syncthreads();
// reduce the sum in parallel (assumes blockDim.x is a power of two)
int offs = blockDim.x >> 1;
while (offs > 0)
{
if (threadIdx.x < offs)
{
for (unsigned int i = 0; i < 6; i++)
compute_pressure_tensor_sdata[i*blockDim.x + threadIdx.x] += compute_pressure_tensor_sdata[i*blockDim.x + threadIdx.x + offs];
}
offs >>= 1;
__syncthreads();
}
if (threadIdx.x == 0)
{
for (unsigned int i = 0; i < 6; i++)
final_sum[i] += compute_pressure_tensor_sdata[i*blockDim.x];
}
}
if (threadIdx.x == 0)
{
// fill out the GPUArray
// we have thus far calculated the sum of the kinetic part of the pressure tensor
// and the virial part, the definition includes an inverse factor of the box volume
Scalar V = box.getVolume(twod);
d_properties[thermo_index::pressure_xx] = final_sum[0]/V;
d_properties[thermo_index::pressure_xy] = final_sum[1]/V;
d_properties[thermo_index::pressure_xz] = final_sum[2]/V;
d_properties[thermo_index::pressure_yy] = final_sum[3]/V;
d_properties[thermo_index::pressure_yz] = final_sum[4]/V;
d_properties[thermo_index::pressure_zz] = final_sum[5]/V;
}
}
//! Compute partial sums of thermodynamic properties of a group on the GPU,
/*! \param d_properties Array to write computed properties
\param d_vel particle velocities and masses on the GPU
\param d_body Particle body id
\param d_tag Particle tag
\param d_group_members List of group members
\param group_size Number of group members
\param box Box the particles are in
\param args Additional arguments
\param compute_pressure_tensor whether to compute the full pressure tensor
\param compute_rotational_energy whether to compute the rotational kinetic energy
\param gpu_partition Load balancing info for multi-GPU reduction
This function drives gpu_compute_thermo_partial_sums and gpu_compute_thermo_final_sums, see them for details.
*/
/*! Launch the per-particle partial-sum kernels over each active GPU's slice of the group.
    Each GPU's blocks write their partial sums starting at block_offset in the scratch
    arrays, so the final-sum kernel can reduce all of them in one pass.
    \return cudaSuccess (kernel launch errors are not checked here)
*/
cudaError_t gpu_compute_thermo_partial(Scalar *d_properties,
                               Scalar4 *d_vel,
                               unsigned int *d_body,
                               unsigned int *d_tag,
                               unsigned int *d_group_members,
                               unsigned int group_size,
                               const BoxDim& box,
                               const compute_thermo_args& args,
                               bool compute_pressure_tensor,
                               bool compute_rotational_energy,
                               const GPUPartition& gpu_partition
                               )
    {
    assert(d_properties);
    assert(d_vel);
    assert(d_group_members);
    assert(args.d_net_force);
    assert(args.d_net_virial);
    assert(args.d_scratch);

    unsigned int block_offset = 0;

    // iterate over active GPUs in reverse, to end up on first GPU when returning from this function
    for (int idev = gpu_partition.getNumActiveGPUs() - 1; idev >= 0; --idev)
        {
        auto range = gpu_partition.getRangeAndSetGPU(idev);

        unsigned int nwork = range.second - range.first;

        // one thread per particle in this GPU's range
        dim3 grid(nwork/args.block_size+1, 1, 1);
        dim3 threads(args.block_size, 1, 1);

        unsigned int shared_bytes = sizeof(Scalar3)*args.block_size;

        gpu_compute_thermo_partial_sums<<<grid,threads, shared_bytes>>>(args.d_scratch,
                                                                       args.d_net_force,
                                                                       args.d_net_virial,
                                                                       args.virial_pitch,
                                                                       d_vel,
                                                                       d_body,
                                                                       d_tag,
                                                                       d_group_members,
                                                                       nwork,
                                                                       range.first,
                                                                       block_offset);

        if (compute_pressure_tensor)
            {
            assert(args.d_scratch_pressure_tensor);

            shared_bytes = 6 * sizeof(Scalar) * args.block_size;

            // run the kernel
            gpu_compute_pressure_tensor_partial_sums<<<grid, threads, shared_bytes>>>(args.d_scratch_pressure_tensor,
                                                                                     args.d_net_force,
                                                                                     args.d_net_virial,
                                                                                     args.virial_pitch,
                                                                                     d_vel,
                                                                                     d_body,
                                                                                     d_tag,
                                                                                     d_group_members,
                                                                                     nwork,
                                                                                     range.first,
                                                                                     block_offset,
                                                                                     args.n_blocks);
            }

        if (compute_rotational_energy)
            {
            // bug fix: this branch writes to d_scratch_rot, but previously asserted
            // d_scratch_pressure_tensor (copy/paste from the branch above)
            assert(args.d_scratch_rot);

            shared_bytes = sizeof(Scalar) * args.block_size;

            // run the kernel
            gpu_compute_rotational_ke_partial_sums<<<grid, threads, shared_bytes>>>(args.d_scratch_rot,
                                                                                   args.d_orientation,
                                                                                   args.d_angmom,
                                                                                   args.d_inertia,
                                                                                   d_body,
                                                                                   d_tag,
                                                                                   d_group_members,
                                                                                   nwork,
                                                                                   range.first,
                                                                                   block_offset);
            }

        block_offset += grid.x;
        }

    assert(block_offset <= args.n_blocks);

    return cudaSuccess;
    }
//! Compute thermodynamic properties of a group on the GPU
/*! \param d_properties Array to write computed properties
\param d_vel particle velocities and masses on the GPU
\param d_body Particle body id
\param d_tag Particle tag
\param d_group_members List of group members
\param group_size Number of group members
\param box Box the particles are in
\param args Additional arguments
\param compute_pressure_tensor whether to compute the full pressure tensor
\param compute_rotational_energy whether to compute the rotational kinetic energy
\param num_blocks Number of partial sums to reduce
This function drives gpu_compute_thermo_partial_sums and gpu_compute_thermo_final_sums, see them for details.
*/
/*! Drive the final reduction of thermodynamic partial sums on the GPU (CUDA build).
    A single block of 512 threads reduces all args.n_blocks partial sums; a second
    kernel reduces the six pressure-tensor components when requested.
    \return cudaSuccess (kernel launch errors are not checked here)
*/
cudaError_t gpu_compute_thermo_final(Scalar *d_properties,
                               Scalar4 *d_vel,
                               unsigned int *d_body,
                               unsigned int *d_tag,
                               unsigned int *d_group_members,
                               unsigned int group_size,
                               const BoxDim& box,
                               const compute_thermo_args& args,
                               bool compute_pressure_tensor,
                               bool compute_rotational_energy
                               )
    {
    assert(d_properties);
    assert(d_vel);
    assert(d_group_members);
    assert(args.d_net_force);
    assert(args.d_net_virial);
    assert(args.d_scratch);

    // one block of 512 threads performs the whole final reduction
    const int block_size = 512;
    dim3 grid(1, 1, 1);
    dim3 threads(block_size, 1, 1);

    // isotropic external virial contribution: (1/3) * trace of the external virial tensor
    Scalar w_external = Scalar(1.0/3.0) * (args.external_virial_xx
                                           + args.external_virial_yy
                                           + args.external_virial_zz);

    // one Scalar4 (2*KE, PE, W, KE_rot) per thread in dynamic shared memory
    unsigned int nbytes = sizeof(Scalar4) * block_size;
    gpu_compute_thermo_final_sums<<<grid, threads, nbytes>>>(d_properties,
                                                             args.d_scratch,
                                                             args.d_scratch_rot,
                                                             args.ndof,
                                                             box,
                                                             args.D,
                                                             group_size,
                                                             args.n_blocks,
                                                             w_external,
                                                             args.external_energy);

    if (compute_pressure_tensor)
        {
        // six Scalars per thread for the six independent tensor components
        nbytes = 6 * sizeof(Scalar) * block_size;
        gpu_compute_pressure_tensor_final_sums<<<grid, threads, nbytes>>>(d_properties,
                                                                          args.d_scratch_pressure_tensor,
                                                                          box,
                                                                          group_size,
                                                                          args.n_blocks,
                                                                          args.external_virial_xx,
                                                                          args.external_virial_xy,
                                                                          args.external_virial_xz,
                                                                          args.external_virial_yy,
                                                                          args.external_virial_yz,
                                                                          args.external_virial_zz,
                                                                          args.D == 2);
        }

    return cudaSuccess;
    }
|
427894115f587543d2a801bb9246dc8d690c1eff.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "real.h"
#include "gpuerrchk.cuh"
#include "reduce_hip.cuh"
//kernel used to sum an array containing size elements, where size can be 2^0,2^1,..,2^10
//intended to be called as a single thread block with size threads
//Note: here is the reason that the below operation never reads outside the bounds of partialsum if it has length 2^k.
//since t%(2*stride)==0 we have that t=m*2^{i+1} for some m and where we are in iteration i (stride=2^i). Since t is in the bounds of the array,
//t<2^k.
//Thus 2m<2^{k-i} (*).
//But then t+2^i<2^{k} if and only if (2m+1)<2^{k-i}. This holds since both sides of equation * are even.
// v1 reduction kernel: interleaved addressing with a modulo test.
// Sums the `size` elements of X into *d_ans. Launch as a single block of
// exactly `size` threads with size*sizeof(real) bytes of dynamic shared
// memory; `size` must be a power of two (see the proof in the file header).
// The `t % (2*stride)` test causes warp divergence -- kreducev2 is the
// sequential-addressing variant that avoids it.
__global__
void kreducev1(real* X, real* d_ans, unsigned int size){
extern __shared__ real partialsum[];
unsigned int t= threadIdx.x;
// stage the input in shared memory, one element per thread
partialsum[t]=X[t];
for (unsigned int stride = 1; stride < size; stride *= 2 ){
// barrier before each pass so all writes from the previous pass are visible
__syncthreads();
if (t % (2*stride) == 0)
partialsum[t]+=partialsum[t+stride];
}
if (t == 0)
*d_ans=partialsum[0];
}
// v2 reduction kernel: sequential addressing (stride halves each pass).
// Sums the `size` elements of X into *d_ans. Launch as a single block of
// exactly `size` threads with size*sizeof(real) bytes of dynamic shared
// memory; `size` must be a power of two. Active threads stay contiguous,
// so there is no intra-warp divergence from the modulo test of v1.
__global__
void kreducev2(real* X, real* d_ans, unsigned int size){
extern __shared__ real partialsum[];
unsigned int t= threadIdx.x;
// stage the input in shared memory, one element per thread
partialsum[t]=X[t];
for (unsigned int stride = size/2; stride >= 1; stride = stride >> 1){
// barrier before each pass so all writes from the previous pass are visible
__syncthreads();
if (t < stride)
partialsum[t] += partialsum[t+stride];
}
if (t == 0)
*d_ans=partialsum[0];
}
// Host wrapper for kreducev1: copies X (numel elements) to the device, runs the
// v1 reduction in a single block of numel threads with numel*sizeof(real) bytes
// of dynamic shared memory, and returns the sum. numel must satisfy the kernel's
// constraints (power of two, at most one block's worth of threads).
real reducev1(real* X, unsigned int numel){
    const int nbytes = sizeof(real)*numel;
    real* d_input;
    real* d_result;
    gpuErrchk(hipMalloc((void**) &d_input, nbytes));
    gpuErrchk(hipMalloc((void**) &d_result, sizeof(real)));
    gpuErrchk(hipMemcpy(d_input, X, nbytes, hipMemcpyHostToDevice));
    hipLaunchKernelGGL((kreducev1), dim3(1), dim3(numel), nbytes, 0, d_input, d_result, numel);
    gpuErrchk(hipPeekAtLastError());
    // the input buffer is no longer needed once the kernel has been checked
    gpuErrchk(hipFree(d_input));
    real total;
    gpuErrchk(hipMemcpy(&total, d_result, sizeof(real), hipMemcpyDeviceToHost));
    gpuErrchk(hipFree(d_result));
    return total;
}
// Host wrapper for kreducev2: copies X (numel elements) to the device, runs the
// v2 (sequential-addressing) reduction in a single block of numel threads with
// numel*sizeof(real) bytes of dynamic shared memory, and returns the sum.
// numel must satisfy the kernel's constraints (power of two, one block).
real reducev2(real* X, unsigned int numel){
    const int nbytes = sizeof(real)*numel;
    real* d_input;
    real* d_result;
    gpuErrchk(hipMalloc((void**) &d_input, nbytes));
    gpuErrchk(hipMalloc((void**) &d_result, sizeof(real)));
    gpuErrchk(hipMemcpy(d_input, X, nbytes, hipMemcpyHostToDevice));
    hipLaunchKernelGGL((kreducev2), dim3(1), dim3(numel), nbytes, 0, d_input, d_result, numel);
    gpuErrchk(hipPeekAtLastError());
    // the input buffer is no longer needed once the kernel has been checked
    gpuErrchk(hipFree(d_input));
    real total;
    gpuErrchk(hipMemcpy(&total, d_result, sizeof(real), hipMemcpyDeviceToHost));
    gpuErrchk(hipFree(d_result));
    return total;
}
| 427894115f587543d2a801bb9246dc8d690c1eff.cu | #include "real.h"
#include "gpuerrchk.cuh"
#include "reduce.cuh"
//kernel used to sum an array containing size elements, where size can be 2^0,2^1,..,2^10
//intended to be called as a single thread block with size threads
//Note: here is the reason that the below operation never reads outside the bounds of partialsum if it has length 2^k.
//since t%(2*stride)==0 we have that t=m*2^{i+1} for some m and where we are in iteration i (stride=2^i). Since t is in the bounds of the array,
//t<2^k.
//Thus 2m<2^{k-i} (*).
//But then t+2^i<2^{k} if and only if (2m+1)<2^{k-i}. This holds since both sides of equation * are even.
// v1 reduction kernel: interleaved addressing with a modulo test.
// Sums the `size` elements of X into *d_ans. Launch as a single block of
// exactly `size` threads with size*sizeof(real) bytes of dynamic shared
// memory; `size` must be a power of two (see the proof in the file header).
// The `t % (2*stride)` test causes warp divergence -- kreducev2 is the
// sequential-addressing variant that avoids it.
__global__
void kreducev1(real* X, real* d_ans, unsigned int size){
extern __shared__ real partialsum[];
unsigned int t= threadIdx.x;
// stage the input in shared memory, one element per thread
partialsum[t]=X[t];
for (unsigned int stride = 1; stride < size; stride *= 2 ){
// barrier before each pass so all writes from the previous pass are visible
__syncthreads();
if (t % (2*stride) == 0)
partialsum[t]+=partialsum[t+stride];
}
if (t == 0)
*d_ans=partialsum[0];
}
// v2 reduction kernel: sequential addressing (stride halves each pass).
// Sums the `size` elements of X into *d_ans. Launch as a single block of
// exactly `size` threads with size*sizeof(real) bytes of dynamic shared
// memory; `size` must be a power of two. Active threads stay contiguous,
// so there is no intra-warp divergence from the modulo test of v1.
__global__
void kreducev2(real* X, real* d_ans, unsigned int size){
extern __shared__ real partialsum[];
unsigned int t= threadIdx.x;
// stage the input in shared memory, one element per thread
partialsum[t]=X[t];
for (unsigned int stride = size/2; stride >= 1; stride = stride >> 1){
// barrier before each pass so all writes from the previous pass are visible
__syncthreads();
if (t < stride)
partialsum[t] += partialsum[t+stride];
}
if (t == 0)
*d_ans=partialsum[0];
}
// Host wrapper for kreducev1: copies X (numel elements) to the device, runs the
// v1 reduction in a single block of numel threads with numel*sizeof(real) bytes
// of dynamic shared memory, and returns the sum. numel must satisfy the kernel's
// constraints (power of two, at most one block's worth of threads).
real reducev1(real* X, unsigned int numel){
    const int nbytes = sizeof(real)*numel;
    real* d_input;
    real* d_result;
    gpuErrchk(cudaMalloc((void**) &d_input, nbytes));
    gpuErrchk(cudaMalloc((void**) &d_result, sizeof(real)));
    gpuErrchk(cudaMemcpy(d_input, X, nbytes, cudaMemcpyHostToDevice));
    kreducev1<<<1, numel, nbytes>>>(d_input, d_result, numel);
    gpuErrchk(cudaPeekAtLastError());
    // the input buffer is no longer needed once the kernel has been checked
    gpuErrchk(cudaFree(d_input));
    real total;
    gpuErrchk(cudaMemcpy(&total, d_result, sizeof(real), cudaMemcpyDeviceToHost));
    gpuErrchk(cudaFree(d_result));
    return total;
}
// Host wrapper for kreducev2: copies X (numel elements) to the device, runs the
// v2 (sequential-addressing) reduction in a single block of numel threads with
// numel*sizeof(real) bytes of dynamic shared memory, and returns the sum.
// numel must satisfy the kernel's constraints (power of two, one block).
real reducev2(real* X, unsigned int numel){
    const int nbytes = sizeof(real)*numel;
    real* d_input;
    real* d_result;
    gpuErrchk(cudaMalloc((void**) &d_input, nbytes));
    gpuErrchk(cudaMalloc((void**) &d_result, sizeof(real)));
    gpuErrchk(cudaMemcpy(d_input, X, nbytes, cudaMemcpyHostToDevice));
    kreducev2<<<1, numel, nbytes>>>(d_input, d_result, numel);
    gpuErrchk(cudaPeekAtLastError());
    // the input buffer is no longer needed once the kernel has been checked
    gpuErrchk(cudaFree(d_input));
    real total;
    gpuErrchk(cudaMemcpy(&total, d_result, sizeof(real), cudaMemcpyDeviceToHost));
    gpuErrchk(cudaFree(d_result));
    return total;
}
}
|
93217041e9d7da393d8b8c02cfb9635c8b0a3f01.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <fstream>
// #include <boost/iostreams/stream.hpp>
// #include <libs/iostreams/src/mapped_file.cpp>
#include <algorithm>
#include <iostream>
#include <sstream>
#include <cstring>
#include <stdio.h>
#include <stdlib.h>
#include <string>
#include <vector>
#include <thrust/execution_policy.h>
#include <thrust/device_vector.h>
#include <thrust/transform.h>
#include <thrust/sequence.h>
#include <thrust/copy.h>
#include <thrust/fill.h>
#include <thrust/replace.h>
#include <thrust/functional.h>
#include <thrust/scan.h>
#include <ctime>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <hip/hip_cooperative_groups.h>
#define MAX_ENTRIES 11897027
namespace cg = cooperative_groups;
// Pass-through error check: in debug builds (DEBUG/_DEBUG defined), print the
// error string to stderr and abort via assert when `result` is not hipSuccess.
// In release builds it is a no-op wrapper that just returns `result`.
// NOTE(review): uses assert() but <assert.h>/<cassert> is not visibly included
// in this file -- confirm it arrives via another header.
inline
hipError_t checkCuda(hipError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
if(result!=hipSuccess){
fprintf(stderr,"CUDA Runtime Error: %s\n",
hipGetErrorString(result));
assert(result==hipSuccess);
}
#endif
return result;
}
#define CUDA_CALL(x) do { if((x) != hipSuccess) { \
printf("Error at %s:%d\n",__FILE__,__LINE__); \
return EXIT_FAILURE;}} while(0)
///////////////////////////////////////////////////////////////////////////////
hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
// Element-wise vector add: c[i] = a[i] + b[i].
// Expects a single-block launch with exactly as many threads as elements;
// there is no bounds guard, so the launch size must match the array size.
__global__ void addKernel(int *c, const int *a, const int *b)
{
    const int idx = threadIdx.x;
    c[idx] = a[idx] + b[idx];
}
// NOTE(review): `__host__ __device__` qualifiers are not valid on a class
// definition (they apply to functions); nvcc rejects or ignores this -- the
// qualifiers should be removed or moved onto the member functions.
// NOTE(review): the entire body (x/y/w members and operators) is commented out,
// yet generateRandomPointCloud below assigns points[i].x/.y/.w -- this file does
// not compile as-is; confirm whether the members were meant to be restored.
__host__ __device__ class Point2D
{
// public:
// float x, y, w;
// __host__ __device__ Point2D& operator=(const Point2D& target){ x = target.x; y = target.y; w = target.w; return *this; }
// __host__ __device__ Point2D operator+(const Point2D& b){ Point2D results; results.x = x + b.x; results.y = y + b.y; results.w = w + b.w; return results; }
// __host__ __device__ Point2D operator+(const float b)
// {
// Point2D results;
// results.x = min_(1, x + b);
// results.y = min_(1, y + b);
// return results;
// }
// __host__ __device__ Point2D operator-(const float b)
// {
// Point2D results;
// results.x = max_(0,x - b);
// results.y = max_(0,y - b);
// return results;
// }
// friend ostream& operator<<(ostream& os, const Point2D& p)
// {
// os << p.x<<" " <<p.y<<" "<<p.w ;
// return os;
// }
};
//Generate a million random points;
// Fill `points` with `size` pseudo-random points in [0,1) x [0,1), weights zeroed.
// NOTE(review): relies on Point2D exposing x/y/w, which are commented out in the
// class above -- this does not compile against the visible Point2D definition.
// NOTE(review): `vector` is unqualified; presumably `using namespace std` exists
// elsewhere in the original file -- confirm.
// NOTE(review): rand() % RAND_MAX maps the value RAND_MAX to 0 and slightly
// biases the distribution; rand() / (float)RAND_MAX alone would suffice.
void generateRandomPointCloud(vector<Point2D>& points, size_t size = 1000000)
{
//std::cout << "Generating " << size << " point cloud...";
points.resize(size);
for (size_t i = 0; i<size; i++)
{
points[i].x = (rand() % RAND_MAX) / float(RAND_MAX);
points[i].y = (rand() % RAND_MAX) / float(RAND_MAX);
points[i].w = 0.0;
}
//std::cout << "done\n";
}
// Program entry point: placeholder driver, currently performs no work.
// Fixed: the original read `int int main(...)`, a duplicated return type that
// is a syntax error and prevents the file from compiling.
// \return 0 on success
int main(int argc, char const *argv[]) {
    /* code */
    return 0;
}
// #define GPUCHECK(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// inline void gpuAssert(hipError_t code, const char *file, int line, bool abort = true)
// {
// if (code != hipSuccess)
// {
// fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
// if (abort) exit(code);
// }
// }
//
// hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
//
// __global__ void addKernel(int *c, const int *a, const int *b)
// {
// int i = threadIdx.x;
// c[i] = a[i] + b[i];
// }
| 93217041e9d7da393d8b8c02cfb9635c8b0a3f01.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <fstream>
// #include <boost/iostreams/stream.hpp>
// #include <libs/iostreams/src/mapped_file.cpp>
#include <algorithm>
#include <iostream>
#include <sstream>
#include <cstring>
#include <stdio.h>
#include <stdlib.h>
#include <string>
#include <vector>
#include <thrust/execution_policy.h>
#include <thrust/device_vector.h>
#include <thrust/transform.h>
#include <thrust/sequence.h>
#include <thrust/copy.h>
#include <thrust/fill.h>
#include <thrust/replace.h>
#include <thrust/functional.h>
#include <thrust/scan.h>
#include <ctime>
#include <cuda.h>
#include <curand.h>
#include <curand_kernel.h>
#include <cooperative_groups.h>
#define MAX_ENTRIES 11897027
namespace cg = cooperative_groups;
// Pass-through error check: in debug builds (DEBUG/_DEBUG defined), print the
// error string to stderr and abort via assert when `result` is not cudaSuccess.
// In release builds it is a no-op wrapper that just returns `result`.
// NOTE(review): uses assert() but <assert.h>/<cassert> is not visibly included
// in this file -- confirm it arrives via another header.
inline
cudaError_t checkCuda(cudaError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
if(result!=cudaSuccess){
fprintf(stderr,"CUDA Runtime Error: %s\n",
cudaGetErrorString(result));
assert(result==cudaSuccess);
}
#endif
return result;
}
#define CUDA_CALL(x) do { if((x) != cudaSuccess) { \
printf("Error at %s:%d\n",__FILE__,__LINE__); \
return EXIT_FAILURE;}} while(0)
///////////////////////////////////////////////////////////////////////////////
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
// Element-wise vector add: c[i] = a[i] + b[i].
// Expects a single-block launch with exactly as many threads as elements;
// there is no bounds guard, so the launch size must match the array size.
__global__ void addKernel(int *c, const int *a, const int *b)
{
    const int idx = threadIdx.x;
    c[idx] = a[idx] + b[idx];
}
// NOTE(review): `__host__ __device__` qualifiers are not valid on a class
// definition (they apply to functions); nvcc rejects or ignores this -- the
// qualifiers should be removed or moved onto the member functions.
// NOTE(review): the entire body (x/y/w members and operators) is commented out,
// yet generateRandomPointCloud below assigns points[i].x/.y/.w -- this file does
// not compile as-is; confirm whether the members were meant to be restored.
__host__ __device__ class Point2D
{
// public:
// float x, y, w;
// __host__ __device__ Point2D& operator=(const Point2D& target){ x = target.x; y = target.y; w = target.w; return *this; }
// __host__ __device__ Point2D operator+(const Point2D& b){ Point2D results; results.x = x + b.x; results.y = y + b.y; results.w = w + b.w; return results; }
// __host__ __device__ Point2D operator+(const float b)
// {
// Point2D results;
// results.x = min_(1, x + b);
// results.y = min_(1, y + b);
// return results;
// }
// __host__ __device__ Point2D operator-(const float b)
// {
// Point2D results;
// results.x = max_(0,x - b);
// results.y = max_(0,y - b);
// return results;
// }
// friend ostream& operator<<(ostream& os, const Point2D& p)
// {
// os << p.x<<" " <<p.y<<" "<<p.w ;
// return os;
// }
};
//Generate a million random points;
// Fill `points` with `size` pseudo-random points in [0,1) x [0,1), weights zeroed.
// NOTE(review): relies on Point2D exposing x/y/w, which are commented out in the
// class above -- this does not compile against the visible Point2D definition.
// NOTE(review): `vector` is unqualified; presumably `using namespace std` exists
// elsewhere in the original file -- confirm.
// NOTE(review): rand() % RAND_MAX maps the value RAND_MAX to 0 and slightly
// biases the distribution; rand() / (float)RAND_MAX alone would suffice.
void generateRandomPointCloud(vector<Point2D>& points, size_t size = 1000000)
{
//std::cout << "Generating " << size << " point cloud...";
points.resize(size);
for (size_t i = 0; i<size; i++)
{
points[i].x = (rand() % RAND_MAX) / float(RAND_MAX);
points[i].y = (rand() % RAND_MAX) / float(RAND_MAX);
points[i].w = 0.0;
}
//std::cout << "done\n";
}
// Program entry point: placeholder driver, currently performs no work.
// Fixed: the original read `int int main(...)`, a duplicated return type that
// is a syntax error and prevents the file from compiling.
// \return 0 on success
int main(int argc, char const *argv[]) {
    /* code */
    return 0;
}
// #define GPUCHECK(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
// {
// if (code != cudaSuccess)
// {
// fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
// if (abort) exit(code);
// }
// }
//
// cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
//
// __global__ void addKernel(int *c, const int *a, const int *b)
// {
// int i = threadIdx.x;
// c[i] = a[i] + b[i];
// }
|
0fb4c460a91c97b261502d20266ff2af5f618ffa.hip | // !!! This is a file automatically generated by hipify!!!
////////////////////////////////////////////////////////////////////////////
//
// Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
//
// Please refer to the NVIDIA end user license agreement (EULA) associated
// with this source code for terms and conditions that govern your use of
// this software. Any use, reproduction, disclosure, or distribution of
// this software and related documentation outside the terms of the EULA
// is strictly prohibited.
//
////////////////////////////////////////////////////////////////////////////
/* Template project which demonstrates the basics on how to setup a project
* example application.
* Host code.
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <sys/stat.h>
#include <dirent.h>
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
#include <helper_functions.h> // helper functions for SDK examples
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
#define total_no_ascii_chars 95
#define max_encrypted_pwd_length 8
extern "C"
void ulong_to_char_array(unsigned long search_pos, char *output);
extern "C"
void runSerial(unsigned long encrypted_password, unsigned long search_space_size, unsigned int pwd_mem_size);
extern "C"
unsigned long char_array_to_ulong(char *input, uint array_lenght);
static struct timeval tm1;
// Record the wall-clock start time into the file-static tm1; paired with
// stop(), which prints the elapsed time since this call.
static inline void start()
{
    gettimeofday(&tm1, NULL);
}
// Print the wall-clock time elapsed since the matching start() call,
// in whole milliseconds.
static inline void stop()
{
    struct timeval now;
    gettimeofday(&now, NULL);
    unsigned long long elapsed_ms =
        1000 * (now.tv_sec - tm1.tv_sec) + (now.tv_usec - tm1.tv_usec) / 1000;
    printf("Processing time: %llu (ms)\n", elapsed_ms);
}
// Abort (by default) with a descriptive message when a HIP API call fails.
// Invoked through the gpuErrchk() macro, which supplies file and line.
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
    if (code != hipSuccess)
    {
        fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
        // exit with the error code so scripts can distinguish failures
        if (abort) exit(code);
    }
}
void runParallel(int argc, char **argv,
unsigned long encrypted_password, unsigned long search_space_size, unsigned int pwd_mem_size, uint key_list_size);
// "Encrypt" a candidate plaintext by multiplying it with the key
// (the inverse operation the brute-force search is inverting).
__device__ __forceinline__
unsigned long d_encrypt(unsigned long input, uint encryption_key) {
    return input * encryption_key;
}
__device__ int g_found = 0;
__device__ unsigned long d_answer = 0;
// Brute-force search kernel: each thread tests one candidate plaintext
// (derived from its global x index, offset by pageId for successive
// launches) against one encryption key (selected by threadIdx.y).
// A match is published through the device globals d_answer / g_found.
// NOTE(review): pageDim is currently unused; if distinct (candidate, key)
// pairs encrypt to the same value, which one wins the d_answer race is
// nondeterministic -- confirm this is acceptable.
__global__ void
crackPassword(unsigned long encrypted_password, unsigned long pageDim, unsigned long pageId)
{
    // Thread coordinates: x selects the candidate value, y selects the key.
    const unsigned long tidx = threadIdx.x;
    const unsigned long tidy = threadIdx.y;
    const unsigned long bid = blockIdx.x;
    const unsigned int num_threads = blockDim.x;
    // Candidate plaintext tried by this thread.
    const unsigned long global_tid = (pageId * gridDim.x * blockDim.x) + (bid * num_threads) + tidx;
    // Table of 90 prime keys; one per threadIdx.y lane (blockDim.y must be <= 90).
    const uint encryption_keys[] = {
        31, 37, 41, 43, 47, 53, 59, 61, 67, 71,
        73, 79, 83, 89, 97, 101, 103, 107, 109, 113,
        127, 131, 137, 139, 149, 151, 157, 163, 167, 173,
        179, 181, 191, 193, 197, 199, 211, 223, 227, 229,
        233, 239, 241, 251, 257, 263, 269, 271, 277, 281,
        283, 293, 307, 311, 313, 317, 331, 337, 347, 349,
        353, 359, 367, 373, 379, 383, 389, 397, 401, 409,
        419, 421, 431, 433, 439, 443, 449, 457, 461, 463,
        467, 479, 487, 491, 499, 503, 509, 521, 523, 541
    };
    uint key = encryption_keys[tidy];
    // Best-effort early-out once any thread has found the answer; a stale
    // read only costs redundant work, never a missed match.
    if (g_found) {
        return;
    }
    unsigned long tmp_encrypted = d_encrypt(global_tid, key);
    if (encrypted_password == tmp_encrypted) {
        d_answer = global_tid;
        g_found = 1;
    }
}
// Read the encrypted password from stdin, then run the GPU and CPU searches.
int main(int argc, char **argv)
{
    // Input buffer large enough for any supported password plus the NUL.
    const unsigned int pwd_max_size = 32 + 1;
    // Number of candidate encryption keys tried per plaintext (see kernel table).
    const uint key_list_size = 90;
    char encrypted_password[pwd_max_size];
    printf("Enter the encrypted password:\n");
    // %7s reads at most max_encrypted_pwd_length - 1 characters.
    // Robustness: check the conversion actually happened before using it.
    if (scanf("%7s", encrypted_password) != 1) {
        fprintf(stderr, "Failed to read the encrypted password\n");
        return EXIT_FAILURE;
    }
    uint pwd_size = strlen(encrypted_password);
    unsigned int pwd_mem_size = (pwd_size + 1) * sizeof(char);
    // 95 printable ASCII characters, candidate passwords up to 5 characters.
    unsigned long search_space_size = pow(total_no_ascii_chars, 5);
    printf("Search space size: %lu\n", search_space_size * key_list_size);
    // BUGFIX: pass the actual input length instead of the hard-coded 7; the
    // old call read past the terminator for shorter passwords.
    unsigned long long_encrypted = char_array_to_ulong(encrypted_password, pwd_size);
    runParallel(argc, argv,
        long_encrypted, search_space_size, pwd_mem_size, key_list_size);
    runSerial(long_encrypted, search_space_size, pwd_mem_size);
    exit(EXIT_SUCCESS);
}
// Launch the GPU brute-force search.
// The search space is covered in 'numberIterations' pages of
// num_blocks x (num_threads_per_block x key_list_size) threads; after each
// page the d_answer symbol is polled and the loop exits once a nonzero
// plaintext has been published by crackPassword().
void
runParallel(int argc, char **argv,
    unsigned long encrypted_password, unsigned long search_space_size, unsigned int pwd_mem_size, uint key_list_size)
{
    printf("Running parallel version...\n");
    // use command-line specified CUDA device, otherwise use device with highest Gflops/s
    findCudaDevice(argc, (const char **)argv);
    // setup execution parameters
    const uint num_threads_per_block = 10;
    const uint num_blocks = 5000;
    // ceil-divide the candidate space over the pages; +1 covers the remainder
    unsigned long numberIterations = (search_space_size / (num_blocks * num_threads_per_block)) + 1;
    printf("Launching %lu iterations...\n", numberIterations);
    printf("Launching %d blocks per iteration...\n", num_blocks);
    printf("Launching %d threads per block...\n", num_threads_per_block * key_list_size);
    printf("Launching %d threads per iteration...\n", num_blocks * num_threads_per_block * key_list_size);
    printf("Launching %lu total threads...\n", num_blocks * num_threads_per_block * key_list_size * numberIterations);
    // 2-D blocks: x indexes candidate plaintexts, y indexes encryption keys
    dim3 grid(num_blocks, 1, 1);
    dim3 threads(num_threads_per_block, key_list_size, 1);
    // allocate mem for the result on host side
    char *decrypted_password = (char *) malloc(pwd_mem_size);
    start();
    // execute the kernel, one page of the search space per iteration
    for (uint i=0; i < numberIterations; i++) {
        hipLaunchKernelGGL(( crackPassword), dim3(grid), dim3(threads), 0, 0, encrypted_password, numberIterations, i);
        // check if kernel execution generated an error
        getLastCudaError("Kernel execution failed");
        gpuErrchk(hipPeekAtLastError());
        gpuErrchk(hipDeviceSynchronize());
        // copy result from device to host
        // (typeof is a GNU extension; matches d_answer's declared type)
        typeof(d_answer) answer;
        checkCudaErrors(hipMemcpyFromSymbol(&answer, d_answer, sizeof(answer), 0, hipMemcpyDeviceToHost));
        // d_answer stays 0 until a match is found (candidate 0 is never reported)
        if (answer != 0) {
            ulong_to_char_array(answer, decrypted_password);
            printf("Decrypted password: %s \n", decrypted_password);
            break;
        }
    }
    stop();
    // cleanup memory
    free(decrypted_password);
}
| 0fb4c460a91c97b261502d20266ff2af5f618ffa.cu | ////////////////////////////////////////////////////////////////////////////
//
// Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
//
// Please refer to the NVIDIA end user license agreement (EULA) associated
// with this source code for terms and conditions that govern your use of
// this software. Any use, reproduction, disclosure, or distribution of
// this software and related documentation outside the terms of the EULA
// is strictly prohibited.
//
////////////////////////////////////////////////////////////////////////////
/* Template project which demonstrates the basics on how to setup a project
* example application.
* Host code.
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <sys/stat.h>
#include <dirent.h>
#include <cuda_runtime.h>
#include <helper_cuda.h>
#include <helper_functions.h> // helper functions for SDK examples
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
#define total_no_ascii_chars 95
#define max_encrypted_pwd_length 8
extern "C"
void ulong_to_char_array(unsigned long search_pos, char *output);
extern "C"
void runSerial(unsigned long encrypted_password, unsigned long search_space_size, unsigned int pwd_mem_size);
extern "C"
unsigned long char_array_to_ulong(char *input, uint array_lenght);
static struct timeval tm1;
// Record the wall-clock start time into the file-static tm1; paired with
// stop(), which prints the elapsed time since this call.
static inline void start()
{
    gettimeofday(&tm1, NULL);
}
// Print the wall-clock time elapsed since the matching start() call,
// in whole milliseconds.
static inline void stop()
{
    struct timeval now;
    gettimeofday(&now, NULL);
    unsigned long long elapsed_ms =
        1000 * (now.tv_sec - tm1.tv_sec) + (now.tv_usec - tm1.tv_usec) / 1000;
    printf("Processing time: %llu (ms)\n", elapsed_ms);
}
// Abort (by default) with a descriptive message when a CUDA API call fails.
// Invoked through the gpuErrchk() macro, which supplies file and line.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code != cudaSuccess)
    {
        fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        // exit with the error code so scripts can distinguish failures
        if (abort) exit(code);
    }
}
void runParallel(int argc, char **argv,
unsigned long encrypted_password, unsigned long search_space_size, unsigned int pwd_mem_size, uint key_list_size);
// "Encrypt" a candidate plaintext by multiplying it with the key
// (the inverse operation the brute-force search is inverting).
__device__ __forceinline__
unsigned long d_encrypt(unsigned long input, uint encryption_key) {
    return input * encryption_key;
}
__device__ int g_found = 0;
__device__ unsigned long d_answer = 0;
// Brute-force search kernel: each thread tests one candidate plaintext
// (derived from its global x index, offset by pageId for successive
// launches) against one encryption key (selected by threadIdx.y).
// A match is published through the device globals d_answer / g_found.
// NOTE(review): pageDim is currently unused; if distinct (candidate, key)
// pairs encrypt to the same value, which one wins the d_answer race is
// nondeterministic -- confirm this is acceptable.
__global__ void
crackPassword(unsigned long encrypted_password, unsigned long pageDim, unsigned long pageId)
{
    // Thread coordinates: x selects the candidate value, y selects the key.
    const unsigned long tidx = threadIdx.x;
    const unsigned long tidy = threadIdx.y;
    const unsigned long bid = blockIdx.x;
    const unsigned int num_threads = blockDim.x;
    // Candidate plaintext tried by this thread.
    const unsigned long global_tid = (pageId * gridDim.x * blockDim.x) + (bid * num_threads) + tidx;
    // Table of 90 prime keys; one per threadIdx.y lane (blockDim.y must be <= 90).
    const uint encryption_keys[] = {
        31, 37, 41, 43, 47, 53, 59, 61, 67, 71,
        73, 79, 83, 89, 97, 101, 103, 107, 109, 113,
        127, 131, 137, 139, 149, 151, 157, 163, 167, 173,
        179, 181, 191, 193, 197, 199, 211, 223, 227, 229,
        233, 239, 241, 251, 257, 263, 269, 271, 277, 281,
        283, 293, 307, 311, 313, 317, 331, 337, 347, 349,
        353, 359, 367, 373, 379, 383, 389, 397, 401, 409,
        419, 421, 431, 433, 439, 443, 449, 457, 461, 463,
        467, 479, 487, 491, 499, 503, 509, 521, 523, 541
    };
    uint key = encryption_keys[tidy];
    // Best-effort early-out once any thread has found the answer; a stale
    // read only costs redundant work, never a missed match.
    if (g_found) {
        return;
    }
    unsigned long tmp_encrypted = d_encrypt(global_tid, key);
    if (encrypted_password == tmp_encrypted) {
        d_answer = global_tid;
        g_found = 1;
    }
}
// Read the encrypted password from stdin, then run the GPU and CPU searches.
int main(int argc, char **argv)
{
    // Input buffer large enough for any supported password plus the NUL.
    const unsigned int pwd_max_size = 32 + 1;
    // Number of candidate encryption keys tried per plaintext (see kernel table).
    const uint key_list_size = 90;
    char encrypted_password[pwd_max_size];
    printf("Enter the encrypted password:\n");
    // %7s reads at most max_encrypted_pwd_length - 1 characters.
    // Robustness: check the conversion actually happened before using it.
    if (scanf("%7s", encrypted_password) != 1) {
        fprintf(stderr, "Failed to read the encrypted password\n");
        return EXIT_FAILURE;
    }
    uint pwd_size = strlen(encrypted_password);
    unsigned int pwd_mem_size = (pwd_size + 1) * sizeof(char);
    // 95 printable ASCII characters, candidate passwords up to 5 characters.
    unsigned long search_space_size = pow(total_no_ascii_chars, 5);
    printf("Search space size: %lu\n", search_space_size * key_list_size);
    // BUGFIX: pass the actual input length instead of the hard-coded 7; the
    // old call read past the terminator for shorter passwords.
    unsigned long long_encrypted = char_array_to_ulong(encrypted_password, pwd_size);
    runParallel(argc, argv,
        long_encrypted, search_space_size, pwd_mem_size, key_list_size);
    runSerial(long_encrypted, search_space_size, pwd_mem_size);
    exit(EXIT_SUCCESS);
}
// Launch the GPU brute-force search.
// The search space is covered in 'numberIterations' pages of
// num_blocks x (num_threads_per_block x key_list_size) threads; after each
// page the d_answer symbol is polled and the loop exits once a nonzero
// plaintext has been published by crackPassword().
void
runParallel(int argc, char **argv,
    unsigned long encrypted_password, unsigned long search_space_size, unsigned int pwd_mem_size, uint key_list_size)
{
    printf("Running parallel version...\n");
    // use command-line specified CUDA device, otherwise use device with highest Gflops/s
    findCudaDevice(argc, (const char **)argv);
    // setup execution parameters
    const uint num_threads_per_block = 10;
    const uint num_blocks = 5000;
    // ceil-divide the candidate space over the pages; +1 covers the remainder
    unsigned long numberIterations = (search_space_size / (num_blocks * num_threads_per_block)) + 1;
    printf("Launching %lu iterations...\n", numberIterations);
    printf("Launching %d blocks per iteration...\n", num_blocks);
    printf("Launching %d threads per block...\n", num_threads_per_block * key_list_size);
    printf("Launching %d threads per iteration...\n", num_blocks * num_threads_per_block * key_list_size);
    printf("Launching %lu total threads...\n", num_blocks * num_threads_per_block * key_list_size * numberIterations);
    // 2-D blocks: x indexes candidate plaintexts, y indexes encryption keys
    dim3 grid(num_blocks, 1, 1);
    dim3 threads(num_threads_per_block, key_list_size, 1);
    // allocate mem for the result on host side
    char *decrypted_password = (char *) malloc(pwd_mem_size);
    start();
    // execute the kernel, one page of the search space per iteration
    for (uint i=0; i < numberIterations; i++) {
        crackPassword<<<grid, threads>>>(encrypted_password, numberIterations, i);
        // check if kernel execution generated an error
        getLastCudaError("Kernel execution failed");
        gpuErrchk(cudaPeekAtLastError());
        gpuErrchk(cudaDeviceSynchronize());
        // copy result from device to host
        // (typeof is a GNU extension; matches d_answer's declared type)
        typeof(d_answer) answer;
        checkCudaErrors(cudaMemcpyFromSymbol(&answer, d_answer, sizeof(answer), 0, cudaMemcpyDeviceToHost));
        // d_answer stays 0 until a match is found (candidate 0 is never reported)
        if (answer != 0) {
            ulong_to_char_array(answer, decrypted_password);
            printf("Decrypted password: %s \n", decrypted_password);
            break;
        }
    }
    stop();
    // cleanup memory
    free(decrypted_password);
}
|
8c18afc1c7460212e8986e5322d4ad383d96a163.hip | // !!! This is a file automatically generated by hipify!!!
/**
Question 2
Compile: nvcc q2.cu -o q2
Run: srun --pty --nodes 1 --job-name=interactive --partition=gpu --reservation=EECE5640 --gres=gpu:1 ./q2
DEBUG:
0 Print histogram
1 Print file lines to ensure readability of file
2 Print summation of coauthors per author
3 Print max co-authors with respective author and execution time
**/
// Include libraries //
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <hip/hip_runtime.h>
// Macros //
#define DEBUG 3
// CUDA Kernel //
// Mark adjacent duplicates in a (sorted) array:
// c[i] = 1 iff i > 0 and a[i] == a[i-1]; c[0] is always 0.
// One element per thread; n is the element count.
__global__ void sort(int *a, int *c, int n){
    const int id = blockIdx.x * blockDim.x + threadIdx.x;
    // Guard the grid tail
    if (id >= n) {
        return;
    }
    c[id] = (id > 0) && (a[id] == a[id - 1]);
}
// Main Function //
// Parse the DBLP co-author edge list, count co-authors per author on the
// GPU + host, and report according to the DEBUG mode.
int main( int argc, char* argv[] ){
	// ---- Local variables ----
	// BUGFIX: 'c' must be an int (not char) so the EOF sentinel returned by
	// getc() is representable; with a char the loop may never terminate.
	int c;
	// BUGFIX: zero-initialize the token buffer; strlen()/strncat() below
	// require a NUL-terminated string and it was previously uninitialized.
	char string[100] = {0};
	int i;
	int N, count, buffer;
	float milliseconds = 0, milliseconds2 = 0;
	// Timing using CUDA Events: start2->midpoint times the file parse,
	// start->end times the whole run.
	hipEvent_t start, start2, midpoint, end;
	hipEventCreate(&start);
	hipEventCreate(&start2);
	hipEventCreate(&midpoint);
	hipEventCreate(&end);
	// Start Clock
	hipEventRecord(start);
	hipEventRecord(start2);
	// Edge-list columns, grown by realloc as lines are parsed
	count = 0;
	buffer = 5;
	int *chunk1 = (int *) malloc (buffer * sizeof(int));
	int *chunk2 = (int *) malloc (buffer * sizeof(int));
	// Initiate file variables
	FILE *file;
	i = 0;
	// Open file (robustness: fail cleanly instead of crashing when missing)
	file = fopen("dblp-co-authors.txt","r");
	if (file == NULL){
		fprintf(stderr, "Could not open dblp-co-authors.txt\n");
		return 1;
	}
	// Iterate through each character of the file
	for(c = getc(file); c != EOF; c = getc(file)){
		// A space terminates the first column of the current line
		if (c == ' '){
			chunk1[count] = 0;
			chunk1[count] = atoi(string);
			memset(string, 0, strlen(string));
		}
		// A newline terminates the second column
		else if(c == '\n'){
			chunk2[count] = 0;
			chunk2[count] = atoi(string);
			// DEBUG mode 1: echo the parsed pair
			if(DEBUG == 1){
				printf("%d %d\n", chunk1[count], chunk2[count]);
			}
			memset(string, 0, strlen(string));
			count++;
			// Reallocate chunk arrays to ensure they are big enough
			chunk1 = (int *) realloc(chunk1, sizeof(int) * (count + buffer));
			chunk2 = (int *) realloc(chunk2, sizeof(int) * (count + buffer));
		}
		// Otherwise accumulate the character into the current token
		else{
			char ch = (char) c;
			strncat(string, &ch, 1);
		}
	}
	// Initialize zero element
	chunk1[0] = 0;
	chunk2[0] = 2;
	// Ensure last value gets placed into arrays
	// NOTE(review): these hard-coded indices assume the exact DBLP data set
	// (317,080 edges) -- confirm against the input file.
	chunk1[317079] = 317079;
	chunk2[317079] = 317080;
	// Close file
	fclose(file);
	// Stop midpoint clock
	// BUGFIX: synchronize on the event before querying elapsed time,
	// otherwise the query can run before the event has completed.
	hipEventRecord(midpoint);
	hipEventSynchronize(midpoint);
	hipEventElapsedTime(&milliseconds, start2, midpoint);
	// Distribute data to GPU threads
	N = count;
	// BUGFIX: the device pointers were previously malloc'd on the host and
	// the pointers overwritten by hipMalloc, leaking the host blocks.
	int *d_chunk1 = NULL;
	int *d_count1 = NULL;
	int *count_array1 = (int *) malloc ((N + 10) * sizeof(int));
	// BUGFIX: author IDs can equal N (see chunk2[317079] above), so allocate
	// head room to avoid an off-by-one overflow while counting.
	int *opposite_count_array = (int *) malloc ((N + 10) * sizeof(int));
	// Allocate memory for each vector on GPU
	hipMalloc( &d_chunk1, (N + 1) * sizeof(int) );
	hipMalloc( &d_count1, (N + 1) * sizeof(int) );
	// Copy vectors to device
	hipMemcpy( d_chunk1, chunk1, N * sizeof(int), hipMemcpyHostToDevice );
	// Initialize count arrays (including the head-room slots)
	for(i = 0; i < N + 10; i++){
		count_array1[i] = 0;
		opposite_count_array[i] = 0;
	}
	// Copy vectors to device
	hipMemcpy( d_count1, count_array1, N * sizeof(int), hipMemcpyHostToDevice );
	// Initialize variables for CUDA
	int blockSize, gridSize;
	blockSize = 1024; // Number of threads in each thread block
	gridSize = (int) ceil((float) N / blockSize); // Number of thread blocks in grid
	// Execute the kernel
	// BUGFIX: pass the element count N, not the byte count N * sizeof(int);
	// the old value defeated the kernel's bounds check and caused
	// out-of-range accesses.
	hipLaunchKernelGGL(( sort), dim3(gridSize), dim3(blockSize), 0, 0, d_chunk1, d_count1, N);
	// Copy back to host
	hipMemcpy( count_array1, d_count1, N * sizeof(int), hipMemcpyDeviceToHost );
	// Count authors and place into opposite counting array
	int index = 0;
	for(i = 0; i < N; i++){
		if(count_array1[i] == 0){
			opposite_count_array[chunk1[index]] = 1;
		}
		else{
			opposite_count_array[chunk1[index]]++;
		}
		index++;
	}
	// Count authors in chunk 2
	for(i = 0; i < N; i++){
		opposite_count_array[chunk2[i]]++;
	}
	// End clock
	// BUGFIX: synchronize before querying elapsed time (see above).
	hipEventRecord(end);
	hipEventSynchronize(end);
	hipEventElapsedTime(&milliseconds2, start, end);
	// Print results
	if(DEBUG == 2){
		printf("Histogram of authors:\n");
		for(i = 1; i < (N / 2); i++){
			if(opposite_count_array[i] != 0){
				printf("(%d, %d)\n", i, opposite_count_array[i]);
			}
		}
		printf("\n");
	}
	else if(DEBUG == 3){
		int max = 0;
		// BUGFIX: zero the whole array up front; entries whose count was zero
		// were previously read uninitialized in the final print loop.
		int max_index[N];
		memset(max_index, 0, N * sizeof(int));
		for(i = 1; i < (N / 2); i++){
			if(opposite_count_array[i] != 0){
				if(opposite_count_array[i] >= max){
					max = opposite_count_array[i];
				}
			}
		}
		for(i = 0; i < (N / 2); i++){
			if(opposite_count_array[i] == max)
				max_index[i] = i;
		}
		printf("\n\n Midpoint Execution Time (milliseconds): %10.8f\n", milliseconds);
		printf("Total Execution Time (milliseconds): %10.8f", milliseconds2);
		printf("\n\nThe largest number of co-authors is %d\n", max);
		printf("The author(s) is/are: ");
		for(i = 0; i < (N / 2); i++){
			if(max_index[i] != 0){
				printf("%d ", i);
			}
		}
		printf("\n\n");
	}
	else if (DEBUG == 0){
		for(i = 1; i < (N / 2); i++){
			count_array1[i] = 0;
			if(opposite_count_array[i] != 0){
				count_array1[opposite_count_array[i]]++;
			}
		}
		for(i = 0; i < (N / 2); i++){
			// Author ID, co-author count
			if(count_array1[i] != 0)
				printf("%d,%d\n", i, count_array1[i]);
		}
	}
	// Cleanly exit (also destroy the timing events, previously leaked)
	hipEventDestroy(start);
	hipEventDestroy(start2);
	hipEventDestroy(midpoint);
	hipEventDestroy(end);
	hipFree(d_chunk1);
	hipFree(d_count1);
	free(chunk1);
	free(chunk2);
	free(count_array1);
	free(opposite_count_array);
	return 0;
}
| 8c18afc1c7460212e8986e5322d4ad383d96a163.cu | /**
Question 2
Compile: nvcc q2.cu -o q2
Run: srun --pty --nodes 1 --job-name=interactive --partition=gpu --reservation=EECE5640 --gres=gpu:1 ./q2
DEBUG:
0 Print histogram
1 Print file lines to ensure readability of file
2 Print summation of coauthors per author
3 Print max co-authors with respective author and execution time
**/
// Include libraries //
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda.h>
// Macros //
#define DEBUG 3
// CUDA Kernel //
// Mark adjacent duplicates in a (sorted) array:
// c[i] = 1 iff i > 0 and a[i] == a[i-1]; c[0] is always 0.
// One element per thread; n is the element count.
__global__ void sort(int *a, int *c, int n){
    const int id = blockIdx.x * blockDim.x + threadIdx.x;
    // Guard the grid tail
    if (id >= n) {
        return;
    }
    c[id] = (id > 0) && (a[id] == a[id - 1]);
}
// Main Function //
// Parse the DBLP co-author edge list, count co-authors per author on the
// GPU + host, and report according to the DEBUG mode.
int main( int argc, char* argv[] ){
	// ---- Local variables ----
	// BUGFIX: 'c' must be an int (not char) so the EOF sentinel returned by
	// getc() is representable; with a char the loop may never terminate.
	int c;
	// BUGFIX: zero-initialize the token buffer; strlen()/strncat() below
	// require a NUL-terminated string and it was previously uninitialized.
	char string[100] = {0};
	int i;
	int N, count, buffer;
	float milliseconds = 0, milliseconds2 = 0;
	// Timing using CUDA Events: start2->midpoint times the file parse,
	// start->end times the whole run.
	cudaEvent_t start, start2, midpoint, end;
	cudaEventCreate(&start);
	cudaEventCreate(&start2);
	cudaEventCreate(&midpoint);
	cudaEventCreate(&end);
	// Start Clock
	cudaEventRecord(start);
	cudaEventRecord(start2);
	// Edge-list columns, grown by realloc as lines are parsed
	count = 0;
	buffer = 5;
	int *chunk1 = (int *) malloc (buffer * sizeof(int));
	int *chunk2 = (int *) malloc (buffer * sizeof(int));
	// Initiate file variables
	FILE *file;
	i = 0;
	// Open file (robustness: fail cleanly instead of crashing when missing)
	file = fopen("dblp-co-authors.txt","r");
	if (file == NULL){
		fprintf(stderr, "Could not open dblp-co-authors.txt\n");
		return 1;
	}
	// Iterate through each character of the file
	for(c = getc(file); c != EOF; c = getc(file)){
		// A space terminates the first column of the current line
		if (c == ' '){
			chunk1[count] = 0;
			chunk1[count] = atoi(string);
			memset(string, 0, strlen(string));
		}
		// A newline terminates the second column
		else if(c == '\n'){
			chunk2[count] = 0;
			chunk2[count] = atoi(string);
			// DEBUG mode 1: echo the parsed pair
			if(DEBUG == 1){
				printf("%d %d\n", chunk1[count], chunk2[count]);
			}
			memset(string, 0, strlen(string));
			count++;
			// Reallocate chunk arrays to ensure they are big enough
			chunk1 = (int *) realloc(chunk1, sizeof(int) * (count + buffer));
			chunk2 = (int *) realloc(chunk2, sizeof(int) * (count + buffer));
		}
		// Otherwise accumulate the character into the current token
		else{
			char ch = (char) c;
			strncat(string, &ch, 1);
		}
	}
	// Initialize zero element
	chunk1[0] = 0;
	chunk2[0] = 2;
	// Ensure last value gets placed into arrays
	// NOTE(review): these hard-coded indices assume the exact DBLP data set
	// (317,080 edges) -- confirm against the input file.
	chunk1[317079] = 317079;
	chunk2[317079] = 317080;
	// Close file
	fclose(file);
	// Stop midpoint clock
	// BUGFIX: synchronize on the event before querying elapsed time,
	// otherwise the query can run before the event has completed.
	cudaEventRecord(midpoint);
	cudaEventSynchronize(midpoint);
	cudaEventElapsedTime(&milliseconds, start2, midpoint);
	// Distribute data to GPU threads
	N = count;
	// BUGFIX: the device pointers were previously malloc'd on the host and
	// the pointers overwritten by cudaMalloc, leaking the host blocks.
	int *d_chunk1 = NULL;
	int *d_count1 = NULL;
	int *count_array1 = (int *) malloc ((N + 10) * sizeof(int));
	// BUGFIX: author IDs can equal N (see chunk2[317079] above), so allocate
	// head room to avoid an off-by-one overflow while counting.
	int *opposite_count_array = (int *) malloc ((N + 10) * sizeof(int));
	// Allocate memory for each vector on GPU
	cudaMalloc( &d_chunk1, (N + 1) * sizeof(int) );
	cudaMalloc( &d_count1, (N + 1) * sizeof(int) );
	// Copy vectors to device
	cudaMemcpy( d_chunk1, chunk1, N * sizeof(int), cudaMemcpyHostToDevice );
	// Initialize count arrays (including the head-room slots)
	for(i = 0; i < N + 10; i++){
		count_array1[i] = 0;
		opposite_count_array[i] = 0;
	}
	// Copy vectors to device
	cudaMemcpy( d_count1, count_array1, N * sizeof(int), cudaMemcpyHostToDevice );
	// Initialize variables for CUDA
	int blockSize, gridSize;
	blockSize = 1024; // Number of threads in each thread block
	gridSize = (int) ceil((float) N / blockSize); // Number of thread blocks in grid
	// Execute the kernel
	// BUGFIX: pass the element count N, not the byte count N * sizeof(int);
	// the old value defeated the kernel's bounds check and caused
	// out-of-range accesses.
	sort<<<gridSize, blockSize>>>(d_chunk1, d_count1, N);
	// Copy back to host
	cudaMemcpy( count_array1, d_count1, N * sizeof(int), cudaMemcpyDeviceToHost );
	// Count authors and place into opposite counting array
	int index = 0;
	for(i = 0; i < N; i++){
		if(count_array1[i] == 0){
			opposite_count_array[chunk1[index]] = 1;
		}
		else{
			opposite_count_array[chunk1[index]]++;
		}
		index++;
	}
	// Count authors in chunk 2
	for(i = 0; i < N; i++){
		opposite_count_array[chunk2[i]]++;
	}
	// End clock
	// BUGFIX: synchronize before querying elapsed time (see above).
	cudaEventRecord(end);
	cudaEventSynchronize(end);
	cudaEventElapsedTime(&milliseconds2, start, end);
	// Print results
	if(DEBUG == 2){
		printf("Histogram of authors:\n");
		for(i = 1; i < (N / 2); i++){
			if(opposite_count_array[i] != 0){
				printf("(%d, %d)\n", i, opposite_count_array[i]);
			}
		}
		printf("\n");
	}
	else if(DEBUG == 3){
		int max = 0;
		// BUGFIX: zero the whole array up front; entries whose count was zero
		// were previously read uninitialized in the final print loop.
		int max_index[N];
		memset(max_index, 0, N * sizeof(int));
		for(i = 1; i < (N / 2); i++){
			if(opposite_count_array[i] != 0){
				if(opposite_count_array[i] >= max){
					max = opposite_count_array[i];
				}
			}
		}
		for(i = 0; i < (N / 2); i++){
			if(opposite_count_array[i] == max)
				max_index[i] = i;
		}
		printf("\n\n Midpoint Execution Time (milliseconds): %10.8f\n", milliseconds);
		printf("Total Execution Time (milliseconds): %10.8f", milliseconds2);
		printf("\n\nThe largest number of co-authors is %d\n", max);
		printf("The author(s) is/are: ");
		for(i = 0; i < (N / 2); i++){
			if(max_index[i] != 0){
				printf("%d ", i);
			}
		}
		printf("\n\n");
	}
	else if (DEBUG == 0){
		for(i = 1; i < (N / 2); i++){
			count_array1[i] = 0;
			if(opposite_count_array[i] != 0){
				count_array1[opposite_count_array[i]]++;
			}
		}
		for(i = 0; i < (N / 2); i++){
			// Author ID, co-author count
			if(count_array1[i] != 0)
				printf("%d,%d\n", i, count_array1[i]);
		}
	}
	// Cleanly exit (also destroy the timing events, previously leaked)
	cudaEventDestroy(start);
	cudaEventDestroy(start2);
	cudaEventDestroy(midpoint);
	cudaEventDestroy(end);
	cudaFree(d_chunk1);
	cudaFree(d_count1);
	free(chunk1);
	free(chunk2);
	free(count_array1);
	free(opposite_count_array);
	return 0;
}
|
311c82ccca30cecc2275b35dc9b2ccdf57cc7d68.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <cmath>
// Element-wise y += x over n floats using a grid-stride loop, so any
// grid/block configuration covers the whole array.
__global__
void add(int n, float* x, float* y) {
    const int first = blockIdx.x * blockDim.x + threadIdx.x;
    const int step = blockDim.x * gridDim.x;
    for (int i = first; i < n; i += step) {
        y[i] += x[i];
    }
}
// Time a large (2^28-element, ~1 GiB per array) vector add on the GPU.
int main() {
    int N = 1 << 28;
    size_t size = N * sizeof(float);
    float *x = (float*)malloc(size);
    float *y = (float*)malloc(size);
    // Robustness: these are ~1 GiB each, so the allocation can fail.
    if (x == NULL || y == NULL) {
        free(x);
        free(y);
        return 1;
    }
    float *d_x, *d_y;
    hipMalloc(&d_x, size);
    hipMalloc(&d_y, size);
    for (int i = 0; i < N; ++i) {
        x[i] = 1.0f;
        y[i] = 2.0f;
    }
    hipMemcpy(d_x, x, size, hipMemcpyHostToDevice);
    hipMemcpy(d_y, y, size, hipMemcpyHostToDevice);
    int blockSize = 256;
    int numBlocks = (N + blockSize - 1) / blockSize;  // ceil-div grid size
    hipEvent_t start;
    hipEvent_t stop;
    // Creating event
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipEventRecord(start);
    hipLaunchKernelGGL(( add), dim3(numBlocks), dim3(blockSize), 0, 0, N, d_x, d_y);
    // NOTE: stop is recorded after the D2H copy, so the reported time
    // includes the copy as well as the kernel.
    hipMemcpy(y, d_y, size, hipMemcpyDeviceToHost);
    hipEventRecord(stop);
    hipEventSynchronize(stop);
    float milliseconds = 0;
    hipEventElapsedTime(&milliseconds, start, stop);
    std::cout << milliseconds << " elapsed" << std::endl;
    // BUGFIX: destroy the timing events (previously leaked).
    hipEventDestroy(start);
    hipEventDestroy(stop);
    hipFree(d_x);
    hipFree(d_y);
    free(x);
    free(y);
    return 0;
}
| 311c82ccca30cecc2275b35dc9b2ccdf57cc7d68.cu | #include <iostream>
#include <cmath>
// Element-wise y += x over n floats using a grid-stride loop, so any
// grid/block configuration covers the whole array.
__global__
void add(int n, float* x, float* y) {
    const int first = blockIdx.x * blockDim.x + threadIdx.x;
    const int step = blockDim.x * gridDim.x;
    for (int i = first; i < n; i += step) {
        y[i] += x[i];
    }
}
// Time a large (2^28-element, ~1 GiB per array) vector add on the GPU.
int main() {
    int N = 1 << 28;
    size_t size = N * sizeof(float);
    float *x = (float*)malloc(size);
    float *y = (float*)malloc(size);
    // Robustness: these are ~1 GiB each, so the allocation can fail.
    if (x == NULL || y == NULL) {
        free(x);
        free(y);
        return 1;
    }
    float *d_x, *d_y;
    cudaMalloc(&d_x, size);
    cudaMalloc(&d_y, size);
    for (int i = 0; i < N; ++i) {
        x[i] = 1.0f;
        y[i] = 2.0f;
    }
    cudaMemcpy(d_x, x, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_y, y, size, cudaMemcpyHostToDevice);
    int blockSize = 256;
    int numBlocks = (N + blockSize - 1) / blockSize;  // ceil-div grid size
    cudaEvent_t start;
    cudaEvent_t stop;
    // Creating event
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    add<<<numBlocks, blockSize>>>(N, d_x, d_y);
    // NOTE: stop is recorded after the D2H copy, so the reported time
    // includes the copy as well as the kernel.
    cudaMemcpy(y, d_y, size, cudaMemcpyDeviceToHost);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    std::cout << milliseconds << " elapsed" << std::endl;
    // BUGFIX: destroy the timing events (previously leaked).
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_x);
    cudaFree(d_y);
    free(x);
    free(y);
    return 0;
}
|
58717f1bd588283c1644ef0457354f35be03394e.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <mpi.h>
extern "C" {
#include <Rinternals.h>
#include "../mpi_utils.h"
}
// Bind this MPI rank to a GPU by round-robin over the visible devices.
// Called once from R at package initialization; returns R's NULL.
extern "C" SEXP R_glmrgame_init(SEXP comm_)
{
  int ngpus;
  int rank;
  int id;
  MPI_Comm *comm = get_mpi_comm_from_Robj(comm_);
  // NOTE(review): the communicator is extracted from comm_ but the rank is
  // taken from MPI_COMM_WORLD -- confirm whether *comm was intended here.
  (void)comm;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  hipGetDeviceCount(&ngpus);
  // BUGFIX: guard against a zero device count, which previously caused a
  // division by zero in the modulo below.
  id = (ngpus > 0) ? (rank % ngpus) : 0;
  hipSetDevice(id);
#ifdef GLMRGAME_DEBUG
  printf("ngpus=%d rank=%d id=%d\n", ngpus, rank, id);
#endif
  return R_NilValue;
}
| 58717f1bd588283c1644ef0457354f35be03394e.cu | #include <cuda_runtime.h>
#include <mpi.h>
extern "C" {
#include <Rinternals.h>
#include "../mpi_utils.h"
}
// Bind this MPI rank to a GPU by round-robin over the visible devices.
// Called once from R at package initialization; returns R's NULL.
extern "C" SEXP R_glmrgame_init(SEXP comm_)
{
  int ngpus;
  int rank;
  int id;
  MPI_Comm *comm = get_mpi_comm_from_Robj(comm_);
  // NOTE(review): the communicator is extracted from comm_ but the rank is
  // taken from MPI_COMM_WORLD -- confirm whether *comm was intended here.
  (void)comm;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  cudaGetDeviceCount(&ngpus);
  // BUGFIX: guard against a zero device count, which previously caused a
  // division by zero in the modulo below.
  id = (ngpus > 0) ? (rank % ngpus) : 0;
  cudaSetDevice(id);
#ifdef GLMRGAME_DEBUG
  printf("ngpus=%d rank=%d id=%d\n", ngpus, rank, id);
#endif
  return R_NilValue;
}
|
0c6ca54187143281b09323273e3b989847595924.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include "H5Cpp.h"
#include <vector>
#include <H5Exception.h>
#include <map>
#include <omp.h>
// The following lines must be located BEFORE '#include <mex.h>'
#ifdef _MSC_VER
#define DLL_EXPORT_SYM __declspec(dllexport)
#else
#define DLL_EXPORT_SYM
#endif
#include <mex.h>
const int max_tags_length = 200000;
const int max_clock_tags_length = 5000;
const int max_channels = 3;
const size_t return_size = 3;
const int file_block_size = 64;
const double tagger_resolution = 82.3e-12;
// Host-side container for the tag data of one shot file: the raw tag
// streams plus fixed-capacity, per-channel sorted copies used for binning.
struct shotData {
	bool file_load_completed;                    // set when the file has been fully read
	std::vector<short int> channel_list;         // channels present in the file
	std::map<short int, short int> channel_map;  // presumably tagger channel -> dense index; verify against loader
	std::vector<long long int> start_tags;       // raw tags as read from the file
	std::vector<long long int> end_tags;
	std::vector<long long int> photon_tags;
	std::vector<long long int> clock_tags;
	std::vector<std::vector<long long int>> sorted_photon_tags;  // [channel][tag], capacity max_tags_length
	std::vector<std::vector<long int>> sorted_photon_bins;       // [channel][bin number]
	std::vector<std::vector<long long int>> sorted_clock_tags;   // [2][tag], capacity max_clock_tags_length
	std::vector<std::vector<long int>> sorted_clock_bins;
	std::vector<long int> sorted_photon_tag_pointers;            // number of valid entries per channel
	std::vector<long int> sorted_clock_tag_pointers;
	// Pre-size every fixed-capacity buffer so later code can index freely.
	shotData() : sorted_photon_tags(max_channels, std::vector<long long int>(max_tags_length, 0)), sorted_photon_bins(max_channels, std::vector<long int>(max_tags_length, 0)), sorted_photon_tag_pointers(max_channels, 0), sorted_clock_tags(2, std::vector<long long int>(max_clock_tags_length, 0)), sorted_clock_bins(2, std::vector<long int>(max_clock_tags_length, 0)), sorted_clock_tag_pointers(2, 0) {}
};
// Bundle of device pointers used by the coincidence kernels; names mirror
// the corresponding kernel parameters (each is allocated with cudaMalloc/
// hipMalloc elsewhere).
struct gpuData {
	long int *coinc_gpu;                 // output coincidence counts
	long int *photon_bins_gpu;           // flattened per-channel photon bin numbers
	long int *start_and_end_clocks_gpu;  // start/end clock bins per shot file
	int *max_bin_gpu, *pulse_spacing_gpu, *max_pulse_distance_gpu, *photon_bins_length_gpu;
	int *offset_gpu;                     // per-channel offsets into photon_bins_gpu
};
// Third-order (g3) coincidence counting kernel.
// Each thread handles one (tau_1, tau_2) delay pair: the first
// (2*max_bin+1)^2 global thread IDs cover the numerator histogram, the next
// (2*max_pulse_distance*2)^2 IDs cover pulse-shifted denominator
// measurements (tau offsets in whole multiples of pulse_spacing).
// For its delay pair the thread performs a three-way sorted-list merge over
// the photon bin lists of every channel triple and accumulates the number
// of three-fold coincidences into coinc. Branch-free arithmetic (masking by
// 0/1 predicates) is used throughout to avoid warp divergence.
// Scalar parameters (max_bin, pulse_spacing, ...) are passed as device
// pointers; shot_file_num selects which shot file's slice of the flattened
// arrays to read and which output histogram to update.
__global__ void calculateCoincidenceGPU_g3(long int *coinc, long int *photon_bins, long int *start_and_end_clocks, int *max_bin, int *pulse_spacing, int *max_pulse_distance, int *offset, int *photon_bins_length, int num_channels, int shot_file_num) {
	//Get numerator step to work on
	int id = threadIdx.x;
	int block = blockIdx.x;
	int block_size = blockDim.x;
	//Check if the id is something we're going to do a calculation on
	int in_range = (block * block_size + id) < ((*max_bin * 2 + 1) * (*max_bin * 2 + 1) + (*max_pulse_distance * 2) * (*max_pulse_distance * 2));
	//Check if we're doing a denominator calculation
	int pulse_shift_measurement = in_range && (block * block_size + id >= (*max_bin * 2 + 1) * (*max_bin * 2 + 1));
	//Determine effective id for x and y (numerator term and denominator term
	//are kept in the same expression, masked by pulse_shift_measurement)
	int id_x = ((block * block_size + id) % (2 * (*max_bin) + 1)) * (!pulse_shift_measurement);
	id_x += ((block * block_size + id - (2 * (*max_bin) + 1) * (2 * (*max_bin) + 1)) % (2 * (*max_pulse_distance))) * (pulse_shift_measurement);
	int id_y = ((block * block_size + id) / (2 * (*max_bin) + 1)) * (!pulse_shift_measurement);
	id_y += ((block * block_size + id - (2 * (*max_bin) + 1) * (2 * (*max_bin) + 1)) / (2 * (*max_pulse_distance))) * (pulse_shift_measurement);
	//Check we're not calculating something out of range
	//(denominator terms with id_x == id_y would shift both channels by the
	//same pulse offset and are skipped)
	if (in_range && (!pulse_shift_measurement || (pulse_shift_measurement && id_x != id_y))) {
		//Delays for channel 2 and channel 3 relative to channel 1; for the
		//denominator the +1 skip avoids a zero pulse shift
		int tau_1 = (id_x - (*max_bin)) * (!pulse_shift_measurement);
		int pulse_shift_1 = ((id_x - (*max_pulse_distance)) + ((id_x - (*max_pulse_distance)) >= 0)) * (pulse_shift_measurement);
		int pulse_shift_2 = ((id_y - (*max_pulse_distance)) + ((id_y - (*max_pulse_distance)) >= 0)) * (pulse_shift_measurement);
		tau_1 += pulse_shift_1 * (*pulse_spacing);
		int tau_2 = (id_y - (*max_bin)) * (!pulse_shift_measurement);
		tau_2 += pulse_shift_2 * (*pulse_spacing);
		//Iterate over every ordered triple of distinct channels
		for (int channel_1 = 0; channel_1 < num_channels; channel_1++) {
			for (int channel_2 = channel_1 + 1; channel_2 < num_channels; channel_2++) {
				for (int channel_3 = channel_2 + 1; channel_3 < num_channels; channel_3++) {
					int i = 0;
					int j = 0;
					int k = 0;
					int running_tot = 0;
					//Three-way merge over the sorted bin lists of the triple
					while ((i < photon_bins_length[channel_1 + shot_file_num * max_channels]) && (j < photon_bins_length[channel_2 + shot_file_num * max_channels]) && (k < photon_bins_length[channel_3 + shot_file_num * max_channels])) {
						int dummy_i = 0;
						int dummy_j = 0;
						int dummy_k = 0;
						//Skip channel-1 tags too close to the start/end clocks to fit the full delay window
						int out_window = (photon_bins[offset[channel_1 + shot_file_num * max_channels] + i] < (*max_bin + *max_pulse_distance * *pulse_spacing + start_and_end_clocks[0 + shot_file_num * 2])) || (photon_bins[offset[channel_1 + shot_file_num * max_channels] + i] > (start_and_end_clocks[1 + shot_file_num * 2] - (*max_bin + *max_pulse_distance * *pulse_spacing)));
						//Chan_1 > chan_2
						int c1_g_c2 = !out_window && (photon_bins[offset[channel_1 + shot_file_num * max_channels] + i] >(photon_bins[offset[channel_2 + shot_file_num * max_channels] + j] - tau_1));
						//Chan_1 > chan_3
						int c1_g_c3 = !out_window && (photon_bins[offset[channel_1 + shot_file_num * max_channels] + i] >(photon_bins[offset[channel_3 + shot_file_num * max_channels] + k] - tau_2));
						////Chan_1 < chan_2
						//int c1_l_c2 = !out_window && (photon_bins[offset[channel_1 + shot_file_num * max_channels] + i] < (photon_bins[offset[channel_2 + shot_file_num * max_channels] + j] - tau_1));
						////Chan_1 < chan_3
						//int c1_l_c3 = !out_window && (photon_bins[offset[channel_1 + shot_file_num * max_channels] + i] > (photon_bins[offset[channel_3 + shot_file_num * max_channels] + k] - tau_2));
						//Chan_1 == chan_2
						int c1_e_c2 = !out_window && (photon_bins[offset[channel_1 + shot_file_num * max_channels] + i] == (photon_bins[offset[channel_2 + shot_file_num * max_channels] + j] - tau_1));
						//Chan_1 == chan_3
						int c1_e_c3 = !out_window && (photon_bins[offset[channel_1 + shot_file_num * max_channels] + i] == (photon_bins[offset[channel_3 + shot_file_num * max_channels] + k] - tau_2));
						//Increment i if we're outside the window of interest
						dummy_i = out_window;
						//Start by using chan_1 as a reference for chan_2 and chan_3 to get them to catch up
						//Increment j if chan_2 < chan_1
						dummy_j += !out_window && c1_g_c2;
						//Increment k if chan_3 < chan_1
						dummy_k += !out_window && c1_g_c3;
						//Now need to deal with situation where chan_1 !> chan_2 && chan_1 !> chan_3
						//First the easy situation where chan_1 == chan_2 == chan_3: a coincidence
						running_tot += !out_window && c1_e_c2 && c1_e_c3;
						dummy_i += !out_window && c1_e_c2 && c1_e_c3;
						dummy_j += !out_window && c1_e_c2 && c1_e_c3;
						dummy_k += !out_window && c1_e_c2 && c1_e_c3;
						//If we haven't incremented dummy_j or dummy_k then by process of elimination dummy_i needs to incremented
						dummy_i += !out_window && !dummy_j && !dummy_k;
						//running_tot += in_window;
						i += dummy_i;
						j += dummy_j;
						k += dummy_k;
					}
					//Accumulate into this shot file's histogram slot for this (tau_1, tau_2)
					coinc[(block * block_size + id) + shot_file_num * ((*max_bin * 2 + 1) * (*max_bin * 2 + 1) + (*max_pulse_distance * 2) * (*max_pulse_distance * 2))] += running_tot;
					//coinc[(block * block_size + id) + shot_file_num * ((*max_bin * 2 + 1) * (*max_bin * 2 + 1) + (*max_pulse_distance * 2) * (*max_pulse_distance * 2))] = (block * block_size + id) + shot_file_num * ((*max_bin * 2 + 1) * (*max_bin * 2 + 1) + (*max_pulse_distance * 2) * (*max_pulse_distance * 2));
				}
			}
		}
	}
}
//Standalone denominator kernel for the g3 measurement.
//Launch: 2-D thread grid covering the (2*max_pulse_distance+1)^2 grid of
//(pulse_shift_1, pulse_shift_2) pairs; each thread counts three-fold
//coincidences over every distinct channel triple of one shot file and
//accumulates into its own element of denom.
//NOTE(review): the comparisons below shift channels 2/3 by the raw
//pulse_shift values, not by pulse_shift * (*pulse_spacing) as the combined
//numerator kernel does for its denominator entries -- confirm the intended
//units before relying on this kernel (it is not launched by mexFunction in
//this chunk).
__global__ void calculateDenominatorGPU_g3(long int *denom, long int *photon_bins, long int *start_and_end_clocks, int *max_bin, int *pulse_spacing, int *max_pulse_distance, int *offset, int *photon_bins_length, int num_channels, int shot_file_num) {
//Get denominator step to work on
int id_x = threadIdx.x;
int block_x = blockIdx.x;
int block_size_x = blockDim.x;
int id_y = threadIdx.y;
int block_y = blockIdx.y;
int block_size_y = blockDim.y;
//Check we're not calculating something out of range
if ((block_x * block_size_x + id_x < *max_pulse_distance * 2 + 1) && (block_y * block_size_y + id_y < *max_pulse_distance * 2 + 1)) {
//Map the 2-D thread coordinate onto signed pulse shifts in [-max_pulse_distance, max_pulse_distance]
int pulse_shift_1 = block_x * block_size_x + id_x - (*max_pulse_distance);
int pulse_shift_2 = block_y * block_size_y + id_y - (*max_pulse_distance);
//Skip zero and equal shifts (presumably because those overlap the true-coincidence configuration -- confirm)
if ((pulse_shift_1 != 0) && (pulse_shift_2 != 0) && (pulse_shift_1 != pulse_shift_2)) {
//Loop over every unordered triple of distinct channels
for (int channel_1 = 0; channel_1 < num_channels; channel_1++) {
for (int channel_2 = channel_1 + 1; channel_2 < num_channels; channel_2++) {
for (int channel_3 = channel_2 + 1; channel_3 < num_channels; channel_3++) {
//Branchless three-pointer merge over the sorted bin lists of the three channels;
//i/j/k walk channel_1/2/3 and running_tot counts simultaneous hits
int i = 0;
int j = 0;
int k = 0;
int running_tot = 0;
while ((i < photon_bins_length[channel_1 + shot_file_num * max_channels]) && (j < photon_bins_length[channel_2 + shot_file_num * max_channels]) && (k < photon_bins_length[channel_3 + shot_file_num * max_channels])) {
int dummy_i = 0;
int dummy_j = 0;
int dummy_k = 0;
//Reference tag must sit far enough inside the [start, end] clock window that every shift stays in range
int out_window = (photon_bins[offset[channel_1 + shot_file_num * max_channels] + i] < (*max_bin + *max_pulse_distance * *pulse_spacing + start_and_end_clocks[0 + shot_file_num * 2])) || (photon_bins[offset[channel_1 + shot_file_num * max_channels] + i] > (start_and_end_clocks[1 + shot_file_num * 2] - (*max_bin + *max_pulse_distance * *pulse_spacing)));
//Chan_1 > chan_2
int c1_g_c2 = !out_window && (photon_bins[offset[channel_1 + shot_file_num * max_channels] + i] >(photon_bins[offset[channel_2 + shot_file_num * max_channels] + j] - pulse_shift_1));
//Chan_1 > chan_3
int c1_g_c3 = !out_window && (photon_bins[offset[channel_1 + shot_file_num * max_channels] + i] >(photon_bins[offset[channel_3 + shot_file_num * max_channels] + k] - pulse_shift_2));
////Chan_1 < chan_2
//int c1_l_c2 = !out_window && (photon_bins[offset[channel_1 + shot_file_num * max_channels] + i] < (photon_bins[offset[channel_2 + shot_file_num * max_channels] + j] - pulse_shift_1));
////Chan_1 < chan_3
//int c1_l_c3 = !out_window && (photon_bins[offset[channel_1 + shot_file_num * max_channels] + i] > (photon_bins[offset[channel_3 + shot_file_num * max_channels] + k] - pulse_shift_2));
//Chan_1 == chan_2
int c1_e_c2 = !out_window && (photon_bins[offset[channel_1 + shot_file_num * max_channels] + i] == (photon_bins[offset[channel_2 + shot_file_num * max_channels] + j] - pulse_shift_1));
//Chan_1 == chan_3
int c1_e_c3 = !out_window && (photon_bins[offset[channel_1 + shot_file_num * max_channels] + i] == (photon_bins[offset[channel_3 + shot_file_num * max_channels] + k] - pulse_shift_2));
//Increment i if we're outside the window of interest
dummy_i = out_window;
//Start by using chan_1 as a reference for chan_2 and chan_3 to get them to catch up
//Increment j if chan_2 < chan_1
dummy_j += !out_window && c1_g_c2;
//Increment k if chan_3 < chan_1
dummy_k += !out_window && c1_g_c3;
//Now need to deal with situation where chan_1 !> chan_2 && chan_1 !> chan_3
//First the easy situation where chan_1 == chan_2 == chan_3
running_tot += !out_window && c1_e_c2 && c1_e_c3;
dummy_i += !out_window && c1_e_c2 && c1_e_c3;
dummy_j += !out_window && c1_e_c2 && c1_e_c3;
dummy_k += !out_window && c1_e_c2 && c1_e_c3;
//If we haven't incremented dummy_j or dummy_k then by process of elimination dummy_i needs to incremented
dummy_i += !out_window && !dummy_j && !dummy_k;
//running_tot += in_window;
i += dummy_i;
j += dummy_j;
k += dummy_k;
}
//Accumulate this thread's count into its (shift_1, shift_2) cell of the per-shot denominator grid
denom[block_x * block_size_x + id_x + (block_y * block_size_y + id_y) * (*max_pulse_distance * 2 + 1) + shot_file_num * (*max_pulse_distance * 2 + 1) * (*max_pulse_distance * 2 + 1)] += running_tot;
}
}
}
}
}
}
//Function grabs all tags and channel list from file.
//Reads the four "Tags" datasets (ClockTags0, StartTag, EndTag, TagWindow0)
//and the "Inform/ChannelList" dataset of one HDF5 shot file into *shot_data
//and fills the channel-id -> index map.  H5 calls throw H5::Exception on a
//corrupted file; populateBlock() catches that and flags the file as not loaded.
//filename is owned by the caller (mxMalloc'd) and is NOT freed here.
void fileToShotData(shotData *shot_data, char* filename) {
	//Open up file
	H5::H5File file(filename, H5F_ACC_RDONLY);
	//Open up "Tags" group
	H5::Group tag_group(file.openGroup("Tags"));
	//Find out how many tag sets there are, should be 4 if not something is fucky
	hsize_t numTagsSets = tag_group.getNumObjs();
	if (numTagsSets != 4) {
		//hsize_t is 64-bit, so cast for the %i specifier
		mexPrintf("There should be 4 sets of Tags, found %i\n", (int)numTagsSets);
		//Bail out without reading anything.  The old code did "delete filename;"
		//(the caller owns that mxMalloc'd string and mxFree's it later) and
		//"exit;" (which only names the function -- a no-op), so it carried on
		//reading the datasets regardless.
		return;
	}
	//Read tags to shotData structure
	//First the clock tags
	H5::DataSet clock_dset(tag_group.openDataSet("ClockTags0"));
	H5::DataSpace clock_dspace = clock_dset.getSpace();
	hsize_t clock_length[1];
	clock_dspace.getSimpleExtentDims(clock_length, NULL);
	shot_data->clock_tags.resize(clock_length[0]);
	clock_dset.read(&(*shot_data).clock_tags[0u], H5::PredType::NATIVE_UINT64, clock_dspace);
	clock_dspace.close();
	clock_dset.close();
	//Then start tags
	H5::DataSet start_dset(tag_group.openDataSet("StartTag"));
	H5::DataSpace start_dspace = start_dset.getSpace();
	hsize_t start_length[1];
	start_dspace.getSimpleExtentDims(start_length, NULL);
	shot_data->start_tags.resize(start_length[0]);
	start_dset.read(&(*shot_data).start_tags[0u], H5::PredType::NATIVE_UINT64, start_dspace);
	start_dspace.close();
	start_dset.close();
	//Then end tags
	H5::DataSet end_dset(tag_group.openDataSet("EndTag"));
	H5::DataSpace end_dspace = end_dset.getSpace();
	hsize_t end_length[1];
	end_dspace.getSimpleExtentDims(end_length, NULL);
	shot_data->end_tags.resize(end_length[0]);
	end_dset.read(&(*shot_data).end_tags[0u], H5::PredType::NATIVE_UINT64, end_dspace);
	end_dspace.close();
	end_dset.close();
	//Finally photon tags
	H5::DataSet photon_dset(tag_group.openDataSet("TagWindow0"));
	H5::DataSpace photon_dspace = photon_dset.getSpace();
	hsize_t photon_length[1];
	photon_dspace.getSimpleExtentDims(photon_length, NULL);
	shot_data->photon_tags.resize(photon_length[0]);
	photon_dset.read(&(*shot_data).photon_tags[0u], H5::PredType::NATIVE_UINT64, photon_dspace);
	photon_dspace.close();
	photon_dset.close();
	//And close tags group
	tag_group.close();
	//Open up "Inform" group
	H5::Group inform_group(file.openGroup("Inform"));
	//Grab channel list
	H5::DataSet chan_dset(inform_group.openDataSet("ChannelList"));
	H5::DataSpace chan_dspace = chan_dset.getSpace();
	hsize_t chan_length[1];
	chan_dspace.getSimpleExtentDims(chan_length, NULL);
	shot_data->channel_list.resize(chan_length[0]);
	chan_dset.read(&(*shot_data).channel_list[0u], H5::PredType::NATIVE_UINT16, chan_dspace);
	chan_dspace.close();
	chan_dset.close();
	//Close Inform group
	inform_group.close();
	//Close file
	file.close();
	//Populate channel map: channel id -> index into the sorted_* arrays
	for (short int i = 0; i < (short int)shot_data->channel_list.size(); i++) {
		shot_data->channel_map[shot_data->channel_list[i]] = i;
	}
}
//Reads relevant information for a block of files into shot_block.
//Slots whose file index runs past the end of the filelist, or whose file
//fails to load, are left with file_load_completed == false.
void populateBlock(std::vector<shotData> *shot_block, std::vector<char *> *filelist, int block_num) {
	for (int slot = 0; slot < file_block_size; slot++) {
		shotData &shot = (*shot_block)[slot];
		//Assume the worst until the file loads cleanly
		shot.file_load_completed = false;
		//Index of this slot's file within the full file list
		int file_id = block_num * file_block_size + slot;
		if (file_id >= (int)filelist->size()) {
			continue;
		}
		char *fname = (*filelist)[file_id];
		try {
			fileToShotData(&shot, fname);
			shot.file_load_completed = true;
		}
		catch (...) {
			//Corrupted/unreadable file: report it and leave the slot flagged as not loaded
			printf("%s appears corrupted\n", fname);
		}
	}
}
//Process the time tags, assigning them to the correct channel, binning them
//appropriately and removing tags which do not fall in the clock mask.
//Tags are 27-bit counters with "high word" marker tags extending the range;
//all times are re-referenced to start_tags[1].
void sortTags(shotData *shot_data) {
	int high_count = 0;
	//Loop over all tags in clock_tags
	for (size_t i = 0; i < shot_data->clock_tags.size(); i++) {
		//Check if clock tag is a high word
		if (shot_data->clock_tags[i] & 1) {
			//Up the high count
			high_count++;
		}
		else {
			//Determine whether it is the rising (start) or falling (end) slope
			int slope = ((shot_data->clock_tags[i] >> 28) & 1);
			//Widen high_count before shifting: "high_count << 27" is 32-bit int
			//arithmetic and overflows once more than 15 high words have been seen
			long long int clock_tag = ((shot_data->clock_tags[i] >> 1) & 0x7FFFFFF) + ((long long int)high_count << 27) - ((shot_data->start_tags[1] >> 1) & 0x7FFFFFF);
			//Put tag in appropriate clock tag vector and increment the pointer
			//for said vector, guarding the fixed-capacity buffer
			if (shot_data->sorted_clock_tag_pointers[slope] < (long int)shot_data->sorted_clock_tags[slope].size()) {
				shot_data->sorted_clock_tags[slope][shot_data->sorted_clock_tag_pointers[slope]] = clock_tag;
				shot_data->sorted_clock_tag_pointers[slope]++;
			}
		}
	}
	high_count = 0;
	//Clock pointer
	int clock_pointer = 0;
	//Loop over all tags in photon_tags
	for (size_t i = 0; i < shot_data->photon_tags.size(); i++) {
		//Check if photon tag is a high word
		if (shot_data->photon_tags[i] & 1) {
			//Up the high count
			high_count++;
		}
		else {
			//Figure out if it fits within the mask (same widening fix as above)
			long long int time_tag = ((shot_data->photon_tags[i] >> 1) & 0x7FFFFFF) + ((long long int)high_count << 27) - ((shot_data->start_tags[1] >> 1) & 0x7FFFFFF);
			bool valid = true;
			while (valid) {
				//Advance the clock pointer while the tag lies at/beyond the current
				//start tag.  Bounds check FIRST: the old code indexed
				//sorted_clock_tags[1][clock_pointer] before testing clock_pointer
				//against the number of start tags (and glued the tests with
				//bitwise '&', which does not short-circuit).
				if ((clock_pointer < shot_data->sorted_clock_tag_pointers[1]) && (time_tag >= shot_data->sorted_clock_tags[1][clock_pointer])) {
					clock_pointer++;
				}
				//Make sure clock_pointer is greater than 0, preventing an underflow error
				else if (clock_pointer > 0) {
					//Check if tag is lower than previous end tag i.e. startTags[j-1] < channeltags[i] < endTags[j-1]
					if (time_tag <= shot_data->sorted_clock_tags[0][clock_pointer - 1]) {
						//Determine the index for the given tag (channel ids are 1-based in the file)
						auto channel_entry = shot_data->channel_map.find((short int)(((shot_data->photon_tags[i] >> 29) & 7) + 1));
						//Only keep tags whose channel is in the map; the old code
						//dereferenced find()->second unconditionally (UB on a miss)
						if (channel_entry != shot_data->channel_map.end()) {
							int channel_index = channel_entry->second;
							//Store the tag, guarding the fixed-capacity buffer
							if (shot_data->sorted_photon_tag_pointers[channel_index] < (long int)shot_data->sorted_photon_tags[channel_index].size()) {
								shot_data->sorted_photon_tags[channel_index][shot_data->sorted_photon_tag_pointers[channel_index]] = time_tag;
								shot_data->sorted_photon_tag_pointers[channel_index]++;
							}
						}
					}
					//Break the valid loop
					valid = false;
				}
				// If tag is smaller than the first start tag
				else {
					valid = false;
				}
			}
		}
	}
}
//Convert sorted photon and clock tags (in tagger ticks) into bin indices of
//width bin_width seconds, writing the results into the parallel *_bins arrays.
void tagsToBins(shotData *shot_data, double bin_width) {
	//Bin width expressed in tagger ticks
	double norm_bin_width = bin_width / tagger_resolution;
	//Parallelise over channels only.  The original also put
	//"#pragma omp parallel for" on the inner loop; nested parallelism is
	//disabled by default, so that only paid region-creation overhead per
	//channel without adding any parallelism.
	#pragma omp parallel for
	for (int channel = 0; channel < (int)shot_data->sorted_photon_bins.size(); channel++) {
		for (int i = 0; i < shot_data->sorted_photon_tag_pointers[channel]; i++) {
			//long long / double promotes to double before ceil
			shot_data->sorted_photon_bins[channel][i] = (long int)ceil(shot_data->sorted_photon_tags[channel][i] / norm_bin_width);
		}
	}
	//Same conversion for the start (slope 1) and end (slope 0) clock tags
	for (int slope = 0; slope <= 1; slope++) {
		#pragma omp parallel for
		for (int i = 0; i < shot_data->sorted_clock_tag_pointers[slope]; i++) {
			shot_data->sorted_clock_bins[slope][i] = (long int)ceil(shot_data->sorted_clock_tags[slope][i] / norm_bin_width);
		}
	}
}
//Sorts photons and bins them for each file in a block.
//Files that failed to load are skipped; the per-file work is independent, so
//the block is processed in parallel.
void sortAndBinBlock(std::vector<shotData> *shot_block, double bin_width) {
	#pragma omp parallel for
	for (int shot_file_num = 0; shot_file_num < file_block_size; shot_file_num++) {
		shotData &shot = (*shot_block)[shot_file_num];
		if (!shot.file_load_completed) {
			continue;
		}
		sortTags(&shot);
		tagsToBins(&shot, bin_width);
	}
}
//Debug helper: dump index, raw tag and binned tag for one channel of a shot.
void printShotChannelBins(shotData *shot_data, int channel) {
	for (int i = 0; i < shot_data->sorted_photon_tag_pointers[channel]; i++) {
		//%lld / %ld match the long long tag and long bin element types; the
		//old "%i" for both was undefined behaviour on 64-bit values
		printf("%i\t%lld\t%ld\n", i, shot_data->sorted_photon_tags[channel][i], shot_data->sorted_photon_bins[channel][i]);
	}
}
//MEX gateway.
//Inputs : prhs[0] cell array of HDF5 shot filenames
//         prhs[1] bin width (s), prhs[2] max correlation time (s)
//         prhs[3] pulse spacing (s), prhs[4] max pulse distance (pulses)
//         prhs[5] CUDA/HIP device number
//Outputs: plhs[0] int32 numerator histogram of (2*max_bin+1)^2 elements
//         plhs[1] int32 scalar denominator (shifted-pulse accidentals)
//Files are processed in blocks of file_block_size, each file on its own
//stream with pinned staging buffers so copies overlap kernel execution.
void mexFunction(int nlhs, mxArray* plhs[], int nrgs, const mxArray* prhs[]) {
	//Get list of files to process
	mxArray *cell_element_ptr;
	mwSize total_num_files, buflen;
	//Figure out how many files there are and allocate a vector to hold strings
	total_num_files = mxGetNumberOfElements(prhs[0]);
	std::vector<char *> filelist(total_num_files);
	//Grab filename and stick it into filelist vector
	for (int i = 0; i < (int)total_num_files; i++) {
		cell_element_ptr = mxGetCell(prhs[0], i);
		buflen = mxGetN(cell_element_ptr) * sizeof(mxChar) + 1;
		filelist[i] = (char *)mxMalloc(buflen);
		mxGetString(cell_element_ptr, filelist[i], buflen);
	}
	//Scalar parameters handed in from MATLAB
	double *max_time = (double *)mxGetData(prhs[2]);
	double *bin_width = (double *)mxGetData(prhs[1]);
	double *pulse_spacing = (double *)mxGetData(prhs[3]);
	int *max_pulse_distance = (int *)mxGetData(prhs[4]);
	int *cuda_device_number = (int *)mxGetData(prhs[5]);
	printf("Bin width\t%fs\t%fns\t%fs\t%i\n", *max_time * 1e6, *bin_width * 1e9, *pulse_spacing * 1e6, *max_pulse_distance);
	//Convert times into units of bins
	int max_bin = (int)round(*max_time / *bin_width);
	int bin_pulse_spacing = (int)round(*pulse_spacing / *bin_width);
	//Number of coincidence elements each GPU stream accumulates: the
	//(2*max_bin+1)^2 numerator histogram followed by the
	//(2*max_pulse_distance)^2 denominator entries
	const int numer_elts = (2 * max_bin + 1) * (2 * max_bin + 1);
	const int coinc_elts = numer_elts + (*max_pulse_distance * 2) * (*max_pulse_distance * 2);
	//Create our arrays to hold the denominator and numerator
	plhs[0] = mxCreateNumericMatrix(1, numer_elts, mxINT32_CLASS, mxREAL);
	//The outputs are mxINT32_CLASS, so view them as 32-bit ints; the old
	//"long int*" view overruns the buffer on LP64 platforms where long is
	//8 bytes wide
	int* numer = (int*)mxGetData(plhs[0]);
	plhs[1] = mxCreateNumericMatrix(1, 1, mxINT32_CLASS, mxREAL);
	int* denom = (int*)mxGetData(plhs[1]);
	//Initialise denom and numer to zero
#pragma omp parallel for
	for (int i = 0; i < numer_elts; i++) {
		numer[i] = 0;
	}
	denom[0] = 0;
	//Figure out how many file blocks we need (ceiling division, at least one)
	int blocks_req;
	if ((int)total_num_files < file_block_size) {
		blocks_req = 1;
	}
	else if ((total_num_files % file_block_size) == 0) {
		blocks_req = (int)(total_num_files / file_block_size);
	}
	else {
		blocks_req = (int)(total_num_files / file_block_size) + 1;
	}
	//mwSize is 64-bit, so cast for %i
	printf("Processing %i files in %i blocks\n", (int)total_num_files, blocks_req);
	//Everything the Error path releases is declared and null-initialised
	//BEFORE the first "goto Error", so (a) no goto jumps over an
	//initialisation (ill-formed in C++) and (b) we never free an
	//uninitialised pointer, which the old code did when hipSetDevice failed
	long int* pinned_photon_bins = NULL;
	long int* pinned_start_and_end_clocks = NULL;
	int* pinned_photon_bins_length = NULL;
	long int *streamed_coinc = NULL;
	gpuData gpu_data;
	gpu_data.coinc_gpu = NULL;
	gpu_data.photon_bins_gpu = NULL;
	gpu_data.start_and_end_clocks_gpu = NULL;
	gpu_data.max_bin_gpu = NULL;
	gpu_data.pulse_spacing_gpu = NULL;
	gpu_data.max_pulse_distance_gpu = NULL;
	gpu_data.photon_bins_length_gpu = NULL;
	gpu_data.offset_gpu = NULL;
	//Launch configuration for the coincidence kernel: one thread per
	//coincidence element, 128 threads per block, ceiling division for blocks
	int threads_per_block_numer = 128;
	int cuda_blocks_numer = (coinc_elts + threads_per_block_numer - 1) / threads_per_block_numer;
	//Launch configuration retained for the standalone denominator kernel
	//(not launched by this gateway; the coincidence kernel also accumulates
	//the denominator entries)
	int threads_per_block_dim_denom = (*max_pulse_distance * 2 + 1);
	dim3 cuda_threads_denom(threads_per_block_dim_denom, threads_per_block_dim_denom);
	dim3 cuda_blocks_denom(1, 1);
	hipError_t cudaStatus = hipSetDevice(*cuda_device_number);
	if (cudaStatus != hipSuccess) {
		mexPrintf("hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
		goto Error;
	}
	//Pinned host staging buffers for fast, asynchronous host-GPU DMA
	hipHostMalloc((long int**)&pinned_photon_bins, max_tags_length * max_channels * file_block_size * sizeof(long int));
	hipHostMalloc((long int**)&pinned_start_and_end_clocks, 2 * file_block_size * sizeof(long int));
	hipHostMalloc((int**)&pinned_photon_bins_length, max_channels * file_block_size * sizeof(int));
	//Allocate device memory, one segment per stream/shot file
	cudaStatus = hipMalloc((void**)&(gpu_data.photon_bins_gpu), max_channels * max_tags_length * file_block_size * sizeof(long int));
	if (cudaStatus != hipSuccess) {
		mexPrintf("hipMalloc photon_bins_gpu failed\n");
		mexPrintf("%s\n", hipGetErrorString(cudaStatus));
		goto Error;
	}
	cudaStatus = hipMalloc((void**)&(gpu_data.offset_gpu), max_channels * file_block_size * sizeof(int));
	if (cudaStatus != hipSuccess) {
		mexPrintf("hipMalloc offset_gpu failed!\n");
		goto Error;
	}
	cudaStatus = hipMalloc((void**)&(gpu_data.photon_bins_length_gpu), max_channels * file_block_size * sizeof(int));
	if (cudaStatus != hipSuccess) {
		mexPrintf("hipMalloc photon_bins_length_gpu failed!\n");
		goto Error;
	}
	cudaStatus = hipMalloc((void**)&(gpu_data.coinc_gpu), coinc_elts * file_block_size * sizeof(long int));
	if (cudaStatus != hipSuccess) {
		mexPrintf("hipMalloc coinc_gpu failed!\n");
		goto Error;
	}
	cudaStatus = hipMalloc((void**)&(gpu_data.start_and_end_clocks_gpu), 2 * file_block_size * sizeof(long int));
	if (cudaStatus != hipSuccess) {
		mexPrintf("hipMalloc start_and_end_clocks_gpu failed!\n");
		goto Error;
	}
	cudaStatus = hipMalloc((void**)&(gpu_data.max_bin_gpu), sizeof(int));
	if (cudaStatus != hipSuccess) {
		mexPrintf("hipMalloc max_bin_gpu failed!\n");
		goto Error;
	}
	cudaStatus = hipMalloc((void**)&(gpu_data.pulse_spacing_gpu), sizeof(int));
	if (cudaStatus != hipSuccess) {
		mexPrintf("hipMalloc pulse_spacing_gpu failed!\n");
		goto Error;
	}
	cudaStatus = hipMalloc((void**)&(gpu_data.max_pulse_distance_gpu), sizeof(int));
	if (cudaStatus != hipSuccess) {
		mexPrintf("hipMalloc max_pulse_distance_gpu failed!\n");
		goto Error;
	}
	//And set some values that are constant across all data
	cudaStatus = hipMemcpy((gpu_data.max_bin_gpu), &max_bin, sizeof(int), hipMemcpyHostToDevice);
	if (cudaStatus != hipSuccess) {
		mexPrintf("hipMemcpy max_bin failed!\n");
		goto Error;
	}
	cudaStatus = hipMemcpy((gpu_data.pulse_spacing_gpu), &bin_pulse_spacing, sizeof(int), hipMemcpyHostToDevice);
	if (cudaStatus != hipSuccess) {
		mexPrintf("hipMemcpy pulse_spacing failed!\n");
		goto Error;
	}
	cudaStatus = hipMemcpy((gpu_data.max_pulse_distance_gpu), max_pulse_distance, sizeof(int), hipMemcpyHostToDevice);
	if (cudaStatus != hipSuccess) {
		mexPrintf("hipMemcpy max_pulse_distance failed!\n");
		goto Error;
	}
	//Pointer (offset) to the first photon bin of each channel segment
	int host_offset_array[max_channels * file_block_size];
	for (int i = 0; i < max_channels * file_block_size; i++) {
		host_offset_array[i] = i * max_tags_length;
	}
	cudaStatus = hipMemcpy((gpu_data.offset_gpu), host_offset_array, max_channels * file_block_size * sizeof(int), hipMemcpyHostToDevice);
	if (cudaStatus != hipSuccess) {
		mexPrintf("hipMemcpy offsets failed!\n");
		goto Error;
	}
	//Zero the per-stream coincidence accumulators
	cudaStatus = hipMemset(gpu_data.coinc_gpu, 0, coinc_elts * file_block_size * sizeof(long int));
	if (cudaStatus != hipSuccess) {
		mexPrintf("hipMemset failed!\n");
		goto Error;
	}
	//One stream per shot file for copy/compute overlap
	hipStream_t streams[file_block_size];
	for (int i = 0; i < file_block_size; i++) {
		hipStreamCreate(&streams[i]);
	}
	//One event per stream to know when its staging buffers can be reused
	hipEvent_t events[file_block_size];
	for (int i = 0; i < file_block_size; i++) {
		hipEventCreate(&events[i]);
	}
	//Processes files in blocks
	for (int block_num = 0; block_num < blocks_req; block_num++) {
		//Load, sort and bin a block of shot files on the host
		std::vector<shotData> shot_block(file_block_size);
		populateBlock(&shot_block, &filelist, block_num);
		sortAndBinBlock(&shot_block, *bin_width);
		//Check for any errors launching the previous block's kernels
		cudaStatus = hipGetLastError();
		if (cudaStatus != hipSuccess) {
			mexPrintf("calculateCoincidenceGPU_g3 launch failed: %s\n", hipGetErrorString(cudaStatus));
			goto Error;
		}
		//Asynchronously stage data to the GPU and launch one kernel per shot file
		for (int shot_file_num = 0; shot_file_num < file_block_size; shot_file_num++) {
			if ((shot_block)[shot_file_num].file_load_completed) {
				int num_channels = (shot_block)[shot_file_num].channel_list.size();
				//g3 needs at least three channels
				if (num_channels >= 3) {
					std::vector<long int*> photon_bins;
					long int start_and_end_clocks[2];
					std::vector<int> photon_bins_length;
					photon_bins.resize(max_channels);
					photon_bins_length.resize(max_channels);
					start_and_end_clocks[0] = (shot_block)[shot_file_num].sorted_clock_bins[1][0];
					start_and_end_clocks[1] = (shot_block)[shot_file_num].sorted_clock_bins[0][0];
					for (int i = 0; i < num_channels; i++) {
						photon_bins[i] = &((shot_block)[shot_file_num].sorted_photon_bins[i][0]);
						photon_bins_length[i] = (shot_block)[shot_file_num].sorted_photon_tag_pointers[i];
					}
					//Synch to ensure the previous async memcopy on this stream has
					//drained before we overwrite the pinned staging buffers
					hipEventSynchronize(events[shot_file_num]);
					//Stage and copy photon bins channel by channel
					int photon_offset = shot_file_num * max_channels * max_tags_length;
					for (int i = 0; i < (int)photon_bins_length.size(); i++) {
						memcpy(pinned_photon_bins + photon_offset, photon_bins[i], photon_bins_length[i] * sizeof(long int));
						cudaStatus = hipMemcpyAsync(gpu_data.photon_bins_gpu + photon_offset, pinned_photon_bins + photon_offset, photon_bins_length[i] * sizeof(long int), hipMemcpyHostToDevice, streams[shot_file_num]);
						if (cudaStatus != hipSuccess) {
							mexPrintf("%i\t%i\n", block_num, shot_file_num);
							mexPrintf("hipMemcpy photon_offset failed! Error message: %s\n", hipGetErrorString(cudaStatus));
							goto Error;
						}
						photon_offset += max_tags_length;
					}
					//Stage and copy the clock window for this shot
					int clock_offset = shot_file_num * 2;
					memcpy(pinned_start_and_end_clocks + clock_offset, start_and_end_clocks, 2 * sizeof(long int));
					cudaStatus = hipMemcpyAsync(gpu_data.start_and_end_clocks_gpu + clock_offset, pinned_start_and_end_clocks + clock_offset, 2 * sizeof(long int), hipMemcpyHostToDevice, streams[shot_file_num]);
					if (cudaStatus != hipSuccess) {
						mexPrintf("hipMemcpy clock_offset failed!\n");
						goto Error;
					}
					//Stage and copy the per-channel tag counts
					int length_offset = shot_file_num * max_channels;
					for (int i = 0; i < (int)photon_bins_length.size(); i++) {
						pinned_photon_bins_length[i + length_offset] = photon_bins_length[i];
					}
					cudaStatus = hipMemcpyAsync(gpu_data.photon_bins_length_gpu + length_offset, pinned_photon_bins_length + length_offset, max_channels * sizeof(int), hipMemcpyHostToDevice, streams[shot_file_num]);
					if (cudaStatus != hipSuccess) {
						mexPrintf("hipMemcpy length_offset failed!\n");
						goto Error;
					}
					//Record an event so the next block knows when these copies have completed
					hipEventRecord(events[shot_file_num], streams[shot_file_num]);
					//Launch the coincidence kernel (numerator + denominator) for this shot file
					calculateCoincidenceGPU_g3 << <cuda_blocks_numer, threads_per_block_numer, 0, streams[shot_file_num] >> >(gpu_data.coinc_gpu, gpu_data.photon_bins_gpu, gpu_data.start_and_end_clocks_gpu, gpu_data.max_bin_gpu, gpu_data.pulse_spacing_gpu, gpu_data.max_pulse_distance_gpu, gpu_data.offset_gpu, gpu_data.photon_bins_length_gpu, num_channels, shot_file_num);
				}
			}
		}
		mexPrintf("Finished block %i/%i\n", block_num + 1, blocks_req);
		mexEvalString("pause(.0001);");
	}
	//Wait for all streams to finish and surface any asynchronous kernel errors
	cudaStatus = hipDeviceSynchronize();
	if (cudaStatus != hipSuccess) {
		mexPrintf("hipDeviceSynchronize returned error code %d after launching kernels!\n", cudaStatus);
		goto Error;
	}
	//Pull the per-stream coincidence accumulators off the GPU
	streamed_coinc = (long int *)malloc((size_t)coinc_elts * file_block_size * sizeof(long int));
	if (streamed_coinc == NULL) {
		mexPrintf("malloc of streamed_coinc failed!\n");
		goto Error;
	}
	cudaStatus = hipMemcpy(streamed_coinc, gpu_data.coinc_gpu, (size_t)coinc_elts * file_block_size * sizeof(long int), hipMemcpyDeviceToHost);
	if (cudaStatus != hipSuccess) {
		mexPrintf("hipMemcpy of coinc_gpu failed!\n");
		goto Error;
	}
	//Collapse the per-stream accumulators into the MATLAB outputs: the first
	//numer_elts entries of each stream feed the numerator histogram, the
	//remaining pulse-shifted entries feed the denominator
	for (int i = 0; i < file_block_size; i++) {
		for (int j = 0; j < coinc_elts; j++) {
			if (j < numer_elts) {
				numer[j] += (int)streamed_coinc[j + i * coinc_elts];
			}
			else {
				denom[0] += (int)streamed_coinc[j + i * coinc_elts];
			}
		}
	}
	//Free filenames we malloc'd earlier
	for (int i = 0; i < (int)total_num_files; i++) {
		mxFree(filelist[i]);
	}
	//Fall through into the shared cleanup below
Error:
	//Single cleanup path for both success and failure.  free()/hipFree()/
	//hipHostFree() all tolerate NULL, and every resource is released exactly
	//once (the old code freed the pinned buffers twice on the success path,
	//reset the device and then tried to free pointers on the dead context,
	//and reset the device twice).
	free(streamed_coinc);
	hipFree(gpu_data.coinc_gpu);
	hipFree(gpu_data.offset_gpu);
	hipFree(gpu_data.max_bin_gpu);
	hipFree(gpu_data.pulse_spacing_gpu);
	hipFree(gpu_data.max_pulse_distance_gpu);
	hipFree(gpu_data.photon_bins_length_gpu);
	hipFree(gpu_data.photon_bins_gpu);
	hipFree(gpu_data.start_and_end_clocks_gpu);
	hipHostFree(pinned_photon_bins);
	hipHostFree(pinned_photon_bins_length);
	hipHostFree(pinned_start_and_end_clocks);
	//Release the CUDA device last, after all frees
	cudaStatus = hipDeviceReset();
	if (cudaStatus != hipSuccess) {
		mexPrintf("hipDeviceReset failed!\n");
	}
}
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include "H5Cpp.h"
#include <vector>
#include <H5Exception.h>
#include <map>
#include <omp.h>
// The following lines must be located BEFORE '#include <mex.h>'
#ifdef _MSC_VER
#define DLL_EXPORT_SYM __declspec(dllexport)
#else
#define DLL_EXPORT_SYM
#endif
#include <mex.h>
//Maximum number of photon tags one channel of one shot file may hold
const int max_tags_length = 200000;
//Maximum number of clock tags per slope (start/end) per shot file
const int max_clock_tags_length = 5000;
//Number of detection channels the fixed-size buffers are dimensioned for
const int max_channels = 3;
//NOTE(review): not referenced in this chunk -- presumably the number of MEX return values; confirm before removing
const size_t return_size = 3;
//Number of shot files loaded, staged and streamed to the GPU per block
const int file_block_size = 64;
//Time tagger resolution in seconds per tick
const double tagger_resolution = 82.3e-12;
//All tag data for a single shot file, both as read from disk and in the
//per-channel sorted/binned form the GPU kernels consume.  The sorted_*
//vectors are fixed-capacity; the *_pointers vectors count the valid entries.
struct shotData {
//True once fileToShotData() has read this file without throwing
bool file_load_completed;
//Detector channel ids present in the file (Inform/ChannelList)
std::vector<short int> channel_list;
//Maps a channel id to its index in the sorted_* arrays
std::map<short int, short int> channel_map;
//Raw 64-bit tag words as read from the HDF5 datasets
std::vector<long long int> start_tags;
std::vector<long long int> end_tags;
std::vector<long long int> photon_tags;
std::vector<long long int> clock_tags;
//Per-channel photon arrival times that fell inside the clock mask
std::vector<std::vector<long long int>> sorted_photon_tags;
//Per-channel binned arrival times (filled by tagsToBins)
std::vector<std::vector<long int>> sorted_photon_bins;
//Clock edges split by slope; [1] appears to hold start/rising edges and [0] end/falling -- confirm against sortTags
std::vector<std::vector<long long int>> sorted_clock_tags;
std::vector<std::vector<long int>> sorted_clock_bins;
//Fill counts for the fixed-capacity vectors above
std::vector<long int> sorted_photon_tag_pointers;
std::vector<long int> sorted_clock_tag_pointers;
//Pre-size every fixed-capacity buffer and zero all the fill pointers
shotData() : sorted_photon_tags(max_channels, std::vector<long long int>(max_tags_length, 0)), sorted_photon_bins(max_channels, std::vector<long int>(max_tags_length, 0)), sorted_photon_tag_pointers(max_channels, 0), sorted_clock_tags(2, std::vector<long long int>(max_clock_tags_length, 0)), sorted_clock_bins(2, std::vector<long int>(max_clock_tags_length, 0)), sorted_clock_tag_pointers(2, 0) {}
};
//Device-side buffers shared by all GPU streams; each stream (shot file)
//owns a fixed-offset segment within the large arrays.
struct gpuData {
//Per-stream coincidence accumulator: numerator histogram followed by denominator entries
long int *coinc_gpu;
//Binned photon arrival times, max_tags_length entries per channel per stream
long int *photon_bins_gpu;
//First (start) and last (end) clock bin, two entries per stream
long int *start_and_end_clocks_gpu;
//Scalar parameters (max tau in bins, pulse spacing in bins, max pulse shift) and per-channel tag counts
int *max_bin_gpu, *pulse_spacing_gpu, *max_pulse_distance_gpu, *photon_bins_length_gpu;
//Offset of each channel segment's first element within photon_bins_gpu
int *offset_gpu;
};
__global__ void calculateCoincidenceGPU_g3(long int *coinc, long int *photon_bins, long int *start_and_end_clocks, int *max_bin, int *pulse_spacing, int *max_pulse_distance, int *offset, int *photon_bins_length, int num_channels, int shot_file_num) {
//Get numerator step to work on
int id = threadIdx.x;
int block = blockIdx.x;
int block_size = blockDim.x;
//Check if the id is something we're going to do a calculation on
int in_range = (block * block_size + id) < ((*max_bin * 2 + 1) * (*max_bin * 2 + 1) + (*max_pulse_distance * 2) * (*max_pulse_distance * 2));
//Check if we're doing a denominator calculation
int pulse_shift_measurement = in_range && (block * block_size + id >= (*max_bin * 2 + 1) * (*max_bin * 2 + 1));
//Determine effective id for x and y
int id_x = ((block * block_size + id) % (2 * (*max_bin) + 1)) * (!pulse_shift_measurement);
id_x += ((block * block_size + id - (2 * (*max_bin) + 1) * (2 * (*max_bin) + 1)) % (2 * (*max_pulse_distance))) * (pulse_shift_measurement);
int id_y = ((block * block_size + id) / (2 * (*max_bin) + 1)) * (!pulse_shift_measurement);
id_y += ((block * block_size + id - (2 * (*max_bin) + 1) * (2 * (*max_bin) + 1)) / (2 * (*max_pulse_distance))) * (pulse_shift_measurement);
//Check we're not calculating something out of range
if (in_range && (!pulse_shift_measurement || (pulse_shift_measurement && id_x != id_y))) {
int tau_1 = (id_x - (*max_bin)) * (!pulse_shift_measurement);
int pulse_shift_1 = ((id_x - (*max_pulse_distance)) + ((id_x - (*max_pulse_distance)) >= 0)) * (pulse_shift_measurement);
int pulse_shift_2 = ((id_y - (*max_pulse_distance)) + ((id_y - (*max_pulse_distance)) >= 0)) * (pulse_shift_measurement);
tau_1 += pulse_shift_1 * (*pulse_spacing);
int tau_2 = (id_y - (*max_bin)) * (!pulse_shift_measurement);
tau_2 += pulse_shift_2 * (*pulse_spacing);
for (int channel_1 = 0; channel_1 < num_channels; channel_1++) {
for (int channel_2 = channel_1 + 1; channel_2 < num_channels; channel_2++) {
for (int channel_3 = channel_2 + 1; channel_3 < num_channels; channel_3++) {
int i = 0;
int j = 0;
int k = 0;
int running_tot = 0;
while ((i < photon_bins_length[channel_1 + shot_file_num * max_channels]) && (j < photon_bins_length[channel_2 + shot_file_num * max_channels]) && (k < photon_bins_length[channel_3 + shot_file_num * max_channels])) {
int dummy_i = 0;
int dummy_j = 0;
int dummy_k = 0;
int out_window = (photon_bins[offset[channel_1 + shot_file_num * max_channels] + i] < (*max_bin + *max_pulse_distance * *pulse_spacing + start_and_end_clocks[0 + shot_file_num * 2])) || (photon_bins[offset[channel_1 + shot_file_num * max_channels] + i] > (start_and_end_clocks[1 + shot_file_num * 2] - (*max_bin + *max_pulse_distance * *pulse_spacing)));
//Chan_1 > chan_2
int c1_g_c2 = !out_window && (photon_bins[offset[channel_1 + shot_file_num * max_channels] + i] >(photon_bins[offset[channel_2 + shot_file_num * max_channels] + j] - tau_1));
//Chan_1 > chan_3
int c1_g_c3 = !out_window && (photon_bins[offset[channel_1 + shot_file_num * max_channels] + i] >(photon_bins[offset[channel_3 + shot_file_num * max_channels] + k] - tau_2));
////Chan_1 < chan_2
//int c1_l_c2 = !out_window && (photon_bins[offset[channel_1 + shot_file_num * max_channels] + i] < (photon_bins[offset[channel_2 + shot_file_num * max_channels] + j] - tau_1));
////Chan_1 < chan_3
//int c1_l_c3 = !out_window && (photon_bins[offset[channel_1 + shot_file_num * max_channels] + i] > (photon_bins[offset[channel_3 + shot_file_num * max_channels] + k] - tau_2));
//Chan_1 == chan_2
int c1_e_c2 = !out_window && (photon_bins[offset[channel_1 + shot_file_num * max_channels] + i] == (photon_bins[offset[channel_2 + shot_file_num * max_channels] + j] - tau_1));
//Chan_1 == chan_3
int c1_e_c3 = !out_window && (photon_bins[offset[channel_1 + shot_file_num * max_channels] + i] == (photon_bins[offset[channel_3 + shot_file_num * max_channels] + k] - tau_2));
//Increment i if we're outside the window of interest
dummy_i = out_window;
//Start by using chan_1 as a reference for chan_2 and chan_3 to get them to catch up
//Increment j if chan_2 < chan_1
dummy_j += !out_window && c1_g_c2;
//Increment k if chan_3 < chan_1
dummy_k += !out_window && c1_g_c3;
//Now need to deal with situation where chan_1 !> chan_2 && chan_1 !> chan_3
//First the easy situation where chan_1 == chan_2 == chan_3
running_tot += !out_window && c1_e_c2 && c1_e_c3;
dummy_i += !out_window && c1_e_c2 && c1_e_c3;
dummy_j += !out_window && c1_e_c2 && c1_e_c3;
dummy_k += !out_window && c1_e_c2 && c1_e_c3;
//If we haven't incremented dummy_j or dummy_k then by process of elimination dummy_i needs to incremented
dummy_i += !out_window && !dummy_j && !dummy_k;
//running_tot += in_window;
i += dummy_i;
j += dummy_j;
k += dummy_k;
}
coinc[(block * block_size + id) + shot_file_num * ((*max_bin * 2 + 1) * (*max_bin * 2 + 1) + (*max_pulse_distance * 2) * (*max_pulse_distance * 2))] += running_tot;
//coinc[(block * block_size + id) + shot_file_num * ((*max_bin * 2 + 1) * (*max_bin * 2 + 1) + (*max_pulse_distance * 2) * (*max_pulse_distance * 2))] = (block * block_size + id) + shot_file_num * ((*max_bin * 2 + 1) * (*max_bin * 2 + 1) + (*max_pulse_distance * 2) * (*max_pulse_distance * 2));
}
}
}
}
}
//Kernel: accumulates the g3 (three-fold) coincidence denominator for one shot file.
//Launch layout: 2-D grid of 2-D blocks; the global (x, y) thread coordinate selects one
//(pulse_shift_1, pulse_shift_2) pair from [-max_pulse_distance, +max_pulse_distance]^2.
//Pairs where either shift is zero or the shifts are equal are skipped, so only genuinely
//shifted (accidental-coincidence) pulse combinations contribute to the denominator.
//All scalar parameters are passed as device pointers to single values.
//NOTE(review): relies on the file-scope constant `max_channels` defined elsewhere in this file.
__global__ void calculateDenominatorGPU_g3(long int *denom, long int *photon_bins, long int *start_and_end_clocks, int *max_bin, int *pulse_spacing, int *max_pulse_distance, int *offset, int *photon_bins_length, int num_channels, int shot_file_num) {
//Get denominator step to work on
int id_x = threadIdx.x;
int block_x = blockIdx.x;
int block_size_x = blockDim.x;
int id_y = threadIdx.y;
int block_y = blockIdx.y;
int block_size_y = blockDim.y;
//Check we're not calculating something out of range
if ((block_x * block_size_x + id_x < *max_pulse_distance * 2 + 1) && (block_y * block_size_y + id_y < *max_pulse_distance * 2 + 1)) {
//Map the 2-D thread coordinate onto signed pulse shifts centred on zero
int pulse_shift_1 = block_x * block_size_x + id_x - (*max_pulse_distance);
int pulse_shift_2 = block_y * block_size_y + id_y - (*max_pulse_distance);
//Skip zero shifts and the diagonal (both channels shifted by the same amount)
if ((pulse_shift_1 != 0) && (pulse_shift_2 != 0) && (pulse_shift_1 != pulse_shift_2)) {
//Iterate over every ordered triple of distinct channels (c1 < c2 < c3)
for (int channel_1 = 0; channel_1 < num_channels; channel_1++) {
for (int channel_2 = channel_1 + 1; channel_2 < num_channels; channel_2++) {
for (int channel_3 = channel_2 + 1; channel_3 < num_channels; channel_3++) {
//Three-pointer merge walk across the (sorted) bin lists of the three channels.
//i/j/k index channel_1/2/3 respectively; the branchless int flags below decide
//which pointer(s) advance each iteration, avoiding warp divergence.
int i = 0;
int j = 0;
int k = 0;
int running_tot = 0;
while ((i < photon_bins_length[channel_1 + shot_file_num * max_channels]) && (j < photon_bins_length[channel_2 + shot_file_num * max_channels]) && (k < photon_bins_length[channel_3 + shot_file_num * max_channels])) {
int dummy_i = 0;
int dummy_j = 0;
int dummy_k = 0;
//Reference photon (channel_1) must sit inside the valid clock window, padded by
//the largest correlation reach (max_bin + max_pulse_distance * pulse_spacing)
int out_window = (photon_bins[offset[channel_1 + shot_file_num * max_channels] + i] < (*max_bin + *max_pulse_distance * *pulse_spacing + start_and_end_clocks[0 + shot_file_num * 2])) || (photon_bins[offset[channel_1 + shot_file_num * max_channels] + i] > (start_and_end_clocks[1 + shot_file_num * 2] - (*max_bin + *max_pulse_distance * *pulse_spacing)));
//Chan_1 > chan_2
int c1_g_c2 = !out_window && (photon_bins[offset[channel_1 + shot_file_num * max_channels] + i] >(photon_bins[offset[channel_2 + shot_file_num * max_channels] + j] - pulse_shift_1));
//Chan_1 > chan_3
int c1_g_c3 = !out_window && (photon_bins[offset[channel_1 + shot_file_num * max_channels] + i] >(photon_bins[offset[channel_3 + shot_file_num * max_channels] + k] - pulse_shift_2));
////Chan_1 < chan_2
//int c1_l_c2 = !out_window && (photon_bins[offset[channel_1 + shot_file_num * max_channels] + i] < (photon_bins[offset[channel_2 + shot_file_num * max_channels] + j] - pulse_shift_1));
////Chan_1 < chan_3
//int c1_l_c3 = !out_window && (photon_bins[offset[channel_1 + shot_file_num * max_channels] + i] > (photon_bins[offset[channel_3 + shot_file_num * max_channels] + k] - pulse_shift_2));
//Chan_1 == chan_2
int c1_e_c2 = !out_window && (photon_bins[offset[channel_1 + shot_file_num * max_channels] + i] == (photon_bins[offset[channel_2 + shot_file_num * max_channels] + j] - pulse_shift_1));
//Chan_1 == chan_3
int c1_e_c3 = !out_window && (photon_bins[offset[channel_1 + shot_file_num * max_channels] + i] == (photon_bins[offset[channel_3 + shot_file_num * max_channels] + k] - pulse_shift_2));
//Increment i if we're outside the window of interest
dummy_i = out_window;
//Start by using chan_1 as a reference for chan_2 and chan_3 to get them to catch up
//Increment j if chan_2 < chan_1
dummy_j += !out_window && c1_g_c2;
//Increment k if chan_3 < chan_1
dummy_k += !out_window && c1_g_c3;
//Now need to deal with situation where chan_1 !> chan_2 && chan_1 !> chan_3
//First the easy situation where chan_1 == chan_2 == chan_3: count a coincidence
//and advance all three pointers together
running_tot += !out_window && c1_e_c2 && c1_e_c3;
dummy_i += !out_window && c1_e_c2 && c1_e_c3;
dummy_j += !out_window && c1_e_c2 && c1_e_c3;
dummy_k += !out_window && c1_e_c2 && c1_e_c3;
//If we haven't incremented dummy_j or dummy_k then by process of elimination dummy_i needs to be incremented
dummy_i += !out_window && !dummy_j && !dummy_k;
//running_tot += in_window;
i += dummy_i;
j += dummy_j;
k += dummy_k;
}
//Accumulate into this shot file's slice of the denominator grid; no atomics are
//needed because each (x, y) thread owns a unique output element
denom[block_x * block_size_x + id_x + (block_y * block_size_y + id_y) * (*max_pulse_distance * 2 + 1) + shot_file_num * (*max_pulse_distance * 2 + 1) * (*max_pulse_distance * 2 + 1)] += running_tot;
}
}
}
}
}
}
//Function grabs all tags and channel list from file
//Reads a 1-D HDF5 dataset `name` from `group` into `vec`, resizing `vec` to fit.
//`type` is the in-memory element type (e.g. H5::PredType::NATIVE_UINT64).
template <typename VecT>
static void readDataset1D(H5::Group &group, const char *name, VecT &vec, const H5::PredType &type) {
H5::DataSet dset(group.openDataSet(name));
H5::DataSpace dspace = dset.getSpace();
hsize_t length[1];
dspace.getSimpleExtentDims(length, NULL);
vec.resize(length[0]);
//Guard the read: taking &vec[0u] on an empty vector is undefined behavior
if (length[0] > 0) {
dset.read(&vec[0u], type, dspace);
}
dspace.close();
dset.close();
}
//Grabs all tag sets and the channel list from the HDF5 file `filename` into `shot_data`.
//Throws on malformed files (including via the H5 C++ API), which populateBlock() catches
//to mark the file as corrupted.
void fileToShotData(shotData *shot_data, char* filename) {
//Open up file
H5::H5File file(filename, H5F_ACC_RDONLY);
//Open up "Tags" group
H5::Group tag_group(file.openGroup("Tags"));
//Find out how many tag sets there are; should be 4, otherwise the file is malformed
hsize_t numTagsSets = tag_group.getNumObjs();
if (numTagsSets != 4) {
//hsize_t is an unsigned 64-bit type, so cast for a portable format specifier.
//(The original printed it with %i, freed mxMalloc'd memory with `delete`, and
//wrote `exit;` — a no-op that never aborted.)
mexPrintf("There should be 4 sets of Tags, found %llu\n", (unsigned long long)numTagsSets);
//Throw so the caller's catch(...) leaves file_load_completed == false.
//filename is NOT freed here: it is owned by the filelist and mxFree'd by mexFunction.
throw "unexpected number of tag sets";
}
//Read tag sets into the shotData structure: clock, start, end and photon tags
readDataset1D(tag_group, "ClockTags0", shot_data->clock_tags, H5::PredType::NATIVE_UINT64);
readDataset1D(tag_group, "StartTag", shot_data->start_tags, H5::PredType::NATIVE_UINT64);
readDataset1D(tag_group, "EndTag", shot_data->end_tags, H5::PredType::NATIVE_UINT64);
readDataset1D(tag_group, "TagWindow0", shot_data->photon_tags, H5::PredType::NATIVE_UINT64);
//And close tags group
tag_group.close();
//Open up "Inform" group and grab the channel list
H5::Group inform_group(file.openGroup("Inform"));
readDataset1D(inform_group, "ChannelList", shot_data->channel_list, H5::PredType::NATIVE_UINT16);
//Close Inform group
inform_group.close();
//Close file
file.close();
//Populate channel map: hardware channel number -> dense index
for (short int i = 0; i < shot_data->channel_list.size(); i++) {
shot_data->channel_map[shot_data->channel_list[i]] = i;
}
}
//Reads relevant information for a block of files into shot_block
//Loads one block's worth of files from `filelist` into `shot_block`.
//Slots whose file is missing or fails to parse keep file_load_completed == false.
void populateBlock(std::vector<shotData> *shot_block, std::vector<char *> *filelist, int block_num) {
const int first_file = block_num * file_block_size;
for (int slot = 0; slot < file_block_size; slot++) {
shotData &shot = (*shot_block)[slot];
//Assume the slot is corrupted/absent until proven otherwise
shot.file_load_completed = false;
//Map the slot onto its position in the full filelist
const int file_id = first_file + slot;
//Skip slots that fall off the end of the filelist
if (file_id >= filelist->size()) {
continue;
}
try {
//Attempt the load; only mark complete if it did not throw
fileToShotData(&shot, (*filelist)[file_id]);
shot.file_load_completed = true;
}
catch (...) {
//Corrupted/unreadable file: report it and leave the slot marked unloaded
printf("%s appears corrupted\n", (*filelist)[file_id]);
}
}
}
//Process the time tags, assigning them to the correct channel, binning them appropriately and removing tags which do not fall in the clock mask
//Process the time tags, assigning them to the correct channel and discarding tags that
//do not fall inside a valid clock window (startTags[j] .. endTags[j]).
//Tag word format (as used below): bit 0 flags a "high word" rollover marker; bits 1-27
//carry the timestamp; photon tags carry the channel in bits 29-31; clock tags carry the
//slope (rising/falling) in bit 28. All times are re-based against the first start tag.
void sortTags(shotData *shot_data) {
long int i;
int high_count = 0;
//Loop over all tags in clock_tags
for (i = 0; i < shot_data->clock_tags.size(); i++) {
//Check if clock tag is a high word (rollover marker rather than a timestamp)
if (shot_data->clock_tags[i] & 1) {
//Up the high count
high_count++;
}
else {
//Determine whether it is the rising (start) or falling (end) slope
int slope = ((shot_data->clock_tags[i] >> 28) & 1);
//Reconstruct the full time: 27-bit timestamp extended by the rollover count,
//re-based against the first start tag's timestamp.
//NOTE(review): high_count is an int shifted left by 27 bits — this overflows for
//very long acquisitions (high_count >= 16); verify acquisition lengths stay below that.
shot_data->sorted_clock_tags[slope][shot_data->sorted_clock_tag_pointers[slope]] = ((shot_data->clock_tags[i] >> 1) & 0x7FFFFFF) + (high_count << 27) - ((shot_data->start_tags[1] >> 1) & 0x7FFFFFF);
shot_data->sorted_clock_tag_pointers[slope]++;
}
}
high_count = 0;
//Clock pointer: index into the sorted start (slope 1) / end (slope 0) clock tags
int clock_pointer = 0;
//Loop over all tags in photon_tags (assumed time-ordered, like the clock tags)
for (i = 0; i < shot_data->photon_tags.size(); i++) {
//Check if photon tag is a high word
if (shot_data->photon_tags[i] & 1) {
//Up the high count
high_count++;
}
else {
//Reconstruct the photon's full time the same way as the clock tags above
long long int time_tag = ((shot_data->photon_tags[i] >> 1) & 0x7FFFFFF) + (high_count << 27) - ((shot_data->start_tags[1] >> 1) & 0x7FFFFFF);
//Walk the clock windows until we know whether this tag falls inside one
bool valid = true;
while (valid) {
//printf("%i\t%i\t%i\t", time_tag, shot_data->sorted_clock_tags[1][clock_pointer], shot_data->sorted_clock_tags[0][clock_pointer - 1]);
//Increment dummy pointer if channel tag is greater than current start tag
if ((time_tag >= shot_data->sorted_clock_tags[1][clock_pointer]) & (clock_pointer < shot_data->sorted_clock_tag_pointers[1])) {
//printf("up clock pointer\n");
clock_pointer++;
}
//Make sure clock_pointer is greater than 0, preventing an underflow error
else if (clock_pointer > 0) {
//Check if tag is lower than previous end tag i.e. startTags[j-1] < channeltags[i] < endTags[j-1]
if (time_tag <= shot_data->sorted_clock_tags[0][clock_pointer - 1]) {
//printf("add tag to data\n");
//Determine the channel index for the given tag (channel bits are 29-31, 1-based)
int channel_index = shot_data->channel_map.find(((shot_data->photon_tags[i] >> 29) & 7) + 1)->second;
//Assign the tag to the appropriate per-channel vector
shot_data->sorted_photon_tags[channel_index][shot_data->sorted_photon_tag_pointers[channel_index]] = time_tag;
//printf("%i\t%i\t%i\n", channel_index, time_tag, shot_data->sorted_photon_tag_pointers[channel_index]);
shot_data->sorted_photon_tag_pointers[channel_index]++;
}
//Break the valid loop
valid = false;
}
// If tag is smaller than the first start tag, drop it
else {
valid = false;
}
}
}
}
}
//Converts the sorted time tags to bin indices by dividing by the bin width (expressed in
//tagger-resolution units) and taking the ceiling. Operates in place on shot_data's
//sorted_photon_bins / sorted_clock_bins arrays, up to the per-channel/-slope pointers.
void tagsToBins(shotData *shot_data, double bin_width) {
double norm_bin_width = bin_width / tagger_resolution;
//Parallelize over channels only. The original also put a second
//`#pragma omp parallel for` on the inner loop; nested parallel regions are serialized
//by default and would oversubscribe threads if nesting were enabled, so it is removed.
#pragma omp parallel for
for (int channel = 0; channel < shot_data->sorted_photon_bins.size(); channel++) {
for (int i = 0; i < shot_data->sorted_photon_tag_pointers[channel]; i++) {
//Integer tag / double width already yields a double; round up to the owning bin
shot_data->sorted_photon_bins[channel][i] = (long int)ceil(double(shot_data->sorted_photon_tags[channel][i] / norm_bin_width));
}
}
//Clock tags: slope 1 = rising (start), slope 0 = falling (end)
for (int slope = 0; slope <= 1; slope++) {
#pragma omp parallel for
for (int i = 0; i < shot_data->sorted_clock_tag_pointers[slope]; i++) {
shot_data->sorted_clock_bins[slope][i] = (long int)ceil(double(shot_data->sorted_clock_tags[slope][i] / norm_bin_width));
}
}
}
//Sorts photons and bins them for each file in a block
//Sorts photon tags and bins them for every successfully loaded file in a block.
//Files that failed to load (file_load_completed == false) are left untouched.
void sortAndBinBlock(std::vector<shotData> *shot_block, double bin_width) {
#pragma omp parallel for
for (int shot_file_num = 0; shot_file_num < file_block_size; shot_file_num++) {
shotData &shot = (*shot_block)[shot_file_num];
//Skip slots whose file was missing or corrupted
if (!shot.file_load_completed) {
continue;
}
sortTags(&shot);
tagsToBins(&shot, bin_width);
}
}
//Debug helper: dumps index, raw time tag and bin index for every photon on `channel`.
void printShotChannelBins(shotData *shot_data, int channel) {
for (int i = 0; i < shot_data->sorted_photon_tag_pointers[channel]; i++) {
//Cast explicitly to long long: the tag/bin containers hold wide integers and the
//original "%i" specifiers were undefined behavior for anything wider than int.
printf("%d\t%lld\t%lld\n", i, (long long)shot_data->sorted_photon_tags[channel][i], (long long)shot_data->sorted_photon_bins[channel][i]);
}
}
//MATLAB entry point. Expected inputs:
//  prhs[0] cell array of HDF5 filenames      prhs[1] bin width (s)
//  prhs[2] max correlation time (s)          prhs[3] pulse spacing (s)
//  prhs[4] max pulse distance (int)          prhs[5] CUDA device number (int)
//Outputs: plhs[0] the (2*max_bin+1)^2 numerator histogram, plhs[1] the scalar denominator.
//Streams one block of files at a time through pinned host buffers to the GPU, launching
//one coincidence kernel per file on its own stream, then collapses the per-stream
//results on the host. All GPU resources are released at the Error: label.
void mexFunction(int nlhs, mxArray* plhs[], int nrgs, const mxArray* prhs[]) {
//Get list of files to process
mxArray *cell_element_ptr;
mwSize total_num_files, buflen;
//Figure out how many files there are and allocate a vector to hold strings
total_num_files = mxGetNumberOfElements(prhs[0]);
std::vector<char *> filelist(total_num_files);
//Grab each filename and stick it into the filelist vector (mxFree'd near the end)
for (int i = 0; i < total_num_files; i++) {
cell_element_ptr = mxGetCell(prhs[0], i);
buflen = mxGetN(cell_element_ptr) * sizeof(mxChar) + 1;
filelist[i] = (char *)mxMalloc(buflen);
mxGetString(cell_element_ptr, filelist[i], buflen);
}
//Unpack the scalar parameters from the right-hand-side arguments
double *max_time;
max_time = (double *)mxGetData(prhs[2]);
double *bin_width;
bin_width = (double *)mxGetData(prhs[1]);
double *pulse_spacing;
pulse_spacing = (double *)mxGetData(prhs[3]);
int *max_pulse_distance;
max_pulse_distance = (int *)mxGetData(prhs[4]);
int *cuda_device_number;
cuda_device_number = (int *)mxGetData(prhs[5]);
//NOTE(review): the label says "Bin width" but the first value printed is max_time;
//the labels/values appear misaligned — confirm intended ordering
printf("Bin width\t%fµs\t%fns\t%fµs\t%i\n", *max_time * 1e6, *bin_width * 1e9, *pulse_spacing * 1e6, *max_pulse_distance);
//Convert times to integer bin counts
int max_bin = (int)round(*max_time / *bin_width);
int bin_pulse_spacing = (int)round(*pulse_spacing / *bin_width);
//Create our arrays to hold the denominator and numerator
//NOTE(review): these buffers are mxINT32_CLASS but are accessed through long int*;
//on LP64 platforms (Linux/macOS) long is 8 bytes, so indexing strides past the
//allocated elements — verify this was only ever built where long is 32-bit (Windows)
plhs[0] = mxCreateNumericMatrix(1, (max_bin * 2 + 1) * (max_bin * 2 + 1), mxINT32_CLASS, mxREAL);
long int* numer = (long int*)mxGetData(plhs[0]);
plhs[1] = mxCreateNumericMatrix(1, 1, mxINT32_CLASS, mxREAL);
long int* denom = (long int*)mxGetData(plhs[1]);
//Initialise denom and numer to zero
#pragma omp parallel for
for (int i = 0; i < (max_bin * 2 + 1) * (max_bin * 2 + 1); i++) {
numer[i] = 0;
}
denom[0] = 0;
//Figure out how many file blocks we need (ceiling division, minimum 1)
int blocks_req;
if (total_num_files < file_block_size) {
blocks_req = 1;
}
else if ((total_num_files%file_block_size) == 0) {
blocks_req = total_num_files / file_block_size;
}
else {
blocks_req = total_num_files / file_block_size + 1;
}
printf("Processing %i files in %i blocks\n", total_num_files, blocks_req);
//Select the requested CUDA device
cudaError_t cudaStatus = cudaSetDevice(*cuda_device_number);
if (cudaStatus != cudaSuccess) {
mexPrintf("cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
//Pointers to pinned (page-locked) host memory for async host-GPU DMA
long int* pinned_photon_bins;
long int* pinned_start_and_end_clocks;
int* pinned_photon_bins_length;
cudaMallocHost((long int**)&pinned_photon_bins, max_tags_length * max_channels * file_block_size * sizeof(long int));
cudaMallocHost((long int**)&pinned_start_and_end_clocks, 2 * file_block_size * sizeof(long int));
cudaMallocHost((int**)&pinned_photon_bins_length, max_channels * file_block_size * sizeof(int));
//Load some stuff to the GPU we will use permanently
//Allocate memory on GPU for various things
gpuData gpu_data;
cudaStatus = cudaMalloc((void**)&(gpu_data.photon_bins_gpu), max_channels * max_tags_length * file_block_size * sizeof(long int));
if (cudaStatus != cudaSuccess) {
mexPrintf("cudaMalloc photon_bins_gpu failed\n");
mexPrintf("%s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = cudaMalloc((void**)&(gpu_data.offset_gpu), max_channels * file_block_size * sizeof(int));
if (cudaStatus != cudaSuccess) {
mexPrintf("cudaMalloc offset_gpu failed!\n");
goto Error;
}
cudaStatus = cudaMalloc((void**)&(gpu_data.photon_bins_length_gpu), max_channels * file_block_size * sizeof(int));
if (cudaStatus != cudaSuccess) {
mexPrintf("cudaMalloc photon_bins_length_gpu failed!\n");
goto Error;
}
//coinc_gpu holds numerator histogram plus denominator tail, one copy per stream
cudaStatus = cudaMalloc((void**)&(gpu_data.coinc_gpu), (((2 * (max_bin)+1) * (2 * (max_bin)+1)) + (*max_pulse_distance * 2) * (*max_pulse_distance * 2)) * file_block_size * sizeof(long int));
if (cudaStatus != cudaSuccess) {
mexPrintf("cudaMalloc numer_gpu failed!\n");
goto Error;
}
cudaStatus = cudaMalloc((void**)&(gpu_data.start_and_end_clocks_gpu), 2 * file_block_size * sizeof(long int));
if (cudaStatus != cudaSuccess) {
mexPrintf("cudaMalloc start_and_end_clocks_gpu failed!\n");
goto Error;
}
cudaStatus = cudaMalloc((void**)&(gpu_data.max_bin_gpu), sizeof(int));
if (cudaStatus != cudaSuccess) {
mexPrintf("cudaMalloc max_bin_gpu failed!\n");
goto Error;
}
cudaStatus = cudaMalloc((void**)&(gpu_data.pulse_spacing_gpu), sizeof(int));
if (cudaStatus != cudaSuccess) {
mexPrintf("cudaMalloc pulse_spacing_gpu failed!\n");
goto Error;
}
cudaStatus = cudaMalloc((void**)&(gpu_data.max_pulse_distance_gpu), sizeof(int));
if (cudaStatus != cudaSuccess) {
mexPrintf("cudaMalloc max_pulse_distance_gpu failed!\n");
goto Error;
}
//And set some values that are constant across all data
cudaStatus = cudaMemcpy((gpu_data.max_bin_gpu), &max_bin, sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
mexPrintf("cudaMemcpy failed!\n");
goto Error;
}
cudaStatus = cudaMemcpy((gpu_data.pulse_spacing_gpu), &bin_pulse_spacing, sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
mexPrintf("cudaMemcpy failed!\n");
goto Error;
}
cudaStatus = cudaMemcpy((gpu_data.max_pulse_distance_gpu), max_pulse_distance, sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
mexPrintf("cudaMemcpy failed!\n");
goto Error;
}
//Per-(channel, file) offsets: pointer to the first photon bin for each channel
int host_offest_array[max_channels * file_block_size];
for (int i = 0; i < max_channels * file_block_size; i++) {
host_offest_array[i] = i * max_tags_length;
}
cudaStatus = cudaMemcpy((gpu_data.offset_gpu), host_offest_array, max_channels * file_block_size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
mexPrintf("cudaMemcpy failed!\n");
goto Error;
}
//Zero the per-stream coincidence accumulators once; kernels only ever add to them
cudaStatus = cudaMemset((gpu_data).coinc_gpu, 0, (((2 * (max_bin)+1) * (2 * (max_bin)+1)) + (*max_pulse_distance * 2) * (*max_pulse_distance * 2)) * file_block_size * sizeof(long int));
if (cudaStatus != cudaSuccess) {
mexPrintf("cudaMemset failed!\n");
goto Error;
}
//Create some streams for us to use for GPU parallelism (one per file slot)
cudaStream_t streams[file_block_size];
for (int i = 0; i < file_block_size; i++) {
cudaStreamCreate(&streams[i]);
}
//Create some events to allow us to know if a previous transfer has completed
cudaEvent_t events[file_block_size];
for (int i = 0; i < file_block_size; i++) {
cudaEventCreate(&events[i]);
}
//Figure out how many CUDA blocks to chunk the processing up into for the numerator
//(ceiling division of the total output elements by the threads per block)
int cuda_blocks_numer = 0;
int threads_per_block_numer = 128;
if (threads_per_block_numer >= (((2 * (max_bin)+1) * (2 * (max_bin)+1)) + (*max_pulse_distance * 2) * (*max_pulse_distance * 2))) {
cuda_blocks_numer = 1;
}
else if (((((2 * (max_bin)+1) * (2 * (max_bin)+1)) + (*max_pulse_distance * 2) * (*max_pulse_distance * 2)) % threads_per_block_numer) == 0) {
cuda_blocks_numer = (((2 * (max_bin)+1) * (2 * (max_bin)+1)) + (*max_pulse_distance * 2) * (*max_pulse_distance * 2)) / threads_per_block_numer;
}
else {
cuda_blocks_numer = (((2 * (max_bin)+1) * (2 * (max_bin)+1)) + (*max_pulse_distance * 2) * (*max_pulse_distance * 2)) / threads_per_block_numer + 1;
}
//Figure out how many CUDA blocks to chunk the processing up into for the denominator
int threads_per_block_dim_denom = (*max_pulse_distance * 2 + 1);
dim3 cuda_threads_denom(threads_per_block_dim_denom, threads_per_block_dim_denom);
dim3 cuda_blocks_denom(1, 1);
//Process files in blocks
for (int block_num = 0; block_num < blocks_req; block_num++) {
//Allocate a vector to hold a block of shot_data
std::vector<shotData> shot_block(file_block_size);
//Populate the shot_block with data from file
populateBlock(&shot_block, &filelist, block_num);
//Sort tags and convert them to bins (CPU-side, OpenMP parallel)
sortAndBinBlock(&shot_block, *bin_width);
//printShotChannelBins(&(shot_block[0]), 1);
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
/*cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
mexPrintf("cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}*/
// Check for any errors launching the kernel(s) from the previous block
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
mexPrintf("addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
//Asynchronously load data to the GPU, one stream per file
for (int shot_file_num = 0; shot_file_num < file_block_size; shot_file_num++) {
if ((shot_block)[shot_file_num].file_load_completed) {
int num_channels = (shot_block)[shot_file_num].channel_list.size();
//g3 needs at least three channels to form a triple
if (num_channels >= 3) {
std::vector<long int*> photon_bins;
long int start_and_end_clocks[2];
std::vector<int> photon_bins_length;
photon_bins.resize(max_channels);
photon_bins_length.resize(max_channels);
start_and_end_clocks[0] = (shot_block)[shot_file_num].sorted_clock_bins[1][0];
start_and_end_clocks[1] = (shot_block)[shot_file_num].sorted_clock_bins[0][0];
for (int i = 0; i < num_channels; i++) {
photon_bins[i] = &((shot_block)[shot_file_num].sorted_photon_bins[i][0]);
photon_bins_length[i] = (shot_block)[shot_file_num].sorted_photon_tag_pointers[i];
}
//Sync to ensure the previous async memcpy has finished; otherwise we would start
//overwriting pinned data that may still be being DMA'd
cudaEventSynchronize(events[shot_file_num]);
//Write photon bins to pinned memory, then queue the async copy per channel
int photon_offset = shot_file_num * max_channels * max_tags_length;
for (int i = 0; i < photon_bins_length.size(); i++) {
memcpy(pinned_photon_bins + photon_offset, (photon_bins)[i], (photon_bins_length)[i] * sizeof(long int));
cudaStatus = cudaMemcpyAsync((gpu_data).photon_bins_gpu + photon_offset, pinned_photon_bins + photon_offset, (photon_bins_length)[i] * sizeof(long int), cudaMemcpyHostToDevice, streams[shot_file_num]);
if (cudaStatus != cudaSuccess) {
mexPrintf("%i\t%i\n", block_num, shot_file_num);
mexPrintf("cudaMemcpy photon_offset failed! Error message: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
photon_offset += max_tags_length;
}
int clock_offset = shot_file_num * 2;
//And other parameters (clock window for this file)
memcpy(pinned_start_and_end_clocks + clock_offset, start_and_end_clocks, 2 * sizeof(long int));
cudaStatus = cudaMemcpyAsync((gpu_data).start_and_end_clocks_gpu + clock_offset, pinned_start_and_end_clocks + clock_offset, 2 * sizeof(long int), cudaMemcpyHostToDevice, streams[shot_file_num]);
if (cudaStatus != cudaSuccess) {
mexPrintf("cudaMemcpy clock_offset failed!\n");
goto Error;
}
int length_offset = shot_file_num * max_channels;
//Can't copy a std::vector to CUDA easily; stage element-wise into pinned memory
for (int i = 0; i < photon_bins_length.size(); i++) {
memcpy(pinned_photon_bins_length + i + length_offset, &((photon_bins_length)[i]), sizeof(int));
}
cudaStatus = cudaMemcpyAsync((gpu_data).photon_bins_length_gpu + length_offset, pinned_photon_bins_length + length_offset, max_channels * sizeof(int), cudaMemcpyHostToDevice, streams[shot_file_num]);
if (cudaStatus != cudaSuccess) {
mexPrintf("cudaMemcpy length_offset failed!\n");
goto Error;
}
//Record an event to let the next block iteration know all async copies have occurred
cudaEventRecord(events[shot_file_num], streams[shot_file_num]);
//Launch the numerator-calculating kernel on this file's stream; it accumulates
//into this stream's private slice of coinc_gpu, so no cross-stream races
calculateCoincidenceGPU_g3 << <cuda_blocks_numer, threads_per_block_numer, 0, streams[shot_file_num] >> >((gpu_data).coinc_gpu, (gpu_data).photon_bins_gpu, (gpu_data).start_and_end_clocks_gpu, (gpu_data).max_bin_gpu, (gpu_data).pulse_spacing_gpu, (gpu_data).max_pulse_distance_gpu, (gpu_data).offset_gpu, (gpu_data).photon_bins_length_gpu, num_channels, shot_file_num);
}
}
}
mexPrintf("Finished block %i/%i\n", block_num + 1, blocks_req);
mexEvalString("pause(.0001);");
}
// cudaDeviceSynchronize waits for all queued kernels to finish, and returns
// any errors encountered during the launches.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
mexPrintf("cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
//Free pinned memory (also freed again at Error:, which is benign only on success paths
//that never reach here — NOTE(review): on this success path cudaFreeHost runs twice
//on the same pointers once Error: is reached; confirm this double-free is intended)
cudaFreeHost(pinned_photon_bins);
cudaFreeHost(pinned_photon_bins_length);
cudaFreeHost(pinned_start_and_end_clocks);
//This is to pull the streamed numerator off the GPU
//"Streamed numerator" refers to the way the numerator is stored on the GPU, where each GPU stream has a separate numerator
long int *streamed_coinc;
streamed_coinc = (long int *)malloc((((2 * (max_bin)+1) * (2 * (max_bin)+1)) + (*max_pulse_distance * 2) * (*max_pulse_distance * 2)) * file_block_size * sizeof(long int));
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(streamed_coinc, (gpu_data).coinc_gpu, (((2 * (max_bin)+1) * (2 * (max_bin)+1)) + (*max_pulse_distance * 2) * (*max_pulse_distance * 2)) * file_block_size * sizeof(long int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
mexPrintf("cudaMemcpy failed!\n");
free(streamed_coinc);
goto Error;
}
//Collapse the per-stream results: the first (2*max_bin+1)^2 elements of each slice
//are numerator bins, the remainder are denominator contributions
for (int i = 0; i < file_block_size; i++) {
for (int j = 0; j < (((2 * (max_bin)+1) * (2 * (max_bin)+1)) + (*max_pulse_distance * 2) * (*max_pulse_distance * 2)); j++) {
if (j < ((2 * (max_bin)+1) * (2 * (max_bin)+1))) {
numer[j] += streamed_coinc[j + i * (((2 * (max_bin)+1) * (2 * (max_bin)+1)) + (*max_pulse_distance * 2) * (*max_pulse_distance * 2))];
}
else {
denom[0] += streamed_coinc[j + i * (((2 * (max_bin)+1) * (2 * (max_bin)+1)) + (*max_pulse_distance * 2) * (*max_pulse_distance * 2))];
}
}
}
free(streamed_coinc);
//Free the filenames we malloc'd earlier
for (int i = 0; i < total_num_files; i++) {
mxFree(filelist[i]);
}
/*cudaStatus = cudaFree(gpu_data.max_bin_gpu);
if (cudaStatus != cudaSuccess) {
mexPrintf("cudaDeviceReset failed! %s\n", cudaGetErrorString(cudaStatus));
}
cudaStatus = cudaFree(gpu_data.max_pulse_distance_gpu);
if (cudaStatus != cudaSuccess) {
mexPrintf("cudaDeviceReset failed!\n");
}
cudaStatus = cudaFree(gpu_data.numer_gpu);
if (cudaStatus != cudaSuccess) {
mexPrintf("cudaDeviceReset failed!\n");
}
cudaStatus = cudaFree(gpu_data.offset_gpu);
if (cudaStatus != cudaSuccess) {
mexPrintf("cudaDeviceReset failed!\n");
}
cudaStatus = cudaFree(gpu_data.photon_bins_gpu);
if (cudaStatus != cudaSuccess) {
mexPrintf("cudaDeviceReset failed!\n");
}
cudaStatus = cudaFree(gpu_data.photon_bins_length_gpu);
if (cudaStatus != cudaSuccess) {
mexPrintf("cudaDeviceReset failed!\n");
}
cudaStatus = cudaFree(gpu_data.pulse_spacing_gpu);
if (cudaStatus != cudaSuccess) {
mexPrintf("cudaDeviceReset failed!\n");
}
cudaStatus = cudaFree(gpu_data.start_and_end_clocks_gpu);
if (cudaStatus != cudaSuccess) {
mexPrintf("cudaDeviceReset failed!\n");
}*/
//Release CUDA device
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
mexPrintf("cudaDeviceReset failed!\n");
}
//Shared cleanup for both the success path (falls through) and all goto Error paths
Error:
cudaFree((gpu_data.coinc_gpu));
cudaFree((gpu_data.offset_gpu));
cudaFree((gpu_data.max_bin_gpu));
cudaFree((gpu_data.pulse_spacing_gpu));
cudaFree((gpu_data.max_pulse_distance_gpu));
cudaFree((gpu_data.photon_bins_length_gpu));
cudaFree(gpu_data.photon_bins_gpu);
cudaFree(gpu_data.start_and_end_clocks_gpu);
cudaFreeHost(pinned_photon_bins);
cudaFreeHost(pinned_photon_bins_length);
cudaFreeHost(pinned_start_and_end_clocks);
cudaDeviceReset();
}
c7f6b5b769fc9e00e68bbc463b90dc053bc267ec.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include "headers/params.h"
// CUDA-C includes
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
//extern "C" void init_gpu(int argc, char **argv, int enable_debug, size_t *gpu_memory);
//{{{ init_gpu
// Select the GPU given by the compile-time index CARD (headers/params.h),
// print a deviceQuery-style summary of its capabilities, and report the
// amount of currently free device memory through *gpu_memory.
// argc/arg/enable_debug are accepted but unused here — presumably kept for
// interface compatibility with callers; TODO confirm.
// Exits the whole process if the device count cannot be queried.
void init_gpu(int argc, char **arg, int enable_debug, size_t *gpu_memory)
{
	int deviceCount = 0;
	hipError_t error_id = hipGetDeviceCount(&deviceCount);
	if (error_id != hipSuccess)
	{
		printf("hipGetDeviceCount returned %d\n-> %s\n", (int) error_id, hipGetErrorString(error_id));
		printf("Result = FAIL\n");
		exit(EXIT_FAILURE);
	}
	// This function call returns 0 if there are no CUDA capable devices.
	if (deviceCount == 0)
	{
		printf("There are no available device(s) that support CUDA\n");
	}
	else
	{
		printf("Detected %d CUDA Capable device(s)\n", deviceCount);
	}
	int dev, driverVersion = 0, runtimeVersion = 0;
	// CARD comes from headers/params.h; no range check against deviceCount here.
	dev = CARD;
	hipSetDevice(dev);
	hipDeviceProp_t deviceProp;
	hipGetDeviceProperties(&deviceProp, dev);
	printf("\nDevice %d: \"%s\"\n", dev, deviceProp.name);
	// Console log
	hipDriverGetVersion(&driverVersion);
	hipRuntimeGetVersion(&runtimeVersion);
	printf(" CUDA Driver Version / Runtime Version %d.%d / %d.%d\n", driverVersion / 1000, ( driverVersion % 100 ) / 10, runtimeVersion / 1000, ( runtimeVersion % 100 ) / 10);
	printf(" CUDA Capability Major/Minor version number: %d.%d\n", deviceProp.major, deviceProp.minor);
	printf(" Total amount of global memory: %.0f MBytes (%llu bytes)\n", (float) deviceProp.totalGlobalMem / 1048576.0f, (unsigned long long) deviceProp.totalGlobalMem);
	printf(" GPU Clock rate: %.0f MHz (%0.2f GHz)\n", deviceProp.clockRate * 1e-3f, deviceProp.clockRate * 1e-6f);
	printf(" Memory Clock rate: %.0f Mhz\n", deviceProp.memoryClockRate * 1e-3f);
	printf(" Memory Bus Width: %d-bit\n", deviceProp.memoryBusWidth);
	if (deviceProp.l2CacheSize)
	{
		printf(" L2 Cache Size: %d bytes\n", deviceProp.l2CacheSize);
	}
	printf(" Maximum Texture Dimension Size (x,y,z) 1D=(%d), 2D=(%d, %d), 3D=(%d, %d, %d)\n", deviceProp.maxTexture1D, deviceProp.maxTexture2D[0], deviceProp.maxTexture2D[1], deviceProp.maxTexture3D[0], deviceProp.maxTexture3D[1], deviceProp.maxTexture3D[2]);
	printf(" Maximum Layered 1D Texture Size, (num) layers 1D=(%d), %d layers\n", deviceProp.maxTexture1DLayered[0], deviceProp.maxTexture1DLayered[1]);
	printf(" Maximum Layered 2D Texture Size, (num) layers 2D=(%d, %d), %d layers\n", deviceProp.maxTexture2DLayered[0], deviceProp.maxTexture2DLayered[1], deviceProp.maxTexture2DLayered[2]);
	printf(" Total amount of constant memory: %lu bytes\n", deviceProp.totalConstMem);
	printf(" Total amount of shared memory per block: %lu bytes\n", deviceProp.sharedMemPerBlock);
	printf(" Total number of registers available per block: %d\n", deviceProp.regsPerBlock);
	printf(" Warp size: %d\n", deviceProp.warpSize);
	printf(" Maximum number of threads per multiprocessor: %d\n", deviceProp.maxThreadsPerMultiProcessor);
	printf(" Maximum number of threads per block: %d\n", deviceProp.maxThreadsPerBlock);
	printf(" Max dimension size of a thread block (x,y,z): (%d, %d, %d)\n", deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1], deviceProp.maxThreadsDim[2]);
	printf(" Max dimension size of a grid size (x,y,z): (%d, %d, %d)\n", deviceProp.maxGridSize[0], deviceProp.maxGridSize[1], deviceProp.maxGridSize[2]);
	printf(" Maximum memory pitch: %lu bytes\n", deviceProp.memPitch);
	printf(" Texture alignment: %lu bytes\n", deviceProp.textureAlignment);
	printf(" Concurrent copy and kernel execution: %s with %d copy engine(s)\n", (
			deviceProp.deviceOverlap ? "Yes" : "No" ), deviceProp.asyncEngineCount);
	printf(" Run time limit on kernels: %s\n",
			deviceProp.kernelExecTimeoutEnabled ? "Yes" : "No");
	printf(" Integrated GPU sharing Host Memory: %s\n",
			deviceProp.integrated ? "Yes" : "No");
	printf(" Support host page-locked memory mapping: %s\n",
			deviceProp.canMapHostMemory ? "Yes" : "No");
	printf(" Alignment requirement for Surfaces: %s\n",
			deviceProp.surfaceAlignment ? "Yes" : "No");
	printf(" Device has ECC support: %s\n",
			deviceProp.ECCEnabled ? "Enabled" : "Disabled");
	printf(" CUDA Device Driver Mode (TCC or WDDM): %s\n",
			deviceProp.tccDriver ? "TCC (Tesla Compute Cluster Driver)" : "WDDM (Windows Display Driver Model)");
	printf(" Device supports Unified Addressing (UVA): %s\n",
			deviceProp.unifiedAddressing ? "Yes" : "No");
	printf(" Device PCI Bus ID / PCI location ID: %d / %d\n", deviceProp.pciBusID, deviceProp.pciDeviceID);
	size_t free, total;
	// Report free memory only; the caller decides how much of it to use.
	hipMemGetInfo(&free, &total);
	*gpu_memory = ( free );
	//*gpu_memory = ( free*(6.0/7.0) );
}
//}}}
| c7f6b5b769fc9e00e68bbc463b90dc053bc267ec.cu | #include <stdio.h>
#include "headers/params.h"
// CUDA-C includes
#include <cuda.h>
#include <cuda_runtime.h>
//extern "C" void init_gpu(int argc, char **argv, int enable_debug, size_t *gpu_memory);
//{{{ init_gpu
// Select the GPU given by the compile-time index CARD (headers/params.h),
// print a deviceQuery-style summary of its capabilities, and report the
// amount of currently free device memory through *gpu_memory.
// argc/arg/enable_debug are accepted but unused here — presumably kept for
// interface compatibility with callers; TODO confirm.
// Exits the whole process if the device count cannot be queried.
void init_gpu(int argc, char **arg, int enable_debug, size_t *gpu_memory)
{
	int deviceCount = 0;
	cudaError_t error_id = cudaGetDeviceCount(&deviceCount);
	if (error_id != cudaSuccess)
	{
		printf("cudaGetDeviceCount returned %d\n-> %s\n", (int) error_id, cudaGetErrorString(error_id));
		printf("Result = FAIL\n");
		exit(EXIT_FAILURE);
	}
	// This function call returns 0 if there are no CUDA capable devices.
	if (deviceCount == 0)
	{
		printf("There are no available device(s) that support CUDA\n");
	}
	else
	{
		printf("Detected %d CUDA Capable device(s)\n", deviceCount);
	}
	int dev, driverVersion = 0, runtimeVersion = 0;
	// CARD comes from headers/params.h; no range check against deviceCount here.
	dev = CARD;
	cudaSetDevice(dev);
	cudaDeviceProp deviceProp;
	cudaGetDeviceProperties(&deviceProp, dev);
	printf("\nDevice %d: \"%s\"\n", dev, deviceProp.name);
	// Console log
	cudaDriverGetVersion(&driverVersion);
	cudaRuntimeGetVersion(&runtimeVersion);
	printf(" CUDA Driver Version / Runtime Version %d.%d / %d.%d\n", driverVersion / 1000, ( driverVersion % 100 ) / 10, runtimeVersion / 1000, ( runtimeVersion % 100 ) / 10);
	printf(" CUDA Capability Major/Minor version number: %d.%d\n", deviceProp.major, deviceProp.minor);
	printf(" Total amount of global memory: %.0f MBytes (%llu bytes)\n", (float) deviceProp.totalGlobalMem / 1048576.0f, (unsigned long long) deviceProp.totalGlobalMem);
	printf(" GPU Clock rate: %.0f MHz (%0.2f GHz)\n", deviceProp.clockRate * 1e-3f, deviceProp.clockRate * 1e-6f);
	printf(" Memory Clock rate: %.0f Mhz\n", deviceProp.memoryClockRate * 1e-3f);
	printf(" Memory Bus Width: %d-bit\n", deviceProp.memoryBusWidth);
	if (deviceProp.l2CacheSize)
	{
		printf(" L2 Cache Size: %d bytes\n", deviceProp.l2CacheSize);
	}
	printf(" Maximum Texture Dimension Size (x,y,z) 1D=(%d), 2D=(%d, %d), 3D=(%d, %d, %d)\n", deviceProp.maxTexture1D, deviceProp.maxTexture2D[0], deviceProp.maxTexture2D[1], deviceProp.maxTexture3D[0], deviceProp.maxTexture3D[1], deviceProp.maxTexture3D[2]);
	printf(" Maximum Layered 1D Texture Size, (num) layers 1D=(%d), %d layers\n", deviceProp.maxTexture1DLayered[0], deviceProp.maxTexture1DLayered[1]);
	printf(" Maximum Layered 2D Texture Size, (num) layers 2D=(%d, %d), %d layers\n", deviceProp.maxTexture2DLayered[0], deviceProp.maxTexture2DLayered[1], deviceProp.maxTexture2DLayered[2]);
	printf(" Total amount of constant memory: %lu bytes\n", deviceProp.totalConstMem);
	printf(" Total amount of shared memory per block: %lu bytes\n", deviceProp.sharedMemPerBlock);
	printf(" Total number of registers available per block: %d\n", deviceProp.regsPerBlock);
	printf(" Warp size: %d\n", deviceProp.warpSize);
	printf(" Maximum number of threads per multiprocessor: %d\n", deviceProp.maxThreadsPerMultiProcessor);
	printf(" Maximum number of threads per block: %d\n", deviceProp.maxThreadsPerBlock);
	printf(" Max dimension size of a thread block (x,y,z): (%d, %d, %d)\n", deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1], deviceProp.maxThreadsDim[2]);
	printf(" Max dimension size of a grid size (x,y,z): (%d, %d, %d)\n", deviceProp.maxGridSize[0], deviceProp.maxGridSize[1], deviceProp.maxGridSize[2]);
	printf(" Maximum memory pitch: %lu bytes\n", deviceProp.memPitch);
	printf(" Texture alignment: %lu bytes\n", deviceProp.textureAlignment);
	printf(" Concurrent copy and kernel execution: %s with %d copy engine(s)\n", (
			deviceProp.deviceOverlap ? "Yes" : "No" ), deviceProp.asyncEngineCount);
	printf(" Run time limit on kernels: %s\n",
			deviceProp.kernelExecTimeoutEnabled ? "Yes" : "No");
	printf(" Integrated GPU sharing Host Memory: %s\n",
			deviceProp.integrated ? "Yes" : "No");
	printf(" Support host page-locked memory mapping: %s\n",
			deviceProp.canMapHostMemory ? "Yes" : "No");
	printf(" Alignment requirement for Surfaces: %s\n",
			deviceProp.surfaceAlignment ? "Yes" : "No");
	printf(" Device has ECC support: %s\n",
			deviceProp.ECCEnabled ? "Enabled" : "Disabled");
	printf(" CUDA Device Driver Mode (TCC or WDDM): %s\n",
			deviceProp.tccDriver ? "TCC (Tesla Compute Cluster Driver)" : "WDDM (Windows Display Driver Model)");
	printf(" Device supports Unified Addressing (UVA): %s\n",
			deviceProp.unifiedAddressing ? "Yes" : "No");
	printf(" Device PCI Bus ID / PCI location ID: %d / %d\n", deviceProp.pciBusID, deviceProp.pciDeviceID);
	size_t free, total;
	// Report free memory only; the caller decides how much of it to use.
	cudaMemGetInfo(&free, &total);
	*gpu_memory = ( free );
	//*gpu_memory = ( free*(6.0/7.0) );
}
|
a6357dd6e87b3d0cf0b2501f3222913d4c968565.hip | // !!! This is a file automatically generated by hipify!!!
//Update the implemention to support 2D kernel by Shangchen Zhou
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <ATen/ATen.h>
#include "stdio.h"
#define THREAD_PER_BLOCK 512
#define VEC_0(ARRAY) ((ARRAY).x)
#define VEC_1(ARRAY) ((ARRAY).y)
#define VEC_2(ARRAY) ((ARRAY).z)
#define VEC_3(ARRAY) ((ARRAY).w)
#define IDX_1(ARRAY, X) ((ARRAY)[((X) * (ARRAY##_stride.x))])
#define IDX_2(ARRAY, X, Y) ((ARRAY)[((X) * (ARRAY##_stride.x)) + ((Y) * (ARRAY##_stride.y))])
#define IDX_3(ARRAY, X, Y, Z) ((ARRAY)[((X) * (ARRAY##_stride.x)) + ((Y) * (ARRAY##_stride.y)) + ((Z) * (ARRAY##_stride.z))])
#define IDX_4(ARRAY, X, Y, Z, W) ((ARRAY)[((X) * (ARRAY##_stride.x)) + ((Y) * (ARRAY##_stride.y)) + ((Z) * (ARRAY##_stride.z)) + ((W) * (ARRAY##_stride.w))])
#ifdef __cplusplus
extern "C" {
#endif
//Define forward operations
// input and output should be of shape [batch_size, n_features, H,W]
// kernel should be of shape [batch_size, n_features*n_features*kernel_size*kernel_size, H, W]
// Forward pass of the per-pixel 2D convolution.
// Per the file header: input/output are [B, C, H, W] and kernel is
// [B, C*C*k*k, H, W] — i.e. a distinct k x k filter per output pixel and
// per (in-channel, out-channel) pair.
// One thread computes exactly one output element (thread count = n_output).
// NOTE(review): input is read at (intY + intKernelY, intX + intKernelX), so
// its spatial dims must exceed output's by kernel_size-1 (pre-padded input)
// — confirm against the caller.
__global__ void KernelConv2D_forward_function(
	const int n_output,
	const int kernel_size,
	const float* input, const long4 input_shape, const long4 input_stride,
	const float* kernel, const long4 kernel_shape, const long4 kernel_stride,
	float* output, const long4 output_shape, const long4 output_stride
) {
	int intIndex = blockIdx.x * blockDim.x + threadIdx.x;
	// Grid tail guard: the last block may run past n_output.
	if (intIndex >= n_output) {
		return;
	}
	float output_i = 0.0;
	int nFeatures = VEC_1(output_shape);
	// Decode the flat index into (batch, out-channel, y, x) assuming a
	// contiguous B x C x H x W layout of `output`.
	int intBatch = ( intIndex / VEC_3(output_shape) / VEC_2(output_shape) / VEC_1(output_shape)) % VEC_0(output_shape);
	int intDepth = ( intIndex / VEC_3(output_shape) / VEC_2(output_shape) ) % VEC_1(output_shape);
	int intY = ( intIndex / VEC_3(output_shape) ) % VEC_2(output_shape);
	int intX = ( intIndex ) % VEC_3(output_shape);
	for (int intKernelY = 0; intKernelY < kernel_size; intKernelY += 1) {
		for (int intKernelX = 0; intKernelX < kernel_size; intKernelX += 1) {
			for (int intInChannel=0; intInChannel<nFeatures; intInChannel +=1) {
				//int intKernelDepth = kernel_size * kernel_size * intDepth + kernel_size * intKernelY + intKernelX;
				// Channel index into the packed kernel tensor:
				// [in-channel][out-channel][ky][kx] with strides C*k*k, k*k, k, 1.
				int intKernelDepth = nFeatures *kernel_size*kernel_size* intInChannel + kernel_size * kernel_size * intDepth + kernel_size * intKernelY + intKernelX;
				output_i += IDX_4(input, intBatch, intInChannel, intY + intKernelY, intX + intKernelX) * IDX_4(kernel, intBatch, intKernelDepth, intY, intX);
			}
		}
	}
	// Direct (non-atomic) store: each output element is owned by one thread.
	output[intIndex] = output_i;
}
// Forward pass of the bucketed ("cross-scale") variant: each output pixel
// picks one filter set from a shared kernel bank via a per-pixel bucket id.
// One thread handles one (batch, in-channel, out-channel, y, x) partial
// product; partials for the same output element are combined with atomicAdd,
// so the caller must zero-initialize `output` before launching.
// kernel_bank is indexed 2D as [bucket][C_out*(C_in*k*k + 1)]; the trailing
// +1 slot per out-channel holds a bias, added once (by the intDepthIn==0
// thread).
__global__ void CSKernelConv2D_forward_function(
	const int n_output,
	const int kernel_size,
	const float* input, const long4 input_shape, const long4 input_stride,
	const float* kernel_bank, const long4 kernel_bank_shape, const long4 kernel_bank_stride,
	float* output, const long4 output_shape, const long4 output_stride,
	const int* buckets, const long4 buckets_shape, const long4 buckets_stride
) {
	int intIndex = blockIdx.x * blockDim.x + threadIdx.x;
	// Grid tail guard.
	if (intIndex >= n_output) {
		return;
	}
	float output_i = 0.0;
	int nFeatures = VEC_1(input_shape);
	// Flat index decodes to (batch, in-channel, out-channel, y, x): the grid
	// is input.size(1) times larger than the output element count.
	int intBatch = ( intIndex / VEC_3(output_shape) / VEC_2(output_shape) / VEC_1(output_shape) / VEC_1(input_shape)) % VEC_0(output_shape);
	int intDepthIn = ( intIndex / VEC_3(output_shape) / VEC_2(output_shape) / VEC_1(output_shape) ) % VEC_1(input_shape);
	int intDepthOut = ( intIndex / VEC_3(output_shape) / VEC_2(output_shape) ) % VEC_1(output_shape);
	int intY = ( intIndex / VEC_3(output_shape) ) % VEC_2(output_shape);
	int intX = ( intIndex ) % VEC_3(output_shape);
	// Flat destination offset into the (assumed contiguous) output tensor.
	int outIndex = intBatch * VEC_3(output_shape) * VEC_2(output_shape) * VEC_1(output_shape) + intDepthOut * VEC_3(output_shape) * VEC_2(output_shape) + intY * VEC_3(output_shape) + intX;
	// Per-pixel bucket id selects the filter row in the kernel bank.
	int KernelIdx = IDX_3(buckets,intBatch, intY, intX);
	for (int intKernelY = 0; intKernelY < kernel_size; intKernelY += 1) {
		for (int intKernelX = 0; intKernelX < kernel_size; intKernelX += 1) {
			//for (int intInChannel=0; intInChannel<nFeatures; intInChannel +=1) {
			//int intKernelDepth = kernel_size * kernel_size * intDepth + kernel_size * intKernelY + intKernelX;
			//int intKernelDepth = nFeatures *kernel_size*kernel_size* intInChannel + kernel_size * kernel_size * intDepth + kernel_size * intKernelY + intKernelX;
			// Offset within the bank row: [out-ch][in-ch][ky][kx], where each
			// out-channel owns (C_in*k*k + 1) slots (last slot = bias).
			int intKernelDepth = intDepthOut* (nFeatures *kernel_size*kernel_size + 1) + kernel_size * kernel_size * intDepthIn + kernel_size * intKernelY + intKernelX;
			output_i += IDX_4(input, intBatch, intDepthIn, intY + intKernelY, intX + intKernelX) * IDX_2(kernel_bank, KernelIdx, intKernelDepth);
			//}
		}
	}
	if (intDepthIn == 0){
		// Bias is stored after the C_in*k*k weights and added exactly once.
		int intBiasDepth = intDepthOut* (nFeatures *kernel_size*kernel_size + 1) + nFeatures *kernel_size*kernel_size;
		output_i+= IDX_2(kernel_bank, KernelIdx, intBiasDepth);
	}
	// Several threads (one per in-channel) accumulate into the same element.
	atomicAdd(&output[outIndex], output_i);
	//output[intIndex] = output_i;
}
// Host wrapper: launches the bucketed (cross-scale) forward kernel on the
// given stream. One thread per (batch, in-channel, out-channel, y, x)
// contribution — note the extra input.size(1) factor in n_output.
// The kernel accumulates with atomicAdd, so `output` must be zeroed by the
// caller before this is invoked.
// Returns 1 on success, 0 if the launch reported an error.
int CSKernelConv2D_forward_cuda_kernel(
	at::Tensor& input,
	at::Tensor& kernel_bank,
	int kernel_size,
	at::Tensor& output,
	at::Tensor& buckets,  // FIX: comma was missing here, breaking compilation
	hipStream_t stream
) {
	int n_output = 0;
	n_output = output.size(0) * output.size(1) * output.size(2) * output.size(3) * input.size(1);
	hipLaunchKernelGGL(( CSKernelConv2D_forward_function), dim3((n_output + THREAD_PER_BLOCK - 1) / THREAD_PER_BLOCK), dim3(THREAD_PER_BLOCK), 0, stream ,
		n_output,
		kernel_size,
		input.data<float>(),
		make_long4(input.size(0), input.size(1), input.size(2), input.size(3)),
		make_long4(input.stride(0), input.stride(1), input.stride(2), input.stride(3)),
		kernel_bank.data<float>(),  // FIX: call parentheses were missing (passed a member-fn pointer expression instead of the data pointer)
		make_long4(kernel_bank.size(0), kernel_bank.size(1), 1, 1),
		make_long4(kernel_bank.stride(0), kernel_bank.stride(1), 1, 1),
		output.data<float>(),
		make_long4(output.size(0), output.size(1), output.size(2), output.size(3)),
		make_long4(output.stride(0), output.stride(1), output.stride(2), output.stride(3)),
		buckets.data<int>(),
		make_long4(buckets.size(0), buckets.size(1), buckets.size(2), 1),
		make_long4(buckets.stride(0), buckets.stride(1), buckets.stride(2), 1)
	);
	// Catch launch-configuration errors; execution errors surface on the
	// next synchronizing call.
	hipError_t err = hipGetLastError();
	if (err != hipSuccess) {
		printf("error in forward_cuda_kernel: %s\n", hipGetErrorString(err));
		return 0;
	}
	return 1;
}
// Host wrapper: launches the per-pixel 2D kernel-convolution forward pass
// on the given stream, one thread per output element.
// Returns 1 on success, 0 if the launch reported an error.
int KernelConv2D_forward_cuda_kernel(
	at::Tensor& input,
	at::Tensor& kernel,
	int kernel_size,
	at::Tensor& output,
	hipStream_t stream
) {
	// Total output elements drives the 1D launch configuration.
	const int n_output = output.size(0) * output.size(1) * output.size(2) * output.size(3);
	const int n_blocks = (n_output + THREAD_PER_BLOCK - 1) / THREAD_PER_BLOCK;
	// Pack shapes/strides once so the launch reads cleanly.
	const long4 in_shape   = make_long4(input.size(0), input.size(1), input.size(2), input.size(3));
	const long4 in_stride  = make_long4(input.stride(0), input.stride(1), input.stride(2), input.stride(3));
	const long4 k_shape    = make_long4(kernel.size(0), kernel.size(1), kernel.size(2), kernel.size(3));
	const long4 k_stride   = make_long4(kernel.stride(0), kernel.stride(1), kernel.stride(2), kernel.stride(3));
	const long4 out_shape  = make_long4(output.size(0), output.size(1), output.size(2), output.size(3));
	const long4 out_stride = make_long4(output.stride(0), output.stride(1), output.stride(2), output.stride(3));
	hipLaunchKernelGGL(( KernelConv2D_forward_function), dim3(n_blocks), dim3(THREAD_PER_BLOCK), 0, stream ,
		n_output,
		kernel_size,
		input.data<float>(), in_shape, in_stride,
		kernel.data<float>(), k_shape, k_stride,
		output.data<float>(), out_shape, out_stride
	);
	// Check for launch errors (execution errors surface at the next sync).
	const hipError_t err = hipGetLastError();
	if (err != hipSuccess) {
		printf("error in forward_cuda_kernel: %s\n", hipGetErrorString(err));
		return 0;
	}
	return 1;
}
//Define input backward operations
// Backward pass: gradient of the loss w.r.t. the (padded) input tensor.
// One thread per grad_input element; each gathers contributions from every
// (out-channel, ky, kx) whose forward read touched this input location.
// The in-bounds test uses kernel's spatial dims (== output H, W) so that
// (intY - intKernelY, intX - intKernelX) stays a valid output coordinate.
__global__ void KernelConv2D_backward_function_input(
	const int n_grad_input,
	const int kernel_size,
	const float* kernel, const long4 kernel_shape, const long4 kernel_stride,
	const float* grad_output, const long4 grad_output_shape, const long4 grad_output_stride,
	float* grad_input, const long4 grad_input_shape, const long4 grad_input_stride
) {
	int intIndex = blockIdx.x * blockDim.x + threadIdx.x;
	// Grid tail guard.
	if (intIndex >= n_grad_input) {
		return;
	}
	float grad_input_i = 0.0;
	int nFeatures = VEC_1(grad_input_shape);
	// Decode flat index into (batch, in-channel, y, x) of grad_input.
	int intBatch = ( intIndex / VEC_3(grad_input_shape) / VEC_2(grad_input_shape) / VEC_1(grad_input_shape)) % VEC_0(grad_input_shape);
	int intDepth = ( intIndex / VEC_3(grad_input_shape) / VEC_2(grad_input_shape) ) % VEC_1(grad_input_shape);
	int intY = ( intIndex / VEC_3(grad_input_shape) ) % VEC_2(grad_input_shape);
	int intX = ( intIndex ) % VEC_3(grad_input_shape);
	// Spatial extent of the per-pixel kernel tensor (matches output H, W).
	int kernel_H = VEC_2(kernel_shape);
	int kernel_W = VEC_3(kernel_shape);
	for (int intKernelY = 0; intKernelY < kernel_size; intKernelY += 1) {
		for (int intKernelX = 0; intKernelX < kernel_size; intKernelX += 1){
			for (int intOutChannel=0; intOutChannel<nFeatures; intOutChannel +=1) {
				// grad_input: B,C,H+k-1,W+k-1
				// Only accumulate where the shifted coordinate lands on a
				// real output pixel.
				if (intY - intKernelY >= 0 && intY - intKernelY <= kernel_H - 1 && intX - intKernelX >= 0 && intX - intKernelX <= kernel_W - 1){
					//int intKernelDepth = kernel_size * kernel_size * intDepth + kernel_size * intKernelY + intKernelX;
					int intKernelDepth = nFeatures * kernel_size * kernel_size * intDepth + kernel_size*kernel_size*intOutChannel + kernel_size * intKernelY + intKernelX;
					grad_input_i += IDX_4(kernel, intBatch, intKernelDepth, intY - intKernelY, intX - intKernelX) * IDX_4(grad_output, intBatch, intOutChannel, intY - intKernelY, intX - intKernelX);
				}
			}
		}
	}
	// Exclusive ownership of this element: plain store, no atomics needed.
	grad_input[intIndex] = grad_input_i;
}
//Define kernel backward operations
// Backward pass: gradient of the loss w.r.t. the per-pixel kernel tensor.
// One thread per grad_kernel element; each is simply the product of the
// input value the forward pass read and the corresponding output gradient.
// The packed channel index intDepth is split into (in-channel, out-channel)
// below — NOTE(review): the forward pass packs as C*k*k*in + k*k*out, so
// verify the div/mod split here matches that layout.
__global__ void KernelConv2D_backward_function_kernel(
	const int n_grad_kernel,
	const int kernel_size,
	const float* input, const long4 input_shape, const long4 input_stride,
	const float* grad_output, const long4 grad_output_shape, const long4 grad_output_stride,
	float* grad_kernel, const long4 grad_kernel_shape, const long4 grad_kernel_stride
) {
	int intIndex = blockIdx.x * blockDim.x + threadIdx.x;
	// Grid tail guard.
	if (intIndex >= n_grad_kernel) {
		return;
	}
	int nFeatures = VEC_1(input_shape);  // FIX: statement was missing its ';'
	// Decode flat index into (batch, packed-depth, ky, kx, y, x).
	int intBatch = ( intIndex / VEC_3(grad_kernel_shape) / VEC_2(grad_kernel_shape) / kernel_size / kernel_size / VEC_1(grad_kernel_shape)) % VEC_0(grad_kernel_shape);
	int intDepth = ( intIndex / VEC_3(grad_kernel_shape) / VEC_2(grad_kernel_shape) / kernel_size / kernel_size ) % VEC_1(grad_kernel_shape);
	int intKernelY = ( intIndex / VEC_3(grad_kernel_shape) / VEC_2(grad_kernel_shape) / kernel_size ) % kernel_size;
	int intKernelX = ( intIndex / VEC_3(grad_kernel_shape) / VEC_2(grad_kernel_shape) ) % kernel_size;
	int intY = ( intIndex / VEC_3(grad_kernel_shape) ) % VEC_2(grad_kernel_shape);
	int intX = ( intIndex ) % VEC_3(grad_kernel_shape);
	int intInChannel = intDepth / nFeatures;
	int intOutChannel = intDepth % nFeatures;
	// grad_input: B,C,K,K,H,W
	// One thread owns one grad_kernel element: plain store, no atomics.
	grad_kernel[intIndex] = IDX_4(input, intBatch, intInChannel, intY + intKernelY, intX + intKernelX) * IDX_4(grad_output, intBatch, intOutChannel, intY, intX);
}
// Host wrapper for the per-pixel conv backward pass: launches the
// grad-input kernel and the grad-kernel kernel back-to-back on the same
// stream (stream ordering serializes them, though they are independent).
// Returns 1 on success, 0 if either launch reported an error.
int KernelConv2D_backward_cuda_kernel(
	at::Tensor& input,
	at::Tensor& kernel,
	int kernel_size,
	at::Tensor& grad_output,
	at::Tensor& grad_input,
	at::Tensor& grad_kernel,
	hipStream_t stream
) {
	// One thread per grad_input element.
	int n_grad_input = 0;
	n_grad_input = grad_input.size(0) * grad_input.size(1) * grad_input.size(2) * grad_input.size(3);
	// One thread per grad_kernel element.
	int n_grad_kernel = 0;
	n_grad_kernel = grad_kernel.size(0) * grad_kernel.size(1) * grad_kernel.size(2) * grad_kernel.size(3);
	hipLaunchKernelGGL(( KernelConv2D_backward_function_input), dim3((n_grad_input + THREAD_PER_BLOCK - 1) / THREAD_PER_BLOCK), dim3(THREAD_PER_BLOCK), 0, stream ,
		n_grad_input,
		kernel_size,
		kernel.data<float>(),
		make_long4(kernel.size(0), kernel.size(1), kernel.size(2), kernel.size(3)),
		make_long4(kernel.stride(0), kernel.stride(1), kernel.stride(2), kernel.stride(3)),
		grad_output.data<float>(),
		make_long4(grad_output.size(0), grad_output.size(1), grad_output.size(2), grad_output.size(3)),
		make_long4(grad_output.stride(0), grad_output.stride(1), grad_output.stride(2), grad_output.stride(3)),
		grad_input.data<float>(),
		make_long4(grad_input.size(0), grad_input.size(1), grad_input.size(2), grad_input.size(3)),
		make_long4(grad_input.stride(0), grad_input.stride(1), grad_input.stride(2), grad_input.stride(3))
	);
	hipLaunchKernelGGL(( KernelConv2D_backward_function_kernel), dim3((n_grad_kernel + THREAD_PER_BLOCK - 1) / THREAD_PER_BLOCK), dim3(THREAD_PER_BLOCK), 0,stream ,
		n_grad_kernel,
		kernel_size,
		input.data<float>(),
		make_long4(input.size(0), input.size(1), input.size(2), input.size(3)),
		make_long4(input.stride(0), input.stride(1), input.stride(2), input.stride(3)),
		grad_output.data<float>(),
		make_long4(grad_output.size(0), grad_output.size(1), grad_output.size(2), grad_output.size(3)),
		make_long4(grad_output.stride(0), grad_output.stride(1), grad_output.stride(2), grad_output.stride(3)),
		grad_kernel.data<float>(),
		make_long4(grad_kernel.size(0), grad_kernel.size(1), grad_kernel.size(2), grad_kernel.size(3)),
		make_long4(grad_kernel.stride(0), grad_kernel.stride(1), grad_kernel.stride(2), grad_kernel.stride(3))
	);
	// check for errors
	// Reports launch errors from either kernel above.
	hipError_t err = hipGetLastError();
	if (err != hipSuccess) {
		printf("error in backward_cuda_kernel: %s\n", hipGetErrorString(err));
		return 0;
	}
	return 1;
}
//Define input backward operations
// Backward pass (bucketed variant): gradient w.r.t. the padded input.
// One thread per (batch, out-channel, in-channel, y, x) contribution; the
// per-out-channel partials for the same grad_input element are combined
// with atomicAdd, so the caller must zero-initialize grad_input.
// The bucket id is looked up at the *output* coordinate the forward read
// came from, so each contribution uses the filter that produced it.
__global__ void CSKernelConv2D_backward_function_input(
	const int n_grad_input,
	const int kernel_size,
	const int* buckets, const long4 buckets_shape, const long4 buckets_stride,
	const float* grad_output, const long4 grad_output_shape, const long4 grad_output_stride,
	float* grad_input, const long4 grad_input_shape, const long4 grad_input_stride,
	const float* kernel_bank, const long4 kernel_bank_shape, const long4 kernel_bank_stride
) {
	int intIndex = blockIdx.x * blockDim.x + threadIdx.x;
	// Grid tail guard.
	if (intIndex >= n_grad_input) {
		return;
	}
	float grad_input_i = 0.0;
	int nFeatures = VEC_1(grad_input_shape);
	// Decode flat index into (batch, out-channel, in-channel, y, x); the grid
	// is grad_output.size(1) times larger than the grad_input element count.
	int intBatch = ( intIndex / VEC_3(grad_input_shape) / VEC_2(grad_input_shape) / VEC_1(grad_input_shape) / VEC_1(grad_output_shape)) % VEC_0(grad_input_shape);
	int intDepthOut = ( intIndex / VEC_3(grad_input_shape) / VEC_2(grad_input_shape) / VEC_1(grad_input_shape) ) % VEC_1(grad_output_shape);
	int intDepthIn = ( intIndex / VEC_3(grad_input_shape) / VEC_2(grad_input_shape) ) % VEC_1(grad_input_shape);
	int intY = ( intIndex / VEC_3(grad_input_shape) ) % VEC_2(grad_input_shape);
	int intX = ( intIndex ) % VEC_3(grad_input_shape);
	// Flat destination offset into the (assumed contiguous) grad_input tensor.
	int OutIdx = intBatch * VEC_3(grad_input_shape) * VEC_2(grad_input_shape) * VEC_1(grad_input_shape) + intDepthIn * VEC_3(grad_input_shape) * VEC_2(grad_input_shape) + intY * VEC_3(grad_input_shape) + intX;
	// Spatial extent of grad_output bounds the valid shifted coordinates.
	int kernel_H = VEC_2(grad_output_shape);
	int kernel_W = VEC_3(grad_output_shape);
	for (int intKernelY = 0; intKernelY < kernel_size; intKernelY += 1) {
		for (int intKernelX = 0; intKernelX < kernel_size; intKernelX += 1){
			// grad_input: B,C,H+k-1,W+k-1 — only accumulate where the shifted
			// coordinate lands on a real output pixel.
			if (intY - intKernelY >= 0 && intY - intKernelY <= kernel_H - 1 && intX - intKernelX >= 0 && intX - intKernelX <= kernel_W - 1){
				int KernelIdx = IDX_3(buckets,intBatch, intY - intKernelY, intX - intKernelX);
				// Bank-row offset: [out-ch][in-ch][ky][kx] with a trailing
				// bias slot per out-channel (hence the +1).
				int intKernelDepth = (nFeatures * kernel_size * kernel_size + 1)* intDepthOut + kernel_size*kernel_size*intDepthIn + kernel_size * intKernelY + intKernelX;
				grad_input_i += IDX_2(kernel_bank, KernelIdx, intKernelDepth) * IDX_4(grad_output, intBatch, intDepthOut, intY - intKernelY, intX - intKernelX);
			}
		}
	}
	atomicAdd(&(grad_input[OutIdx]), grad_input_i);  // FIX: statement was missing its ';'
}
//Define kernel backward operations
// Backward pass (bucketed variant): gradient w.r.t. the shared kernel bank.
// One thread per (batch, bank-row slot, y, x); every pixel whose bucket
// selected a given bank row contributes to that row's gradient, so all
// writes go through atomicAdd and grad_kernel must be pre-zeroed by the
// caller. Slots whose in-row offset equals C_in*k*k are bias slots and
// accumulate the raw output gradient instead of a weight product.
__global__ void CSKernelConv2D_backward_function_kernel(
	const int n_grad_kernel,
	const int kernel_size,
	const float* input, const long4 input_shape, const long4 input_stride,
	const float* grad_output, const long4 grad_output_shape, const long4 grad_output_stride,
	float* grad_kernel, const long4 grad_kernel_shape, const long4 grad_kernel_stride,
	const int* buckets, const long4 buckets_shape, const long4 buckets_stride
) {  // FIX: parameter list had a trailing comma (syntax error)
	int intIndex = blockIdx.x * blockDim.x + threadIdx.x;
	// Grid tail guard.
	if (intIndex >= n_grad_kernel) {
		return;
	}
	int isBias = 0;
	int nFeatures = VEC_1(input_shape);  // FIX: statement was missing its ';'
	// Decode flat index into (batch, bank-row slot, y, x).
	int intBatch = ( intIndex / VEC_1(grad_kernel_shape) / VEC_2(grad_output_shape) / VEC_3(grad_output_shape)) % VEC_0(grad_output_shape);
	int intKernelDepth = ( intIndex / VEC_2(grad_output_shape) / VEC_3(grad_output_shape)) % VEC_1(grad_kernel_shape);
	int intY = ( intIndex / VEC_3(grad_output_shape) ) % VEC_2(grad_output_shape);
	int intX = ( intIndex ) % VEC_3(grad_output_shape);
	// This pixel's bucket decides which bank row receives the gradient.
	int KernelIdx = IDX_3(buckets,intBatch, intY, intX);
	// Each out-channel owns (C_in*k*k + 1) consecutive slots; the last one
	// is the bias.
	int intOutChannel = intKernelDepth / (nFeatures*kernel_size*kernel_size + 1);
	int KernelTemp = (intKernelDepth % (nFeatures*kernel_size*kernel_size + 1));
	if(KernelTemp == (nFeatures*kernel_size*kernel_size)){
		isBias = 1;
	}
	if(isBias == 0){
		// Weight slot: split the in-row offset into (in-channel, ky, kx).
		int intInChannel = KernelTemp / (kernel_size*kernel_size);
		int intKernelY = (KernelTemp / kernel_size) % kernel_size;
		int intKernelX = KernelTemp % kernel_size;
		atomicAdd(&(IDX_2(grad_kernel, KernelIdx, intKernelDepth)), (IDX_4(input, intBatch, intInChannel, intY + intKernelY, intX + intKernelX) * IDX_4(grad_output, intBatch, intOutChannel, intY, intX)));
	}
	else{
		// Bias slot: accumulate the raw output gradient.
		atomicAdd(&(IDX_2(grad_kernel, KernelIdx, intKernelDepth)), IDX_4(grad_output, intBatch, intOutChannel, intY, intX));  // FIX: statement was missing its ';'
	}
}
// Host wrapper for the bucketed conv backward pass: launches the CS
// grad-input and CS grad-kernel kernels on the given stream.
// Both kernels accumulate via atomicAdd, so grad_input and grad_kernel must
// be zero-initialized by the caller.
// Returns 1 on success, 0 if either launch reported an error.
int CSKernelConv2D_backward_cuda_kernel(
	at::Tensor& input,
	at::Tensor& kernel_bank,
	int kernel_size,
	at::Tensor& grad_output,
	at::Tensor& grad_input,
	at::Tensor& grad_kernel,
	at::Tensor& buckets,
	hipStream_t stream
) {
	// One thread per (grad_input element x out-channel) contribution.
	int n_grad_input = 0;
	n_grad_input = grad_input.size(0) * grad_input.size(1) * grad_input.size(2) * grad_input.size(3) * grad_output.size(1);
	// One thread per (batch, bank-row slot, y, x).
	int n_grad_kernel = 0;
	n_grad_kernel = grad_output.size(0) * grad_kernel.size(1) * grad_output.size(2) * grad_output.size(3);
	// FIX: these launches previously named the non-CS kernels
	// (KernelConv2D_backward_function_*), whose parameter lists do not match
	// the bucket/kernel-bank arguments passed below. Launch the CS variants.
	hipLaunchKernelGGL(( CSKernelConv2D_backward_function_input), dim3((n_grad_input + THREAD_PER_BLOCK - 1) / THREAD_PER_BLOCK), dim3(THREAD_PER_BLOCK), 0, stream ,
		n_grad_input,
		kernel_size,
		buckets.data<int>(),
		make_long4(buckets.size(0), buckets.size(1), buckets.size(2), 1),
		make_long4(buckets.stride(0), buckets.stride(1), buckets.stride(2), 1),
		grad_output.data<float>(),
		make_long4(grad_output.size(0), grad_output.size(1), grad_output.size(2), grad_output.size(3)),
		make_long4(grad_output.stride(0), grad_output.stride(1), grad_output.stride(2), grad_output.stride(3)),
		grad_input.data<float>(),
		make_long4(grad_input.size(0), grad_input.size(1), grad_input.size(2), grad_input.size(3)),
		make_long4(grad_input.stride(0), grad_input.stride(1), grad_input.stride(2), grad_input.stride(3)),
		kernel_bank.data<float>(),
		make_long4(kernel_bank.size(0), kernel_bank.size(1), 1, 1),
		make_long4(kernel_bank.stride(0), kernel_bank.stride(1), 1, 1)
	);
	hipLaunchKernelGGL(( CSKernelConv2D_backward_function_kernel), dim3((n_grad_kernel + THREAD_PER_BLOCK - 1) / THREAD_PER_BLOCK), dim3(THREAD_PER_BLOCK), 0,stream ,
		n_grad_kernel,
		kernel_size,
		input.data<float>(),
		make_long4(input.size(0), input.size(1), input.size(2), input.size(3)),
		make_long4(input.stride(0), input.stride(1), input.stride(2), input.stride(3)),
		grad_output.data<float>(),
		make_long4(grad_output.size(0), grad_output.size(1), grad_output.size(2), grad_output.size(3)),
		make_long4(grad_output.stride(0), grad_output.stride(1), grad_output.stride(2), grad_output.stride(3)),
		grad_kernel.data<float>(),
		make_long4(grad_kernel.size(0), grad_kernel.size(1), 1, 1),
		make_long4(grad_kernel.stride(0), grad_kernel.stride(1), 1, 1),
		buckets.data<int>(),
		make_long4(buckets.size(0), buckets.size(1), buckets.size(2), 1),
		make_long4(buckets.stride(0), buckets.stride(1), buckets.stride(2), 1)
	);
	// Reports launch errors from either kernel above.
	hipError_t err = hipGetLastError();
	if (err != hipSuccess) {
		printf("error in backward_cuda_kernel: %s\n", hipGetErrorString(err));
		return 0;
	}
	return 1;
}
#ifdef __cplusplus
}
#endif
| a6357dd6e87b3d0cf0b2501f3222913d4c968565.cu | //Update the implemention to support 2D kernel by Shangchen Zhou
#include <cuda.h>
#include <cuda_runtime.h>
#include <ATen/ATen.h>
#include "stdio.h"
#define THREAD_PER_BLOCK 512
#define VEC_0(ARRAY) ((ARRAY).x)
#define VEC_1(ARRAY) ((ARRAY).y)
#define VEC_2(ARRAY) ((ARRAY).z)
#define VEC_3(ARRAY) ((ARRAY).w)
#define IDX_1(ARRAY, X) ((ARRAY)[((X) * (ARRAY##_stride.x))])
#define IDX_2(ARRAY, X, Y) ((ARRAY)[((X) * (ARRAY##_stride.x)) + ((Y) * (ARRAY##_stride.y))])
#define IDX_3(ARRAY, X, Y, Z) ((ARRAY)[((X) * (ARRAY##_stride.x)) + ((Y) * (ARRAY##_stride.y)) + ((Z) * (ARRAY##_stride.z))])
#define IDX_4(ARRAY, X, Y, Z, W) ((ARRAY)[((X) * (ARRAY##_stride.x)) + ((Y) * (ARRAY##_stride.y)) + ((Z) * (ARRAY##_stride.z)) + ((W) * (ARRAY##_stride.w))])
#ifdef __cplusplus
extern "C" {
#endif
//Define forward operations
// input and output should be of shape [batch_size, n_features, H,W]
// kernel should be of shape [batch_size, n_features*n_features*kernel_size*kernel_size, H, W]
// Forward pass of the per-pixel 2D convolution (CUDA twin of the HIP file).
// Per the file header: input/output are [B, C, H, W] and kernel is
// [B, C*C*k*k, H, W] — a distinct k x k filter per output pixel and per
// (in-channel, out-channel) pair.
// One thread computes exactly one output element (thread count = n_output).
// NOTE(review): input is read at (intY + intKernelY, intX + intKernelX), so
// its spatial dims must exceed output's by kernel_size-1 (pre-padded input)
// — confirm against the caller.
__global__ void KernelConv2D_forward_function(
	const int n_output,
	const int kernel_size,
	const float* input, const long4 input_shape, const long4 input_stride,
	const float* kernel, const long4 kernel_shape, const long4 kernel_stride,
	float* output, const long4 output_shape, const long4 output_stride
) {
	int intIndex = blockIdx.x * blockDim.x + threadIdx.x;
	// Grid tail guard: the last block may run past n_output.
	if (intIndex >= n_output) {
		return;
	}
	float output_i = 0.0;
	int nFeatures = VEC_1(output_shape);
	// Decode the flat index into (batch, out-channel, y, x) assuming a
	// contiguous B x C x H x W layout of `output`.
	int intBatch = ( intIndex / VEC_3(output_shape) / VEC_2(output_shape) / VEC_1(output_shape)) % VEC_0(output_shape);
	int intDepth = ( intIndex / VEC_3(output_shape) / VEC_2(output_shape) ) % VEC_1(output_shape);
	int intY = ( intIndex / VEC_3(output_shape) ) % VEC_2(output_shape);
	int intX = ( intIndex ) % VEC_3(output_shape);
	for (int intKernelY = 0; intKernelY < kernel_size; intKernelY += 1) {
		for (int intKernelX = 0; intKernelX < kernel_size; intKernelX += 1) {
			for (int intInChannel=0; intInChannel<nFeatures; intInChannel +=1) {
				//int intKernelDepth = kernel_size * kernel_size * intDepth + kernel_size * intKernelY + intKernelX;
				// Channel index into the packed kernel tensor:
				// [in-channel][out-channel][ky][kx] with strides C*k*k, k*k, k, 1.
				int intKernelDepth = nFeatures *kernel_size*kernel_size* intInChannel + kernel_size * kernel_size * intDepth + kernel_size * intKernelY + intKernelX;
				output_i += IDX_4(input, intBatch, intInChannel, intY + intKernelY, intX + intKernelX) * IDX_4(kernel, intBatch, intKernelDepth, intY, intX);
			}
		}
	}
	// Direct (non-atomic) store: each output element is owned by one thread.
	output[intIndex] = output_i;
}
// Forward pass of the bucketed ("cross-scale") variant (CUDA twin): each
// output pixel picks one filter set from a shared kernel bank via a
// per-pixel bucket id.
// One thread handles one (batch, in-channel, out-channel, y, x) partial
// product; partials for the same output element are combined with atomicAdd,
// so the caller must zero-initialize `output` before launching.
// kernel_bank is indexed 2D as [bucket][C_out*(C_in*k*k + 1)]; the trailing
// +1 slot per out-channel holds a bias, added once (by the intDepthIn==0
// thread).
__global__ void CSKernelConv2D_forward_function(
	const int n_output,
	const int kernel_size,
	const float* input, const long4 input_shape, const long4 input_stride,
	const float* kernel_bank, const long4 kernel_bank_shape, const long4 kernel_bank_stride,
	float* output, const long4 output_shape, const long4 output_stride,
	const int* buckets, const long4 buckets_shape, const long4 buckets_stride
) {
	int intIndex = blockIdx.x * blockDim.x + threadIdx.x;
	// Grid tail guard.
	if (intIndex >= n_output) {
		return;
	}
	float output_i = 0.0;
	int nFeatures = VEC_1(input_shape);
	// Flat index decodes to (batch, in-channel, out-channel, y, x): the grid
	// is input.size(1) times larger than the output element count.
	int intBatch = ( intIndex / VEC_3(output_shape) / VEC_2(output_shape) / VEC_1(output_shape) / VEC_1(input_shape)) % VEC_0(output_shape);
	int intDepthIn = ( intIndex / VEC_3(output_shape) / VEC_2(output_shape) / VEC_1(output_shape) ) % VEC_1(input_shape);
	int intDepthOut = ( intIndex / VEC_3(output_shape) / VEC_2(output_shape) ) % VEC_1(output_shape);
	int intY = ( intIndex / VEC_3(output_shape) ) % VEC_2(output_shape);
	int intX = ( intIndex ) % VEC_3(output_shape);
	// Flat destination offset into the (assumed contiguous) output tensor.
	int outIndex = intBatch * VEC_3(output_shape) * VEC_2(output_shape) * VEC_1(output_shape) + intDepthOut * VEC_3(output_shape) * VEC_2(output_shape) + intY * VEC_3(output_shape) + intX;
	// Per-pixel bucket id selects the filter row in the kernel bank.
	int KernelIdx = IDX_3(buckets,intBatch, intY, intX);
	for (int intKernelY = 0; intKernelY < kernel_size; intKernelY += 1) {
		for (int intKernelX = 0; intKernelX < kernel_size; intKernelX += 1) {
			//for (int intInChannel=0; intInChannel<nFeatures; intInChannel +=1) {
			//int intKernelDepth = kernel_size * kernel_size * intDepth + kernel_size * intKernelY + intKernelX;
			//int intKernelDepth = nFeatures *kernel_size*kernel_size* intInChannel + kernel_size * kernel_size * intDepth + kernel_size * intKernelY + intKernelX;
			// Offset within the bank row: [out-ch][in-ch][ky][kx], where each
			// out-channel owns (C_in*k*k + 1) slots (last slot = bias).
			int intKernelDepth = intDepthOut* (nFeatures *kernel_size*kernel_size + 1) + kernel_size * kernel_size * intDepthIn + kernel_size * intKernelY + intKernelX;
			output_i += IDX_4(input, intBatch, intDepthIn, intY + intKernelY, intX + intKernelX) * IDX_2(kernel_bank, KernelIdx, intKernelDepth);
			//}
		}
	}
	if (intDepthIn == 0){
		// Bias is stored after the C_in*k*k weights and added exactly once.
		int intBiasDepth = intDepthOut* (nFeatures *kernel_size*kernel_size + 1) + nFeatures *kernel_size*kernel_size;
		output_i+= IDX_2(kernel_bank, KernelIdx, intBiasDepth);
	}
	// Several threads (one per in-channel) accumulate into the same element.
	atomicAdd(&output[outIndex], output_i);
	//output[intIndex] = output_i;
}
// Host-side launcher for CSKernelConv2D_forward_function.
// Launches one thread per (batch, out-channel, y, x, in-channel) partial
// product on `stream`. Returns 1 on successful launch, 0 if the CUDA runtime
// reported an error.
// BUG FIX: added the missing ',' after the `buckets` parameter and the
// missing '()' on kernel_bank.data<float> — both were compile errors.
// NOTE(review): the kernel accumulates into output with atomicAdd, so the
// caller presumably zero-fills output first — confirm.
int CSKernelConv2D_forward_cuda_kernel(
	at::Tensor& input,
	at::Tensor& kernel_bank,
	int kernel_size,
	at::Tensor& output,
	at::Tensor& buckets,
	cudaStream_t stream
) {
	// One thread per output element per input channel.
	int n_output = output.size(0) * output.size(1) * output.size(2) * output.size(3) * input.size(1);
	CSKernelConv2D_forward_function<<< (n_output + THREAD_PER_BLOCK - 1) / THREAD_PER_BLOCK, THREAD_PER_BLOCK, 0, stream >>>(
		n_output,
		kernel_size,
		input.data<float>(),
		make_long4(input.size(0), input.size(1), input.size(2), input.size(3)),
		make_long4(input.stride(0), input.stride(1), input.stride(2), input.stride(3)),
		kernel_bank.data<float>(),
		make_long4(kernel_bank.size(0), kernel_bank.size(1), 1, 1),
		make_long4(kernel_bank.stride(0), kernel_bank.stride(1), 1, 1),
		output.data<float>(),
		make_long4(output.size(0), output.size(1), output.size(2), output.size(3)),
		make_long4(output.stride(0), output.stride(1), output.stride(2), output.stride(3)),
		buckets.data<int>(),
		make_long4(buckets.size(0), buckets.size(1), buckets.size(2), 1),
		make_long4(buckets.stride(0), buckets.stride(1), buckets.stride(2), 1)
	);
	// Launch-configuration errors surface via cudaGetLastError.
	cudaError_t err = cudaGetLastError();
	if (err != cudaSuccess) {
		printf("error in forward_cuda_kernel: %s\n", cudaGetErrorString(err));
		return 0;
	}
	return 1;
}
// Host-side launcher for KernelConv2D_forward_function: one thread per output
// element, enqueued on `stream`. Returns 1 on successful launch, 0 if the
// CUDA runtime reported an error.
int KernelConv2D_forward_cuda_kernel(
	at::Tensor& input,
	at::Tensor& kernel,
	int kernel_size,
	at::Tensor& output,
	cudaStream_t stream
) {
	// Total output elements and the ceil-div grid size to cover them.
	const int n_output = output.size(0) * output.size(1) * output.size(2) * output.size(3);
	const int grid_blocks = (n_output + THREAD_PER_BLOCK - 1) / THREAD_PER_BLOCK;
	KernelConv2D_forward_function<<< grid_blocks, THREAD_PER_BLOCK, 0, stream >>>(
		n_output,
		kernel_size,
		input.data<float>(),
		make_long4(input.size(0), input.size(1), input.size(2), input.size(3)),
		make_long4(input.stride(0), input.stride(1), input.stride(2), input.stride(3)),
		kernel.data<float>(),
		make_long4(kernel.size(0), kernel.size(1), kernel.size(2), kernel.size(3)),
		make_long4(kernel.stride(0), kernel.stride(1), kernel.stride(2), kernel.stride(3)),
		output.data<float>(),
		make_long4(output.size(0), output.size(1), output.size(2), output.size(3)),
		make_long4(output.stride(0), output.stride(1), output.stride(2), output.stride(3))
	);
	// Surface launch-configuration errors to the caller.
	const cudaError_t launchStatus = cudaGetLastError();
	if (launchStatus != cudaSuccess) {
		printf("error in forward_cuda_kernel: %s\n", cudaGetErrorString(launchStatus));
		return 0;
	}
	return 1;
}
//Define input backward operations
// Backward pass w.r.t. the input for the per-pixel kernel convolution.
// One thread per grad_input element (B, C, H_in, W_in): sums contributions
// from every (out-channel, kernel-tap) pair whose forward receptive field
// covered this input pixel.
// NOTE(review): nFeatures is read from grad_input_shape (input channels) but
// used as the out-channel loop bound — this assumes C_in == C_out; confirm.
__global__ void KernelConv2D_backward_function_input(
const int n_grad_input,
const int kernel_size,
const float* kernel, const long4 kernel_shape, const long4 kernel_stride,
const float* grad_output, const long4 grad_output_shape, const long4 grad_output_stride,
float* grad_input, const long4 grad_input_shape, const long4 grad_input_stride
) {
// Flat global thread index with a grid-tail guard.
int intIndex = blockIdx.x * blockDim.x + threadIdx.x;
if (intIndex >= n_grad_input) {
return;
}
float grad_input_i = 0.0;
int nFeatures = VEC_1(grad_input_shape);
// Decompose the flat index into (batch, channel, y, x) of grad_input.
int intBatch = ( intIndex / VEC_3(grad_input_shape) / VEC_2(grad_input_shape) / VEC_1(grad_input_shape)) % VEC_0(grad_input_shape);
int intDepth = ( intIndex / VEC_3(grad_input_shape) / VEC_2(grad_input_shape) ) % VEC_1(grad_input_shape);
int intY = ( intIndex / VEC_3(grad_input_shape) ) % VEC_2(grad_input_shape);
int intX = ( intIndex ) % VEC_3(grad_input_shape);
// Spatial extent of the per-pixel kernel map (indexed at the same positions
// as grad_output below).
int kernel_H = VEC_2(kernel_shape);
int kernel_W = VEC_3(kernel_shape);
for (int intKernelY = 0; intKernelY < kernel_size; intKernelY += 1) {
for (int intKernelX = 0; intKernelX < kernel_size; intKernelX += 1){
for (int intOutChannel=0; intOutChannel<nFeatures; intOutChannel +=1) {
// grad_input: B,C,H+k-1,W+k-1
// Only taps whose corresponding output position (y-ky, x-kx) falls inside
// the kernel/grad_output spatial extent contribute.
if (intY - intKernelY >= 0 && intY - intKernelY <= kernel_H - 1 && intX - intKernelX >= 0 && intX - intKernelX <= kernel_W - 1){
//int intKernelDepth = kernel_size * kernel_size * intDepth + kernel_size * intKernelY + intKernelX;
// Depth into the packed per-pixel kernel: (in-channel, out-channel, ky, kx).
int intKernelDepth = nFeatures * kernel_size * kernel_size * intDepth + kernel_size*kernel_size*intOutChannel + kernel_size * intKernelY + intKernelX;
grad_input_i += IDX_4(kernel, intBatch, intKernelDepth, intY - intKernelY, intX - intKernelX) * IDX_4(grad_output, intBatch, intOutChannel, intY - intKernelY, intX - intKernelX);
}
}
}
}
grad_input[intIndex] = grad_input_i;
}
//Define kernel backward operations
// Backward pass w.r.t. the per-pixel kernel for KernelConv2D.
// One thread per grad_kernel element; each gradient is a single product of an
// input value and an upstream gradient, so no accumulation is needed.
// BUG FIX: the nFeatures declaration was missing its terminating ';'.
// NOTE(review): the index decomposition divides by kernel_size twice on top
// of VEC_2/VEC_3 of grad_kernel_shape — confirm the intended packed layout
// (B, C_in*C_out, K, K, H, W) folded into the 4-D grad_kernel tensor.
__global__ void KernelConv2D_backward_function_kernel(
	const int n_grad_kernel,
	const int kernel_size,
	const float* input, const long4 input_shape, const long4 input_stride,
	const float* grad_output, const long4 grad_output_shape, const long4 grad_output_stride,
	float* grad_kernel, const long4 grad_kernel_shape, const long4 grad_kernel_stride
) {
	// Flat global thread index with a grid-tail guard.
	int intIndex = blockIdx.x * blockDim.x + threadIdx.x;
	if (intIndex >= n_grad_kernel) {
		return;
	}
	int nFeatures = VEC_1(input_shape);
	// Decompose the flat index into (batch, depth, ky, kx, y, x).
	int intBatch = ( intIndex / VEC_3(grad_kernel_shape) / VEC_2(grad_kernel_shape) / kernel_size / kernel_size / VEC_1(grad_kernel_shape)) % VEC_0(grad_kernel_shape);
	int intDepth = ( intIndex / VEC_3(grad_kernel_shape) / VEC_2(grad_kernel_shape) / kernel_size / kernel_size ) % VEC_1(grad_kernel_shape);
	int intKernelY = ( intIndex / VEC_3(grad_kernel_shape) / VEC_2(grad_kernel_shape) / kernel_size ) % kernel_size;
	int intKernelX = ( intIndex / VEC_3(grad_kernel_shape) / VEC_2(grad_kernel_shape) ) % kernel_size;
	int intY = ( intIndex / VEC_3(grad_kernel_shape) ) % VEC_2(grad_kernel_shape);
	int intX = ( intIndex ) % VEC_3(grad_kernel_shape);
	// Split the packed depth into the (in-channel, out-channel) pair.
	int intInChannel = intDepth / nFeatures;
	int intOutChannel = intDepth % nFeatures;
	// d(kernel) = input patch value * upstream gradient at (y, x).
	grad_kernel[intIndex] = IDX_4(input, intBatch, intInChannel, intY + intKernelY, intX + intKernelX) * IDX_4(grad_output, intBatch, intOutChannel, intY, intX);
}
// Host-side launcher for the KernelConv2D backward passes.
// Enqueues the grad-input and grad-kernel kernels on `stream`, one thread per
// element of each gradient tensor. Returns 1 on success, 0 if the CUDA
// runtime reported a launch error.
int KernelConv2D_backward_cuda_kernel(
	at::Tensor& input,
	at::Tensor& kernel,
	int kernel_size,
	at::Tensor& grad_output,
	at::Tensor& grad_input,
	at::Tensor& grad_kernel,
	cudaStream_t stream
) {
	// Element counts and ceil-div grid sizes for the two backward kernels.
	const int n_grad_input = grad_input.size(0) * grad_input.size(1) * grad_input.size(2) * grad_input.size(3);
	const int n_grad_kernel = grad_kernel.size(0) * grad_kernel.size(1) * grad_kernel.size(2) * grad_kernel.size(3);
	const int grad_input_blocks = (n_grad_input + THREAD_PER_BLOCK - 1) / THREAD_PER_BLOCK;
	const int grad_kernel_blocks = (n_grad_kernel + THREAD_PER_BLOCK - 1) / THREAD_PER_BLOCK;
	KernelConv2D_backward_function_input<<< grad_input_blocks, THREAD_PER_BLOCK, 0, stream >>>(
		n_grad_input,
		kernel_size,
		kernel.data<float>(),
		make_long4(kernel.size(0), kernel.size(1), kernel.size(2), kernel.size(3)),
		make_long4(kernel.stride(0), kernel.stride(1), kernel.stride(2), kernel.stride(3)),
		grad_output.data<float>(),
		make_long4(grad_output.size(0), grad_output.size(1), grad_output.size(2), grad_output.size(3)),
		make_long4(grad_output.stride(0), grad_output.stride(1), grad_output.stride(2), grad_output.stride(3)),
		grad_input.data<float>(),
		make_long4(grad_input.size(0), grad_input.size(1), grad_input.size(2), grad_input.size(3)),
		make_long4(grad_input.stride(0), grad_input.stride(1), grad_input.stride(2), grad_input.stride(3))
	);
	KernelConv2D_backward_function_kernel<<< grad_kernel_blocks, THREAD_PER_BLOCK, 0, stream >>>(
		n_grad_kernel,
		kernel_size,
		input.data<float>(),
		make_long4(input.size(0), input.size(1), input.size(2), input.size(3)),
		make_long4(input.stride(0), input.stride(1), input.stride(2), input.stride(3)),
		grad_output.data<float>(),
		make_long4(grad_output.size(0), grad_output.size(1), grad_output.size(2), grad_output.size(3)),
		make_long4(grad_output.stride(0), grad_output.stride(1), grad_output.stride(2), grad_output.stride(3)),
		grad_kernel.data<float>(),
		make_long4(grad_kernel.size(0), grad_kernel.size(1), grad_kernel.size(2), grad_kernel.size(3)),
		make_long4(grad_kernel.stride(0), grad_kernel.stride(1), grad_kernel.stride(2), grad_kernel.stride(3))
	);
	// Surface launch-configuration errors from either launch.
	const cudaError_t status = cudaGetLastError();
	if (status != cudaSuccess) {
		printf("error in backward_cuda_kernel: %s\n", cudaGetErrorString(status));
		return 0;
	}
	return 1;
}
//Define input backward operations
// Backward pass w.r.t. the input for the bucketed (CS) kernel convolution.
// One thread per (batch, out-channel, in-channel, y, x) partial gradient;
// partial sums over out-channels are combined with atomicAdd into grad_input,
// so n_grad_input = B * C_out * C_in * H_in * W_in.
// BUG FIX: the atomicAdd statement was missing its terminating ';'.
// NOTE(review): grad_input is accumulated with atomicAdd, so it presumably
// must be zero-filled by the caller — confirm.
__global__ void CSKernelConv2D_backward_function_input(
	const int n_grad_input,
	const int kernel_size,
	const int* buckets, const long4 buckets_shape, const long4 buckets_stride,
	const float* grad_output, const long4 grad_output_shape, const long4 grad_output_stride,
	float* grad_input, const long4 grad_input_shape, const long4 grad_input_stride,
	const float* kernel_bank, const long4 kernel_bank_shape, const long4 kernel_bank_stride
) {
	// Flat global thread index with a grid-tail guard.
	int intIndex = blockIdx.x * blockDim.x + threadIdx.x;
	if (intIndex >= n_grad_input) {
		return;
	}
	float grad_input_i = 0.0;
	// Number of input channels.
	int nFeatures = VEC_1(grad_input_shape);
	// Decompose the flat index into (batch, out-channel, in-channel, y, x).
	int intBatch = ( intIndex / VEC_3(grad_input_shape) / VEC_2(grad_input_shape) / VEC_1(grad_input_shape) / VEC_1(grad_output_shape)) % VEC_0(grad_input_shape);
	int intDepthOut = ( intIndex / VEC_3(grad_input_shape) / VEC_2(grad_input_shape) / VEC_1(grad_input_shape) ) % VEC_1(grad_output_shape);
	int intDepthIn = ( intIndex / VEC_3(grad_input_shape) / VEC_2(grad_input_shape) ) % VEC_1(grad_input_shape);
	int intY = ( intIndex / VEC_3(grad_input_shape) ) % VEC_2(grad_input_shape);
	int intX = ( intIndex ) % VEC_3(grad_input_shape);
	// Flat destination index into grad_input (B, C_in, H, W); threads for
	// different out-channels share the same OutIdx, hence the atomicAdd.
	int OutIdx = intBatch * VEC_3(grad_input_shape) * VEC_2(grad_input_shape) * VEC_1(grad_input_shape) + intDepthIn * VEC_3(grad_input_shape) * VEC_2(grad_input_shape) + intY * VEC_3(grad_input_shape) + intX;
	// Spatial extent of grad_output; only taps landing inside it contribute.
	int kernel_H = VEC_2(grad_output_shape);
	int kernel_W = VEC_3(grad_output_shape);
	for (int intKernelY = 0; intKernelY < kernel_size; intKernelY += 1) {
		for (int intKernelX = 0; intKernelX < kernel_size; intKernelX += 1){
			if (intY - intKernelY >= 0 && intY - intKernelY <= kernel_H - 1 && intX - intKernelX >= 0 && intX - intKernelX <= kernel_W - 1){
				// The bank row used at the output position that consumed this pixel.
				int KernelIdx = IDX_3(buckets,intBatch, intY - intKernelY, intX - intKernelX);
				// Per-output-channel stripe of (C_in*k*k + 1) entries; bias excluded here.
				int intKernelDepth = (nFeatures * kernel_size * kernel_size + 1)* intDepthOut + kernel_size*kernel_size*intDepthIn + kernel_size * intKernelY + intKernelX;
				grad_input_i += IDX_2(kernel_bank, KernelIdx, intKernelDepth) * IDX_4(grad_output, intBatch, intDepthOut, intY - intKernelY, intX - intKernelX);
			}
		}
	}
	atomicAdd(&(grad_input[OutIdx]), grad_input_i);
}
//Define kernel backward operations
// Backward pass w.r.t. the kernel bank for the bucketed (CS) convolution.
// One thread per (batch, bank-entry, y, x) over the grad_output grid;
// gradients for the same bank row are accumulated with atomicAdd because many
// output positions can share one bucket.
// BUG FIX: removed the trailing ',' in the parameter list and added the
// missing ';' after the nFeatures declaration and the bias atomicAdd.
// NOTE(review): grad_kernel is accumulated with atomicAdd, so it presumably
// must be zero-filled by the caller — confirm.
__global__ void CSKernelConv2D_backward_function_kernel(
	const int n_grad_kernel,
	const int kernel_size,
	const float* input, const long4 input_shape, const long4 input_stride,
	const float* grad_output, const long4 grad_output_shape, const long4 grad_output_stride,
	float* grad_kernel, const long4 grad_kernel_shape, const long4 grad_kernel_stride,
	const int* buckets, const long4 buckets_shape, const long4 buckets_stride
) {
	// Flat global thread index with a grid-tail guard.
	int intIndex = blockIdx.x * blockDim.x + threadIdx.x;
	if (intIndex >= n_grad_kernel) {
		return;
	}
	int isBias = 0;
	int nFeatures = VEC_1(input_shape);
	// Decompose the flat index into (batch, bank-entry, y, x) over the
	// grad_output spatial grid.
	int intBatch = ( intIndex / VEC_1(grad_kernel_shape) / VEC_2(grad_output_shape) / VEC_3(grad_output_shape)) % VEC_0(grad_output_shape);
	int intKernelDepth = ( intIndex / VEC_2(grad_output_shape) / VEC_3(grad_output_shape)) % VEC_1(grad_kernel_shape);
	int intY = ( intIndex / VEC_3(grad_output_shape) ) % VEC_2(grad_output_shape);
	int intX = ( intIndex ) % VEC_3(grad_output_shape);
	// Bank row used at this output position.
	int KernelIdx = IDX_3(buckets,intBatch, intY, intX);
	// Each output channel owns a stripe of (C_in*k*k + 1) entries; the last
	// entry of a stripe is the bias.
	int intOutChannel = intKernelDepth / (nFeatures*kernel_size*kernel_size + 1);
	int KernelTemp = (intKernelDepth % (nFeatures*kernel_size*kernel_size + 1));
	if(KernelTemp == (nFeatures*kernel_size*kernel_size)){
		isBias = 1;
	}
	if(isBias == 0){
		// Weight entry: unpack (in-channel, ky, kx) within the stripe.
		int intInChannel = KernelTemp / (kernel_size*kernel_size);
		int intKernelY = (KernelTemp / kernel_size) % kernel_size;
		int intKernelX = KernelTemp % kernel_size;
		atomicAdd(&(IDX_2(grad_kernel, KernelIdx, intKernelDepth)), (IDX_4(input, intBatch, intInChannel, intY + intKernelY, intX + intKernelX) * IDX_4(grad_output, intBatch, intOutChannel, intY, intX)));
	}
	else{
		// Bias entry: its gradient is just the upstream output gradient.
		atomicAdd(&(IDX_2(grad_kernel, KernelIdx, intKernelDepth)), IDX_4(grad_output, intBatch, intOutChannel, intY, intX));
	}
}
// Host-side launcher for the CS (bucketed kernel-bank) backward passes.
// Returns 1 on success, 0 if the CUDA runtime reported a launch error.
// BUG FIX: this launcher previously dispatched the plain KernelConv2D
// backward kernels, whose parameter lists do not accept the buckets/bank
// arguments being passed (a guaranteed compile error). It now calls the CS
// variants, with the buckets triple first for the input-backward pass to
// match CSKernelConv2D_backward_function_input's signature.
// NOTE(review): both CS kernels accumulate with atomicAdd, so grad_input and
// grad_kernel presumably must be zero-filled by the caller — confirm.
int CSKernelConv2D_backward_cuda_kernel(
	at::Tensor& input,
	at::Tensor& kernel_bank,
	int kernel_size,
	at::Tensor& grad_output,
	at::Tensor& grad_input,
	at::Tensor& grad_kernel,
	at::Tensor& buckets,
	cudaStream_t stream
) {
	// One thread per (grad_input element, out-channel) partial sum.
	int n_grad_input = grad_input.size(0) * grad_input.size(1) * grad_input.size(2) * grad_input.size(3) * grad_output.size(1);
	// One thread per (batch, bank entry, output y, output x).
	int n_grad_kernel = grad_output.size(0) * grad_kernel.size(1) * grad_output.size(2) * grad_output.size(3);
	CSKernelConv2D_backward_function_input<<< (n_grad_input + THREAD_PER_BLOCK - 1) / THREAD_PER_BLOCK, THREAD_PER_BLOCK, 0, stream >>>(
		n_grad_input,
		kernel_size,
		buckets.data<int>(),
		make_long4(buckets.size(0), buckets.size(1), buckets.size(2), 1),
		make_long4(buckets.stride(0), buckets.stride(1), buckets.stride(2), 1),
		grad_output.data<float>(),
		make_long4(grad_output.size(0), grad_output.size(1), grad_output.size(2), grad_output.size(3)),
		make_long4(grad_output.stride(0), grad_output.stride(1), grad_output.stride(2), grad_output.stride(3)),
		grad_input.data<float>(),
		make_long4(grad_input.size(0), grad_input.size(1), grad_input.size(2), grad_input.size(3)),
		make_long4(grad_input.stride(0), grad_input.stride(1), grad_input.stride(2), grad_input.stride(3)),
		kernel_bank.data<float>(),
		make_long4(kernel_bank.size(0), kernel_bank.size(1), 1, 1),
		make_long4(kernel_bank.stride(0), kernel_bank.stride(1), 1, 1)
	);
	CSKernelConv2D_backward_function_kernel<<< (n_grad_kernel + THREAD_PER_BLOCK - 1) / THREAD_PER_BLOCK, THREAD_PER_BLOCK, 0, stream >>>(
		n_grad_kernel,
		kernel_size,
		input.data<float>(),
		make_long4(input.size(0), input.size(1), input.size(2), input.size(3)),
		make_long4(input.stride(0), input.stride(1), input.stride(2), input.stride(3)),
		grad_output.data<float>(),
		make_long4(grad_output.size(0), grad_output.size(1), grad_output.size(2), grad_output.size(3)),
		make_long4(grad_output.stride(0), grad_output.stride(1), grad_output.stride(2), grad_output.stride(3)),
		grad_kernel.data<float>(),
		make_long4(grad_kernel.size(0), grad_kernel.size(1), 1, 1),
		make_long4(grad_kernel.stride(0), grad_kernel.stride(1), 1, 1),
		buckets.data<int>(),
		make_long4(buckets.size(0), buckets.size(1), buckets.size(2), 1),
		make_long4(buckets.stride(0), buckets.stride(1), buckets.stride(2), 1)
	);
	// check for errors
	cudaError_t err = cudaGetLastError();
	if (err != cudaSuccess) {
		printf("error in backward_cuda_kernel: %s\n", cudaGetErrorString(err));
		return 0;
	}
	return 1;
}
#ifdef __cplusplus
}
#endif
|
bba84b4153fc1e0e354af0f5da4cd56eb4c0ca9b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
// Prints a success message from exactly one thread: the last thread
// (threadIdx.x == 1023) of block 255, so the message only appears when the
// launch configuration provides at least 256 blocks of 1024 threads.
__global__ void printSuccessForCorrectExecutionConfiguration()
{
    const bool lastBlock  = (blockIdx.x == 255);
    const bool lastThread = (threadIdx.x == 1023);
    if (lastThread && lastBlock)
    {
        printf("Success!\n");
    }
}
// Launches the kernel with a grid/block shape (256 blocks x 1024 threads)
// that satisfies the (blockIdx.x == 255, threadIdx.x == 1023) check inside
// printSuccessForCorrectExecutionConfiguration.
int main()
{
/*
* This is one possible execution context that will make
* the kernel launch print its success message.
*/
hipLaunchKernelGGL(( printSuccessForCorrectExecutionConfiguration), dim3(256), dim3(1024), 0, 0, );
/*
* Don't forget kernel execution is asynchronous and you must
* sync on its completion.
*/
// Block until the kernel (and its device-side printf) has completed.
hipDeviceSynchronize();
} | bba84b4153fc1e0e354af0f5da4cd56eb4c0ca9b.cu | #include <stdio.h>
// Prints a success message from exactly one thread: the last thread
// (threadIdx.x == 1023) of block 255, so the message only appears when the
// launch configuration provides at least 256 blocks of 1024 threads.
__global__ void printSuccessForCorrectExecutionConfiguration()
{
    const bool lastBlock  = (blockIdx.x == 255);
    const bool lastThread = (threadIdx.x == 1023);
    if (lastThread && lastBlock)
    {
        printf("Success!\n");
    }
}
// Launches the kernel with a grid/block shape (256 blocks x 1024 threads)
// that satisfies the (blockIdx.x == 255, threadIdx.x == 1023) check inside
// printSuccessForCorrectExecutionConfiguration.
int main()
{
/*
* This is one possible execution context that will make
* the kernel launch print its success message.
*/
printSuccessForCorrectExecutionConfiguration<<<256, 1024>>>();
/*
* Don't forget kernel execution is asynchronous and you must
* sync on its completion.
*/
// Block until the kernel (and its device-side printf) has completed.
cudaDeviceSynchronize();
} |
b5e3460eb76197b7269191cd526f4efcbcc8facf.hip | // !!! This is a file automatically generated by hipify!!!
#include <vector>
#include "caffe/layers/generic_window_data_layer.hpp"
namespace caffe {
// GPU forward for CropDataLayer.
// Pops the next prefetched Batch (blocking if the prefetch queue is empty),
// reshapes the top blobs to the batch's shapes, device-copies the data (and
// labels when output_labels_ is set), then recycles the Batch buffer to the
// free queue and advances the wrap-around forward-sample counter fwd_count_.
template <typename Dtype>
void CropDataLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
//LOG(INFO) << "I AM AT WORK ####################### GPU ###################";
// Blocks until the prefetch thread has a full batch ready.
Batch<Dtype>* batch = this->prefetch_full_.pop("Data layer prefetch queue empty");
// Reshape to loaded data.
top[0]->ReshapeLike(batch->data_);
// Copy the data
caffe_copy(batch->data_.count(), batch->data_.gpu_data(),
top[0]->mutable_gpu_data());
DLOG(INFO) << "Prefetch copied";
if (this->output_labels_) {
// Reshape to loaded labels.
top[1]->ReshapeLike(batch->label_);
caffe_copy(batch->label_.count(), batch->label_.gpu_data(),
top[1]->mutable_gpu_data());
}
//New thread will be created by GenericWindowData Layer
// Ensure the copies on the default stream completed before the batch buffer
// is handed back to the prefetch thread for reuse.
CUDA_CHECK(hipStreamSynchronize(hipStreamDefault));
this->prefetch_free_.push(batch);
fwd_count_ += batch->data_.num();
//LOG(INFO) << "GPU FWD COUNT: " << fwd_count_;
// Wrap the counter once a full pass over num_examples_ samples completes.
if (fwd_count_ >= num_examples_)
fwd_count_ = fwd_count_ % num_examples_;
}
INSTANTIATE_LAYER_GPU_FORWARD(CropDataLayer);
} // namespace caffe
| b5e3460eb76197b7269191cd526f4efcbcc8facf.cu | #include <vector>
#include "caffe/layers/generic_window_data_layer.hpp"
namespace caffe {
// GPU forward for CropDataLayer.
// Pops the next prefetched Batch (blocking if the prefetch queue is empty),
// reshapes the top blobs to the batch's shapes, device-copies the data (and
// labels when output_labels_ is set), then recycles the Batch buffer to the
// free queue and advances the wrap-around forward-sample counter fwd_count_.
template <typename Dtype>
void CropDataLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
//LOG(INFO) << "I AM AT WORK ####################### GPU ###################";
// Blocks until the prefetch thread has a full batch ready.
Batch<Dtype>* batch = this->prefetch_full_.pop("Data layer prefetch queue empty");
// Reshape to loaded data.
top[0]->ReshapeLike(batch->data_);
// Copy the data
caffe_copy(batch->data_.count(), batch->data_.gpu_data(),
top[0]->mutable_gpu_data());
DLOG(INFO) << "Prefetch copied";
if (this->output_labels_) {
// Reshape to loaded labels.
top[1]->ReshapeLike(batch->label_);
caffe_copy(batch->label_.count(), batch->label_.gpu_data(),
top[1]->mutable_gpu_data());
}
//New thread will be created by GenericWindowData Layer
// Ensure the copies on the default stream completed before the batch buffer
// is handed back to the prefetch thread for reuse.
CUDA_CHECK(cudaStreamSynchronize(cudaStreamDefault));
this->prefetch_free_.push(batch);
fwd_count_ += batch->data_.num();
//LOG(INFO) << "GPU FWD COUNT: " << fwd_count_;
// Wrap the counter once a full pass over num_examples_ samples completes.
if (fwd_count_ >= num_examples_)
fwd_count_ = fwd_count_ % num_examples_;
}
INSTANTIATE_LAYER_GPU_FORWARD(CropDataLayer);
} // namespace caffe
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.